diff --git "a/1158.jsonl" "b/1158.jsonl" new file mode 100644--- /dev/null +++ "b/1158.jsonl" @@ -0,0 +1,413 @@ +{"seq_id": "150809340", "text": "import regex as re\n\ndef clean(text):\n text = re.sub(r'[(][^)]*[)]', '', text)\n return text\n\nif __name__ == '__main__':\n\n out_file = open('../data/wiki_lt_articles.clean', 'w')\n\n try:\n in_file = open(\"../data/wiki_lt_articles.extracted\")\n\n article_lines = []\n for line in in_file:\n article_lines.append(line)\n if '' in line:\n text = ' '.join(map(lambda x: x.strip(), article_lines[2:-1]))\n out_file.write(clean(text) + '\\n')\n #log_short(text)\n article_lines = []\n\n out_file.close()\n #printout()\n\n except IOError:\n print('No data/lt_articles.extracted, run get_wiki_data.sh first.')\n", "sub_path": "lt/clean_wiki_data.py", "file_name": "clean_wiki_data.py", "file_ext": "py", "file_size_in_byte": 726, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "regex.sub", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "262055111", "text": "\"\"\"\r\nProvider for local data\r\n\"\"\"\r\nimport os\r\nimport urllib\r\nimport json\r\n\r\nfrom d2o_common.data_provider.DataFetcherBase import DataFetcherBase\r\nfrom d2o_common.api.exception import APIMessageError\r\n\r\n\r\nclass StaticDataFetcher(DataFetcherBase):\r\n def __init__(self, **kwargs):\r\n super(StaticDataFetcher, self).__init__(logger_name='static_data_provider', **kwargs)\r\n\r\n def fetch_data(self, url):\r\n filename = urllib.quote(url, '')\r\n\r\n try:\r\n with open(os.path.join(self._cached_dir, filename), 'r') as f:\r\n try:\r\n return json.load(f)\r\n except ValueError as e:\r\n self._logger.info(url)\r\n self._logger.error(e.message)\r\n raise APIMessageError('Data is invalid')\r\n except IOError as e:\r\n self._logger.info(url)\r\n self._logger.error(e.message)\r\n raise APIMessageError('No such data')\r\n", "sub_path": "dashboard_May/d2o_common/data_provider/StaticDataFetcher.py", "file_name": "StaticDataFetcher.py", "file_ext": "py", "file_size_in_byte": 966, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "d2o_common.data_provider.DataFetcherBase.DataFetcherBase", "line_number": 12, "usage_type": "name"}, {"api_name": "urllib.quote", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 22, "usage_type": "call"}, {"api_name": "d2o_common.api.exception.APIMessageError", "line_number": 26, "usage_type": "call"}, {"api_name": "d2o_common.api.exception.APIMessageError", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "108828748", "text": "import os\r\nfrom pydub import AudioSegment\r\nimport re\r\npath = r'D:\\PythonProject\\CrawlAudioText\\AudioTextResource'\r\ndirs = os.listdir(path)\r\ncount = 0\r\ntotal_time = 0\r\nwrite_to_file = open(r'../count_file.txt', 'w', encoding='utf-8')\r\nwrite_to_file.truncate()\r\n\r\n\r\ndef lastDirLength(path):\r\n if os.path.isdir(path):\r\n temp_file = os.listdir(path)[0]\r\n temp_path = os.path.join(path, temp_file)\r\n if os.path.isfile(temp_path):\r\n print(path)\r\n write_to_file.write(str(path.split('\\\\')[-1]) + ' ' + str(len(os.listdir(path))/2) + '\\n')\r\n write_to_file.flush()\r\n print(len(os.listdir(path)))\r\n # getTotalTime(path)\r\n global count\r\n count += len(os.listdir(path))\r\n 
else:\r\n for file in os.listdir(path):\r\n temp_path = os.path.join(path, file)\r\n lastDirLength(temp_path)\r\n\r\n\r\ndef getTotalTime(path):\r\n k = 1\r\n for file in os.listdir(path):\r\n if re.match(r'.*?\\.mp3', file):\r\n print(k)\r\n k += 1\r\n temp_path = os.path.join(path, file)\r\n # print(temp_path)\r\n global total_time\r\n total_time += len(AudioSegment.from_file(temp_path, format='mp3'))/1000\r\n\r\n\r\nfor dir_temp in dirs:\r\n lastDirLength(os.path.join(path, dir_temp))\r\nwrite_to_file.write('总共:' + str(count/2) + '个文件')\r\nprint('总共:' + str(count/2) + '个文件')\r\nprint('总时长:' + str(total_time) + 's')\r\nprint('合计共:' + str(total_time/3600) + 'h')", "sub_path": "CrawlAudioText/Python/CountFiles.py", "file_name": "CountFiles.py", "file_ext": "py", "file_size_in_byte": 1556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.listdir", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 23, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 32, "usage_type": "call"}, {"api_name": "re.match", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pydub.AudioSegment.from_file", "line_number": 39, "usage_type": "call"}, {"api_name": "pydub.AudioSegment", "line_number": 39, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}]} +{"seq_id": "439810655", "text": "import os\nfrom tqdm import tqdm\nimport pickle\n\nroot_pose_path = '/data3/alexhu/Datasets/AUTSL_Upper/Keypoints_2d_mmpose/'\nroot_img_path = '/data3/alexhu/Datasets/AUTSL_Upper/jpg_video/'\n\nsplit_path = os.listdir(root_img_path)\nfor split_index in (range(len(split_path))):\n split_name = split_path[split_index]\n real_split_name = os.path.join(root_img_path, split_name)\n video_list = os.listdir(real_split_name)\n for video_name in tqdm(video_list):\n if video_name.endswith('depth'):\n continue\n real_video_name = os.path.join(real_split_name, video_name)\n N_frame_img = len(os.listdir(real_video_name))\n real_kps_path = os.path.join(root_pose_path, split_name, video_name+'.pkl')\n with open(real_kps_path, 'rb') as f:\n kps_dict = pickle.load(f)\n N_frame_pose = kps_dict['keypoints'].shape[0]\n if N_frame_img != N_frame_pose + 1:\n print(real_video_name)", "sub_path": "mmpose/Stage2_AUTSL_check.py", "file_name": "Stage2_AUTSL_check.py", "file_ext": "py", "file_size_in_byte": 943, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.listdir", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 12, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "634922960", "text": "import requests\nimport re\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\n\n# a simple demo to dowload the howfile source.\n\nr=requests.session()\npath=re.compile('href=\"(http://dl12.howfile.com/downfile/+(?:.+?))\"')\nvid_path=re.compile('setCookie\\(\"vid\", \"(.+?)\", 1\\*60\\*60\\*1000\\);')\nvid1_path=re.compile('setCookie\\(\"vid1\", \"(.+?)\", 1\\*60\\*60\\*1000\\);')\ndata=r.get('http://howfile.com/file/soulplusno1/8637ca5f/')\nsoup=BeautifulSoup(data.text,'lxml')\ninformation_data=soup.find('meta',{\"name\":\"keywords\"})['content'].split('.')\n\n\nurl=path.findall(data.text)[0]\nvid=vid_path.findall(data.text)[0]\nvid_1=vid1_path.findall(data.text)[0]\nprint(information_data)\nprint(url)\nprint(r.cookies)\nprint(r.cookies['JSESSIONID'])\nprint(r.cookies['validCodeUrl'])\nprint(vid)\nprint(vid_1)\nprint('language=zh_cn; JSESSIONID={0}; validCodeUrl={1}; vid={2}; vid1={3}'.format(r.cookies['JSESSIONID'],r.cookies['validCodeUrl'],vid,vid_1))\n\nget_headers={\n'Cookie':'language=zh_cn; JSESSIONID={0}; validCodeUrl={1}; vid={2}; vid1={3}'.format(r.cookies['JSESSIONID'],r.cookies['validCodeUrl'],vid,vid_1),\n'Host':'dl22.howfile.com',\n'Referer':'http://howfile.com/file/sona5566/8277335f/',\n'Upgrade-Insecure-Requests':'1',\n'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36',\n}\n\n\n\nwith open(information_data[0]+\".\"+information_data[-1],'wb') as f:\n\tprint(\"dowload is beginning\")\n\tdata=r.get(url=url,headers=get_headers).content\n\tf.write(data)\n\tprint(\"finished!\")\n", "sub_path": "dowload_howfile.py", "file_name": "dowload_howfile.py", "file_ext": "py", "file_size_in_byte": 1512, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "requests.session", "line_number": 9, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 10, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 11, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "371142649", "text": "from django.db import models\nfrom django.core.exceptions import ValidationError\nfrom .base_dispatch import BaseDispatch\nfrom .dispatch_container_register import DispatchContainerRegister\n\n\nclass DispatchItemRegister(BaseDispatch):\n\n dispatch_container_register = models.ForeignKey(DispatchContainerRegister)\n\n item_app_label = models.CharField(max_length=35)\n\n item_model_name = models.CharField(max_length=35)\n\n item_identifier_attrname = 
models.CharField(max_length=35)\n\n item_identifier = models.CharField(max_length=40)\n\n item_pk = models.CharField(max_length=50)\n\n dispatch_host = models.CharField(max_length=35, null=True)\n\n dispatch_using = models.CharField(max_length=35, null=True)\n\n registered_subjects = models.TextField(\n verbose_name='List of Registered Subjects',\n null=True,\n blank=True,\n help_text=\"List of Registered Subjects linked to this DispatchItem\"\n )\n\n objects = models.Manager()\n\n# temp removed - erikvw (fails on unknown producer when setting dispatched to False)\n# no longer necessary to check if the instance is dispatched, as this is done by\n# the controller class.\n def save(self, *args, **kwargs):\n \"\"\"Confirms an instance does not exist for this item_identifier.\"\"\"\n using = kwargs.get('using')\n if self.__class__.objects.using(using).filter(\n item_identifier=self.item_identifier,\n is_dispatched=True,\n ).exclude(pk=self.pk).exists():\n dispatch_item = self.__class__.objects.using(using).get(\n item_identifier=self.item_identifier,\n is_dispatched=True,\n ).exclude(pk=self.pk)\n raise ValueError(\"Cannot dispatch. The item \\'{0}\\' is already dispatched to \\'{1}\\'.\".format(dispatch_item.item_identifier, dispatch_item.dispatch_container_register.producer))\n if self.is_dispatched and self.return_datetime:\n raise ValidationError('Attribute return_datetime must be None if is_dispatched=True.')\n if not self.is_dispatched and not self.return_datetime:\n raise ValidationError('Attribute \\'return_datetime\\' may not be None if \\'is_dispatched\\'=False.')\n\n super(DispatchItemRegister, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return \"Dispatch Item {0} {1} -> {2} ({3})\".format(self.item_model_name, self.item_identifier, self.producer.name, self.is_dispatched)\n\n class Meta:\n app_label = \"dispatch\"\n db_table = 'bhp_dispatch_dispatchitemregister'\n unique_together = (('dispatch_container_register', 'item_pk', 'item_identifier', 'is_dispatched'), )\n index_together = [['item_app_label', 'item_model_name', 'item_pk', 'is_dispatched'], ]\n", "sub_path": "edc/device/dispatch/models/dispatch_item_register.py", "file_name": "dispatch_item_register.py", "file_ext": "py", "file_size_in_byte": 2762, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "base_dispatch.BaseDispatch", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 9, "usage_type": "call"}, {"api_name": "dispatch_container_register.DispatchContainerRegister", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": 
"django.db.models.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.Manager", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 50, "usage_type": "call"}, {"api_name": "django.core.exceptions.ValidationError", "line_number": 52, "usage_type": "call"}]} +{"seq_id": "27074562", "text": "import json\n\nimport requests\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\n\nfrom auth1.settings import API_URL1\nfrom .models import AddDevice\nfrom django.views.decorators.cache import cache_control\nfrom .forms import AddDeviceform\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required\ndef settings(request):\n l = AddDevice.objects.all()\n form = AddDeviceform\n context = {'form2':form,\"device_list\":l}\n if request.method == 'POST' and 'button-name2' in request.POST:\n form2 = AddDeviceform(request.POST)\n if form2.is_valid():\n Driver_Name = form2.cleaned_data['Driver_Name']\n Device_Id = form2.cleaned_data['Device_Id']\n Vehicle_Number = form2.cleaned_data['Vehicle_Number']\n Vehicle_Type = form2.cleaned_data['Vehicle_Type']\n Sim_Number = form2.cleaned_data['Sim_Number']\n IMEI_Number = form2.cleaned_data['IMEI_Number']\n Device_Model = form2.cleaned_data['Device_Model']\n Vehicle_Licence_No = form2.cleaned_data['Vehicle_Licence_No']\n Device_Timezone = form2.cleaned_data['Device_Timezone']\n data = {'Driver_Name': Driver_Name, 'Device_Id': Device_Id , 'Vehicle_Number': Vehicle_Number,\n 'Vehicle_Type': Vehicle_Type, 'Sim_Number': Sim_Number, 'IMEI_Number': IMEI_Number, 'Device_Model': Device_Model\n ,'Vehicle_Licence_No': Vehicle_Licence_No, 'Device_Timezone': Device_Timezone}\n headers = {'Content-type': 'application/json'}\n r = requests.post(API_URL1, data=json.dumps(data), headers=headers)\n print(r.status_code)\n print(\"getting inside if\")\n fs = form2.save(commit=False)\n fs.user = request.user\n fs.save()\n print(\"save\")\n messages.success(request, 'Device Added')\n return render(request, 'settings/settings.html', context)\n\n\ndef v2(request):\n print('hello')\n veh = request.GET['dataa']\n i = str(veh)\n a = AddDevice.objects.get(Device_Id=i)\n a.delete()\n print (a.Ticket_Name)\n print(veh)\n return JsonResponse(veh)\n\n\n'''\n\n '''", "sub_path": "settings/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "models.AddDevice.objects.all", "line_number": 18, "usage_type": "call"}, {"api_name": "models.AddDevice.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "models.AddDevice", "line_number": 18, "usage_type": "name"}, {"api_name": "forms.AddDeviceform", "line_number": 19, "usage_type": "name"}, {"api_name": "forms.AddDeviceform", "line_number": 22, "usage_type": "call"}, {"api_name": 
"requests.post", "line_number": 37, "usage_type": "call"}, {"api_name": "auth1.settings.API_URL1", "line_number": 37, "usage_type": "argument"}, {"api_name": "json.dumps", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 44, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 44, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 45, "usage_type": "call"}, {"api_name": "django.views.decorators.cache.cache_control", "line_number": 15, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 16, "usage_type": "name"}, {"api_name": "models.AddDevice.objects.get", "line_number": 52, "usage_type": "call"}, {"api_name": "models.AddDevice.objects", "line_number": 52, "usage_type": "attribute"}, {"api_name": "models.AddDevice", "line_number": 52, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "163820823", "text": "import torch\nfrom torch_unique import unique\n\n\ndef _get_dtype(max_value):\n if max_value <= 255:\n return torch.uint8\n elif max_value <= 32767: # pragma: no cover\n return torch.short\n elif max_value <= 2147483647: # pragma: no cover\n return torch.int\n else: # pragma: no cover\n return torch.long\n\n\ndef consecutive_cluster(src):\n size = src.size(0)\n key, perm = unique(src)\n max_value = key.size(0)\n dtype = _get_dtype(max_value)\n arg = torch.empty((key[-1] + 1, ), dtype=dtype, device=src.device)\n arg[key] = torch.arange(0, max_value, dtype=dtype, device=src.device)\n out = arg[src.view(-1)]\n out = out.view(size).long()\n return out, perm\n", "sub_path": "torch_geometric/nn/pool/consecutive.py", "file_name": "consecutive.py", "file_ext": "py", "file_size_in_byte": 708, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.uint8", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.short", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.int", "line_number": 11, "usage_type": "attribute"}, {"api_name": "torch.long", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch_unique.unique", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "487272404", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 17 18:30:55 2019\n\n@author: ccccc\n\"\"\"\n\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup, CData\nimport dateutil.parser\nfrom dateutil import tz\nimport re\nimport unittest\nfrom textwrap import dedent\n\nfrom telegram.ext import Updater\nfrom telegram.ext import CommandHandler\nfrom telegram.ext import MessageHandler, Filters\nfrom textwrap import dedent\nimport logging\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',level=logging.INFO)\n\n\nclass current_weather:\n url = 'http://rss.weather.gov.hk/rss/CurrentWeather.xml' \n url_uc = 'http://rss.weather.gov.hk/rss/CurrentWeather_uc.xml' \n\n def search_or_empty(self, pattern, string):\n result = re.search(pattern, string)\n if result is None:\n return ''\n else:\n return result.group(1)\n \n def get_soup_from_url(self, url):\n html = urlopen(url)\n assert(html.status == 200)\n page = html.read()\n return BeautifulSoup(page, 'html.parser')\n \n def get_soup_for_cdata(self, 
htmlsoup):\n des_html = htmlsoup.find(text=lambda tag: isinstance(tag, CData)).string.strip()\n return BeautifulSoup(des_html, 'html.parser')\n\n def get_rss_data(self):\n \"\"\"This is to download default url data as below:\n status -- html status\n lang -- language\n author\n pub_date -- publish date\n weather_img_no -- image icon number for the weather summary\n temp -- tempature\n rel_humidity -- relative humidity\n uv_index -- UV index of past 1 hour\n uv_level -- UV level of past 1 hour\n predict -- HKO special prediction\n warning_msg -- weather warning message\n \"\"\"\n html_soup = self.get_soup_from_url(self.url)\n des_item = self.get_soup_for_cdata(html_soup)\n self.des_text = des_item.get_text()\n self.lang = html_soup.language.text\n self.author = html_soup.author.text\n pub_date_text = html_soup.pubdate.text\n self.pub_date = dateutil.parser.parse(pub_date_text)\n weather_img_url = des_item.find('img')['src']\n self.weather_img_no = self.search_or_empty(r'http://rss.weather.gov.hk/img/pic(\\d+).png', weather_img_url)\n self.temp = self.search_or_empty(r'Air temperature.* (\\d+).*degrees Celsius', self.des_text)\n self.rel_humidity = self.search_or_empty(r'Relative Humidity.* (\\d+).*per cent', self.des_text)\n self.uv_index = self.search_or_empty(r\"the mean UV Index recorded at King's Park.* (\\d+)\", self.des_text)\n self.uv_level = self.search_or_empty(r'Intensity of UV radiation : (\\S*) ', self.des_text)\n self.rainfall_exist = self.search_or_empty(r'(.*the rainfall recorded in various regions were.*)', self.des_text)\n if self.rainfall_exist != '':\n rainfall_table = des_item.find_all('table')[1]\n self.rainfall_data = [x.text for x in rainfall_table.find_all('tr')]\n\n #Warning in Chinese Source\n html_soup_uc = self.get_soup_from_url(self.url_uc)\n des_item_uc = self.get_soup_for_cdata(html_soup_uc)\n self.predict = self.search_or_empty(u'(預 料 .*)', des_item_uc.get_text())\n des_text_warning_item = des_item_uc.find('span', {'id':'warning_message'})\n if des_text_warning_item is None:\n self.warning_msg = \"\"\n else:\n self.warning_msg = des_text_warning_item.text\n\n def store_scrape_result(self):\n pass\n\n def scrape_result(self):\n result = dedent(f\"\"\"\n 時間: {self.pub_date.astimezone(tz.tzlocal()):%Y-%m-%d %H:%M:%S}\n 氣溫: {self.temp} 度\n 相對濕度: 百分之 {self.rel_humidity}\"\"\")\n\n if self.uv_index != '':\n result += dedent(f\"\"\"\n 紫外線指數: {self.uv_index}\n 曝曬級數: {self.uv_level}\"\"\")\n\n if self.warning_msg !='':\n result += dedent(f\"\"\"\n {self.warning_msg}\"\"\")\n\n if self.predict !='':\n result += dedent(f\"\"\"\n {self.predict}\"\"\")\n\n return result\n\n\nclass weathertelebot:\n def __init__(self, tgToken):\n self.token = tgToken\n \n def start_def_bot(self):\n self.updater = Updater(token=self.token)\n dispatcher = self.updater.dispatcher\n\n def start(bot, update):\n bot.send_message(chat_id=update.message.chat_id, \n text=dedent(\"\"\"\n You can control me by sending these commands:\n \n /weather - get the current weather report\"\"\"))\n\n start_handler = CommandHandler('start', start)\n dispatcher.add_handler(start_handler)\n\n def get_weather(bot, update):\n c1 = current_weather()\n c1.get_rss_data()\n bot.send_message(chat_id=update.message.chat_id, text=c1.scrape_result())\n del c1\n\n get_weather_handler = CommandHandler('weather', get_weather)\n dispatcher.add_handler(get_weather_handler)\n\n def start_bot_host(self):\n self.updater.start_polling()\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = 
argparse.ArgumentParser(description='Host the telegram bot of weather')\n parser.add_argument('tgToken', metavar='string token',\n help='This is the token for bot')\n args = parser.parse_args()\n\n t1 = weathertelebot(args.tgToken)\n t1.start_def_bot()\n t1.start_bot_host()", "sub_path": "weather-check.py", "file_name": "weather-check.py", "file_ext": "py", "file_size_in_byte": 5457, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.basicConfig", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 23, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 31, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 38, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 41, "usage_type": "call"}, {"api_name": "bs4.CData", "line_number": 44, "usage_type": "argument"}, {"api_name": "bs4.BeautifulSoup", "line_number": 45, "usage_type": "call"}, {"api_name": "dateutil.parser.parser.parse", "line_number": 67, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 67, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 67, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 93, "usage_type": "call"}, {"api_name": "dateutil.tz.tzlocal", "line_number": 94, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 94, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 99, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 104, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 108, "usage_type": "call"}, {"api_name": "telegram.ext.Updater", "line_number": 119, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 124, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 129, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 138, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "636308442", "text": "#Libraries used for extrating tweets\nimport json\nimport tweepy\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nimport pandas as pd\nimport pickle\nimport sys\n\n#Get external arguments to search the tweets\narg=sys.argv\nprint(sys.argv)\n\nif arg[1]=='santander':\n\treq='banco santander'\nelse:\n\treq=arg[1]\n\n#Credentials to enter the twitter api\nconsumer_key = 'w87sF4cjjFylzzyCd4mmTVfW3'\nconsumer_secret = 'QSomS57CVwOZWHoK9Bl2yt2PdBImgxftulZeGSraq4n9vJzGFh'\naccess_token = '293210492-tjb5kNx8Iupi4Yq4vTlk3vuXCTM3XWqxALnoyIak'\naccess_secret = 'g6IjmWVWljTp9OjrFBZddiIyzjFC9251S7brov3RHT3hU'\n\nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\n\n#Accesing the api\napi = tweepy.API(auth)\n\n#Initializing the dictionary that will contain the information\ndatos_tweets={}\ndatos_tweets.setdefault('date',{})\ndatos_tweets.setdefault('texts',{})\ndatos_tweets.setdefault('user_id',{})\ndatos_tweets.setdefault('retweet_count',{})\ni=1\n\n#Make the requests for the specified term in 'q', 'since' and 'until \nfor tweet in tweepy.Cursor(api.search, q=req, lang=\"es\", since=arg[2], until=arg[3]).items():\n\tif not 
hasattr(tweet,'retweeted_status'):\n\t\tdatos_tweets['date'].update({i:tweet.created_at.isoformat()})\n\t\tdatos_tweets['texts'].update({i:tweet.text})\n\t\tdatos_tweets['user_id'].update({i:tweet.user.id})\n\t\tdatos_tweets['retweet_count'].update({i:tweet.retweet_count})\n\t\ti=i+1\n\n#DataFrame to display the data afterwards\ndf_datos_tweets=pd.DataFrame(datos_tweets)\n\n#Saving the data and assigning a name realted to the given arguments\nwith open('/home/graduate/Bank_sentiment_analysis/'+arg[1]+'_'+arg[2]+'.json', 'w') as fp:\n json.dump(datos_tweets, fp)\n\nprint(df_datos_tweets.to_json)\n", "sub_path": "bueno_por_ahora_today.py", "file_name": "bueno_por_ahora_today.py", "file_ext": "py", "file_size_in_byte": 1772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "tweepy.OAuthHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 30, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 50, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "345709694", "text": "import multiprocessing as mp\nimport platform\n\nimport numpy as np\nimport pytest\n\nimport megengine as mge\nimport megengine.distributed as dist\nimport megengine.quantization.observer as ob\nfrom megengine.distributed.helper import get_device_count_by_fork\n\n\ndef test_min_max_observer():\n x = np.random.rand(3, 3, 3, 3).astype(\"float32\")\n np_min, np_max = x.min(), x.max()\n x = mge.tensor(x)\n m = ob.MinMaxObserver()\n m(x)\n assert m.min_val == np_min and m.max_val == np_max\n\n\n@pytest.mark.skipif(\n platform.system() == \"Darwin\", reason=\"do not imp GPU mode at macos now\"\n)\n@pytest.mark.skipif(\n platform.system() == \"Windows\", reason=\"windows disable MGB_ENABLE_OPR_MM\"\n)\n@pytest.mark.skipif(get_device_count_by_fork(\"gpu\") < 2, reason=\"need more gpu device\")\n@pytest.mark.isolated_distributed\ndef test_sync_min_max_observer():\n x = np.random.rand(6, 3, 3, 3).astype(\"float32\")\n np_min, np_max = x.min(), x.max()\n world_size = 2\n port = dist.get_free_ports(1)[0]\n server = dist.Server(port)\n\n def worker(rank, slc):\n dist.init_process_group(\"localhost\", port, world_size, rank, rank)\n m = ob.SyncMinMaxObserver()\n y = mge.tensor(x[slc])\n m(y)\n assert m.min_val == np_min and m.max_val == np_max\n\n procs = []\n for rank in range(world_size):\n slc = slice(rank * 3, (rank + 1) * 3)\n p = mp.Process(target=worker, args=(rank, slc,), daemon=True)\n p.start()\n procs.append(p)\n for p in procs:\n p.join(20)\n assert p.exitcode == 0\n", "sub_path": "imperative/python/test/unit/quantization/test_observer.py", "file_name": "test_observer.py", "file_ext": "py", "file_size_in_byte": 1547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.random.rand", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 14, "usage_type": "attribute"}, {"api_name": "megengine.tensor", "line_number": 16, "usage_type": "call"}, {"api_name": "megengine.quantization.observer.MinMaxObserver", "line_number": 17, "usage_type": "call"}, {"api_name": "megengine.quantization.observer", "line_number": 17, "usage_type": 
"name"}, {"api_name": "numpy.random.rand", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "megengine.distributed.get_free_ports", "line_number": 34, "usage_type": "call"}, {"api_name": "megengine.distributed", "line_number": 34, "usage_type": "name"}, {"api_name": "megengine.distributed.Server", "line_number": 35, "usage_type": "call"}, {"api_name": "megengine.distributed", "line_number": 35, "usage_type": "name"}, {"api_name": "megengine.distributed.init_process_group", "line_number": 38, "usage_type": "call"}, {"api_name": "megengine.distributed", "line_number": 38, "usage_type": "name"}, {"api_name": "megengine.quantization.observer.SyncMinMaxObserver", "line_number": 39, "usage_type": "call"}, {"api_name": "megengine.quantization.observer", "line_number": 39, "usage_type": "name"}, {"api_name": "megengine.tensor", "line_number": 40, "usage_type": "call"}, {"api_name": "multiprocessing.Process", "line_number": 47, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 22, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 22, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 25, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 25, "usage_type": "attribute"}, {"api_name": "platform.system", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 28, "usage_type": "attribute"}, {"api_name": "megengine.distributed.helper.get_device_count_by_fork", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 29, "usage_type": "attribute"}]} +{"seq_id": "344310781", "text": "import requests\r\nimport json\r\n\r\nBASE_URL = 'http://127.0.0.1:8000/'\r\nENDPOINT = 'api/'\r\n\r\ndef get_resource(id):\r\n resp = requests.get(BASE_URL+ENDPOINT+id+'/')\r\n #if resp.status_code in range(200,300):\r\n if resp.status_code == requests.codes.ok: #this also works similar to above if statement\r\n print(resp.json())\r\n else:\r\n print(\"Something goes wrong\")\r\nid = input(\"Enter some ID : \")\r\nget_resource(id)\r\n\r\ndef get_all_resource():\r\n resp = requests.get(BASE_URL+ENDPOINT)\r\n print(resp.status_code)\r\n print(resp.json())\r\n#get_all_resource()\r\n\r\n'''\r\n#Status codes are the codes which give the information about page status\r\n#There are 5 category of status codes..\r\n# 1XX --> (100-199) ---> INFORMATIONAL (eg. - continue, processing)\r\n# 2XX --> (200-299) ---> SUCCESSFUL\r\n# 3XX --> (300-399) ---> REDIRECTIONAL\r\n# 4XX --> (400-499) ---> CLIENT ERROR (eg. - 403 csrf token error, 403 forbidden)\r\n# 5XX --> (500-599) ---> SERVER ERROR (eg. 
- server down)\r\n'''\r\n'''\r\ndumpdata concept\r\npython manage.py dumpdata testapp.Employee #command for get all data of Employee class\r\npython manage.py dumpdata testapp.Employee --indent 4 #for get all data of Employee class with indentation\r\npython manage.py dumpdata testapp.Employee --format formattype --indent 5\r\nformattype may be ---> json/xml\r\n#Even you can save json/xml format in file get_all_resource\r\npython manage.py dumpdata testapp.Employee --format formattype > emp.formattype --indent 5\r\n'''\r\n\r\ndef create_resource():\r\n new_emp={\r\n 'eno':500,\r\n 'ename':'shiva',\r\n 'esal':500,\r\n 'eaddr':'Chennai',\r\n }\r\n resp=requests.post(BASE_URL+ENDPOINT, data=json.dumps(new_emp))\r\n print(resp.status_code)\r\n print(resp.json())\r\n\r\ndef update_resource(id):\r\n new_emp={\r\n 'esal':6000,\r\n 'eaddr':'Delhi',\r\n }\r\n resp=requests.put(BASE_URL+ENDPOINT+str(id)+'/', data=json.dumps(new_emp))\r\n print(resp.status_code)\r\n print(resp.json())\r\n\r\ndef delete_resource(id):\r\n resp=requests.delete(BASE_URL+ENDPOINT+str(id)+'/')\r\n print(resp.status_code)\r\n print(resp.json())\r\ndelete_resource(7)\r\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "requests.get", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 10, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 49, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 58, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 58, "usage_type": "call"}, {"api_name": "requests.delete", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "520587144", "text": "from tkinter import *\nimport serial\nimport serial.tools.list_ports\nimport lib1685b\n\nwin = Tk()\n\nports = serial.tools.list_ports.comports()\n\nport = StringVar()\nlb = Listbox(win, listvariable=port)\ni = 0\nfor port, desc, hwid in sorted(ports):\n i = i+1\n lb.insert(i, port)\nlb.pack()\n\nser = \"\"\n\ndef setPort():\n print(sorted(ports)[lb.curselection()[0]].device)\n global ser\n ser = serial.Serial(sorted(ports)[lb.curselection()[0]].device)\n ser.timeout = 0.1\n\nButton(win, text=\"Set Port\", command=setPort).pack()\n\ndef setVoltage():\n val = voltscale.get()\n print(val)\n lib1685b.setVoltage(ser, val)\n\ndef setCurrent():\n val = currscale.get()\n print(val)\n lib1685b.setCurrent(ser, val)\n\n\nvoltscale = Scale(win, from_=0, to=36, resolution=0.1, orient=HORIZONTAL, length=300)\nvoltscale.pack()\nButton(win, text=\"Set Voltage\", command=setVoltage).pack()\n\ncurrscale = Scale(win, from_=0, to=10, resolution=0.1, orient=HORIZONTAL, length=300)\ncurrscale.pack()\nButton(win, text=\"Set Current\", command=setCurrent).pack()\n\nvlab = StringVar()\nvlab.set(\"voltage\")\nLabel(win, textvariable = vlab, width=15, relief=RAISED).pack()\nclab = StringVar()\nclab.set(\"current\")\nLabel(win, textvariable = clab, width=15).pack()\n\ndef getD():\n vals = lib1685b.getData(ser)\n vlab.set(\"Volt=\"+str(vals[0]))\n clab.set(\"Curr=\"+str(vals[1]))\n \nButton(win, text=\"Get V/C\", command=getD).pack()\n\n\nwin.mainloop()\n", "sub_path": "Python 3/gui/1685bSeries.py", "file_name": "1685bSeries.py", "file_ext": "py", "file_size_in_byte": 1419, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "serial.tools.list_ports.comports", "line_number": 8, "usage_type": "call"}, {"api_name": "serial.tools", "line_number": 8, "usage_type": "attribute"}, {"api_name": "serial.Serial", "line_number": 23, "usage_type": "call"}, {"api_name": "lib1685b.setVoltage", "line_number": 31, "usage_type": "call"}, {"api_name": "lib1685b.setCurrent", "line_number": 36, "usage_type": "call"}, {"api_name": "lib1685b.getData", "line_number": 55, "usage_type": "call"}]} +{"seq_id": "387035330", "text": "import sqlite3\n\nqueueDB = \"/data/logs/submissions.db\"\nconn = sqlite3.connect(queueDB)\nc = conn.cursor()\nc.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS submissions (id INTEGER PRIMARY KEY, loadTime DATETIME, submitTime DATETIME, img TEXT, parameter TEXT, leftChanges TEXT, rightChanges TEXT, chosen TEXT, hashval TEXT, screenWidth TEXT, screenHeight TEXT, windowWidth TEXT, windowHeight TEXT, colorDepth TEXT, userid TEXT, usersubs INTEGER, useragent TEXT)\"\"\"\n)\nconn.commit()\nconn.close()\n", "sub_path": "survey/pre-start.py", "file_name": "pre-start.py", "file_ext": "py", "file_size_in_byte": 486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sqlite3.connect", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "313630431", "text": "import subprocess\nimport json\nimport sys\n#./threshholding.out test2.png\n\n\nsubprocess.call([\"./threshholding.out\", sys.argv[0]])\nrectangles = subprocess.check_output([\"./blob.out\", \"threshPoDiffie.png\"])\nrectangles = rectangles[:-4]+rectangles[-2:]\njson_blob = json.loads(rectangles)\n#print json_blob\n\nstandard_valid = {}\ni =0\nfor value in json_blob:\n\t#print json_blob[value]\n\t\n\tinternal_dict = {}\n\tinternal_dict[\"corners\"] = json_blob[value]\n\tinternal_dict[\"type\"] = \"towerA\"\n\tinternal_dict[\"number\"] = i\n\tstandard_valid[value] = internal_dict\n\ti += 1\n\n#print standard_valid\nimport json\nwith open('data.txt', 'w') as outfile:\n json.dump(standard_valid, outfile)", "sub_path": "Map/Research/blob/no_color/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 662, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "subprocess.call", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 8, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 10, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "167948310", "text": "import sys\nimport curses\nimport time\n\nclass Cpu:\n\n def __init__(self):\n self.registers = dict()\n self.lastvalue = False\n\n def run(self, instruction):\n instlist = instruction.split()\n instruction = instlist[0]\n regletter = instlist[1]\n if len(instlist) == 3:\n try:\n value = int(instlist[2])\n except ValueError:\n value = self.registers[instlist[2]].value\n if instruction == \"jnz\":\n try:\n regval = int(regletter)\n except ValueError:\n if regletter not in self.registers:\n self.registers[regletter] = Register(regletter)\n regval = self.registers[regletter].value\n if regval:\n retval = value\n else:\n retval = 1\n else:\n if regletter not in self.registers:\n self.registers[regletter] = Register(regletter)\n retval = 
self.registers[regletter].run(instruction, value)\n return retval\n\n def printregs(self):\n for k in self.registers:\n print(self.registers[k].name, self.registers[k].value)\n\n\nclass Register:\n\n def __init__(self, name, value=0):\n self.name = name\n self.value = value\n if name == \"a\":\n self.value = 1\n self.mulrun = 0\n\n def run(self, instruction, val=None):\n method = getattr(self, instruction)\n if val is None:\n retval = method()\n else:\n retval = method(val)\n return retval\n\n def snd(self):\n # print(\"snd:\", self.name)\n return \"sound-\" + str(self.value)\n\n def set(self, val):\n # print(\"set:\", self.name, val)\n self.value = val\n return 1\n\n def add(self, val):\n # print(\"add:\", self.name, val)\n self.value += val\n return 1\n\n def sub(self, val):\n # print(\"sub:\", self.name, val)\n self.value -= val\n return 1\n\n def mul(self, val):\n # print(\"mul:\", self.name, val)\n self.mulrun += 1\n self.value *= val\n return 1\n\n def mod(self, val):\n # print(\"mod:\", self.name, val)\n self.value = self.value % val\n return 1\n\n def rcv(self):\n # print(\"rcv:\", self.name)\n if self.value:\n return \"recover\"\n else:\n return 1\n\n def jnz(self, val):\n # print(\"jnz:\", self.name, val, \"--\", self.value)\n # if self.value < 0:\n # print(\" \", end=\"\")\n # print(self.value)\n if self.value:\n return val\n else:\n return 1\n\n\ndef main(stdscr):\n stdscr.clear()\n\n instructions = list()\n\n with open(sys.argv[1], 'r') as f:\n for line in f:\n instructions.append(line.strip())\n\n instnum = 0\n cpu = Cpu()\n while not str(instnum).startswith(\"recover\"):\n try:\n ret = cpu.run(instructions[instnum])\n \n stdscr.addstr(1, 0, \" \")\n stdscr.addstr(1, 0, instructions[instnum])\n stdscr.refresh()\n # stuff = stdscr.getch(0,0)\n # if stuff == ord(\"q\"):\n # exit()\n x = 0\n for k in sorted(cpu.registers):\n stdscr.addstr(2, x, k)\n stdscr.addstr(3, x, \" \")\n stdscr.addstr(3, x, str(cpu.registers[k].value))\n x += 10\n\n instnum += ret\n except IndexError:\n break\n\n muls = 0\n for k in cpu.registers:\n muls += cpu.registers[k].mulrun\n print(muls)\n\ncurses.wrapper(main)", "sub_path": "2017/23 - Coprocessor Conflagration/part2.py", "file_name": "part2.py", "file_ext": "py", "file_size_in_byte": 3678, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.argv", "line_number": 112, "usage_type": "attribute"}, {"api_name": "curses.wrapper", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "311085768", "text": "import csv\nimport re\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom Sastrawi.Stemmer.StemmerFactory import StemmerFactory\n\n#create stopwords corpus\nstop_words = stopwords.words('indonesian')\n#create stemmer\nfactory = StemmerFactory()\nstemmer = factory.create_stemmer()\n\ndef clean_text(t):\n user_removed = re.sub(r'@[A-Za-z0-9]+','',t)\n link_removed = re.sub('https?://[A-Za-z0-9./]+','',user_removed)\n number_removed = re.sub('[^a-zA-Z]', ' ', link_removed)\n lower_case_tweet = number_removed.lower()\n words = word_tokenize(lower_case_tweet)\n #remove stopwords\n sent = []\n for w in words:\n if w not in stop_words:\n sent.append(w)\n # join every words\n text = (' '.join(sent)).strip()\n # stemming using Sastrawi\n text = stemmer.stem(text)\n return text\n\nold_csv = open('dataset.csv', 'r')\nnew_csv = open('clean_dataset.csv', 'a', newline = '')\n\n\nold_reader = csv.reader(old_csv)\nnew_writer = csv.writer(new_csv)\n\nfor row in 
old_reader:\n print(row[0] + ' ' + row[1])\n new_writer.writerow([clean_text(row[0]), row[1]])\n\nold_csv.close()\nnew_csv.close()\nprint()\nprint('Success clean dataset')\nprint('Saved to clean_dataset.csv')\n", "sub_path": "clean_dataset.py", "file_name": "clean_dataset.py", "file_ext": "py", "file_size_in_byte": 1217, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "nltk.corpus.stopwords.words", "line_number": 8, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 8, "usage_type": "name"}, {"api_name": "Sastrawi.Stemmer.StemmerFactory.StemmerFactory", "line_number": 10, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 14, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 15, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 16, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 18, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 34, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "298534560", "text": "import re\nimport numpy as np\nimport pandas as pd\nfrom sklearn import preprocessing\nfrom scipy.sparse import csr_matrix\nfrom sklearn.cluster import DBSCAN\nimport datetime\nimport time\n\n\nstates = [\"INITIAL\",\"login\",\"View_Items\",\"home\",\"logout\",\"View_Items_quantity\",\"Add_to_Cart\",\"shoppingcart\",\n \"remove\",\"deferorder\",\"purchasecart\",\"inventory\",\"sellinventory\",\"clearcart\",\"cancelorder\",\"$\"]\n\n#Data imports\nPATH = \"../data/raw/\"\nsessions_file = (PATH+'sessions.dat')\n#Dict of sessions\nwith open(sessions_file) as fn:\n sessions_raw = fn.readlines()\n\ndef session_request_dict(sessions_raw):\n s_r_dict = {}\n for session in sessions_raw:\n key = re.search('([^.]+)', session).group()\n value = re.findall('\\\"(.*?)\\\"', session)\n s_r_dict[key] = value\n\n return s_r_dict\n\n\ndef transition_matrix(sessions, states):\n markovchains = []\n for key, value in sessions.items():\n # labelEncoding\n le = preprocessing.LabelEncoder()\n le.fit(value)\n le.transform(value)\n\n # factorize\n factorize = pd.factorize(value)[0]\n\n # matrix\n n = 1 + max(factorize) # number of states\n M = [[0] * n for _ in range(n)]\n\n for (i, j) in zip(factorize, factorize[1:]):\n M[i][j] += 1\n\n # now convert to probabilities:\n for row in M:\n s = sum(row)\n if s > 0:\n row[:] = [f / s for f in row]\n\n # unique array in the right order\n value = np.array(value)\n _, idx = np.unique(value, return_index=True)\n\n df = pd.DataFrame(data=M, index=value[np.sort(idx)],\n columns=value[np.sort(idx)])\n\n df_1 = pd.DataFrame(index=states, columns=states, dtype='float64')\n\n merge = df_1.update(df, join='left')\n merge = pd.concat([pd.concat([df_1, df], axis=1, sort=False)], axis=0).fillna(0).round(2).iloc[:, :-n]\n\n # convert into Vector\n merge = np.array(merge.values.flatten().tolist())\n # 2-D array\n markovchains.append(merge)\n # csr sparse matrix\n csr = csr_matrix(markovchains)\n\n\n\n return csr\n\ndata = session_request_dict(sessions_raw)\nsmall_data_set = {k: data[k] for k in list(data)[:500]}\nprint('load data done', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))\n#for X in tqdm(range(len(small_data_set))):\nX = transition_matrix(small_data_set, states)\nprint('matrix done', datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))\nprint('start clustering')\nclustering = 
DBSCAN(eps=1.5, min_samples=10).fit(X)\nlabels = clustering.labels_\n#print(labels)\nprint(np.unique(labels, return_counts=True))\nprint(clustering)\nprint(\"End clustering\", datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))", "sub_path": "poc/model/dbscan-and-markov-chain.py", "file_name": "dbscan-and-markov-chain.py", "file_ext": "py", "file_size_in_byte": 2807, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "re.search", "line_number": 24, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 35, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 35, "usage_type": "name"}, {"api_name": "pandas.factorize", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 80, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.cluster.DBSCAN", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 90, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "285409591", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\n script to download cadastral files from the website https://finances.belgium.be/fr/experts_partenaires/plan-cadastral/lambert-72/2018\n require as input the url and the output folder to save the downloaded and unziped files\n first input file is the url \n second input file the output directory\n \n\"\"\"\n\nimport urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport requests\nfrom time import sleep\nfrom random import randint\n\nimport pandas as pd\n#import sys\n\nimport zipfile\n\n\ndef cadastral_plan_links(url):\n \n #url = \"https://finances.belgium.be/fr/experts_partenaires/plan-cadastral/lambert-72/2018\"\n response = requests.get(url, timeout=5)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n commune_name = []\n commune_url = []\n communes = soup.find('div', class_ = 'field-name-body field-type-text-with-summary').find_all(\"li\")\n for com in communes:\n commune_name.append(com.a.get_text())\n commune_url.append(com.a.get('href'))\n \n cadastral_df = pd.DataFrame({'Commune':commune_name, 
'url': commune_url})\n \n return cadastral_df\n\n\nif __name__ == \"__main__\":\n \n #url = sys.argv[1] # the URL from cadastral website that list the cadastral files for each commune\n #output_folder = sys.argv[2] # output folder to store the downloaded files\n\n output_folder = \"./data/cadastral/\"\n output_folder_others = \"./data/\"\n url = \"https://finances.belgium.be/fr/experts_partenaires/plan-cadastral/lambert-72/2018\"\n \n cadastral_df = cadastral_plan_links(url)\n exclude = ['Toute la Belgique', 'Limites administratives']\n\n for i, row in cadastral_df.iterrows():\n sleep(randint(5, 40))\n if row.Commune in exclude:\n filename = output_folder_others + row.Commune + \".zip\"\n filename = filename.replace(' ', '_')\n else:\n filename = output_folder + row.Commune + \".zip\"\n \n url = row.url.replace(' ', '%20')\n try:\n urllib.request.urlretrieve(url, filename)\n extract_folder = output_folder + row.Commune\n myzipfile = zipfile.ZipFile(filename)\n myzipfile.extractall(extract_folder)\n\n except urllib.HTTPError as e:\n if e.getcode() == 404: # check the return code\n print(row.Commune)\n continue\n raise # if other than 404, raise the error\n\n", "sub_path": "Housing_typology_belgium/Plan_cadastral_download.py", "file_name": "Plan_cadastral_download.py", "file_ext": "py", "file_size_in_byte": 2492, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "requests.get", "line_number": 27, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 55, "usage_type": "call"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 64, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 64, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 64, "usage_type": "name"}, {"api_name": "zipfile.ZipFile", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.request.HTTPError", "line_number": 69, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 69, "usage_type": "name"}]} +{"seq_id": "89360574", "text": "from django.conf import settings\nfrom django_redis import get_redis_connection\nfrom cacheme import cacheme\n\n\nCACHEME_DICT = {\n 'REDIS_CACHE_PREFIX': 'CM:', # key prefix for cache\n 'REDIS_CACHE_SCAN_COUNT': 10,\n 'THUNDERING_HERD_RETRY_COUNT': 5,\n 'THUNDERING_HERD_RETRY_TIME': 20\n}\n\nCACHEME_DICT.update(getattr(settings, 'CACHEME', {}))\nCACHEME = type('CACHEME', (), CACHEME_DICT)\n\n\ndef invalid_keys_in_set(key, conn=None):\n if not conn:\n conn = get_redis_connection(CACHEME.REDIS_CACHE_ALIAS)\n key = CACHEME.REDIS_CACHE_PREFIX + key + ':invalid'\n invalid_keys = conn.smembers(key)\n if invalid_keys:\n conn.sadd(cacheme.meta_keys.deleted, *invalid_keys)\n\n\ndef invalid_cache(sender, instance, created=False, **kwargs):\n # for manytomany pre signal, do nothing\n if not CACHEME.ENABLE_CACHE:\n return\n\n m2m = False\n if 'pre_' in kwargs.get('action', ''):\n return\n if kwargs.get('action', False):\n m2m = True\n\n conn = get_redis_connection(CACHEME.REDIS_CACHE_ALIAS)\n\n if not m2m and instance.cache_key:\n keys = instance.cache_key\n if type(instance.cache_key) == str:\n keys = [keys]\n for key in keys:\n invalid_keys_in_set(key, conn)\n\n if m2m:\n name = instance.__class__.__name__\n 
m2m_cache_keys = sender.m2m_cache_keys.copy()\n to_invalid_keys = m2m_cache_keys.pop(name)(kwargs.get('pk_set', []))\n from_invalid_key = list(m2m_cache_keys.values())[0]([instance.id])\n all = from_invalid_key + to_invalid_keys\n for key in all:\n invalid_keys_in_set(key, conn)\n", "sub_path": "django_cacheme/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1627, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.conf.settings", "line_number": 13, "usage_type": "argument"}, {"api_name": "django_redis.get_redis_connection", "line_number": 19, "usage_type": "call"}, {"api_name": "cacheme.cacheme.meta_keys", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cacheme.cacheme", "line_number": 23, "usage_type": "name"}, {"api_name": "django_redis.get_redis_connection", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "299635546", "text": "#Course: CS 2302 Data Structures | Spring 2019\n#Author: Maria Fernanda Corona Ortega\n#Assignment: Lab 1 | Part 2 Figure 3\n#Instructor: Olac Fuentes\n#Purpose of Code: The purpose of this code is to duplicate images presented\n#through plotting and recursion\n#Last Modification: 03/09/2019 8:27pm\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef figure3(ax,n,p,w):\n if n>0:\n index = [0,1,2]\n q = p[index]/2+p[index]\n ax.plot(p[:,0],p[:,1],color='k')\n figure3(ax,n-1,q,w)\n \n\nplt.close(\"all\") \nsize = 3 \nlevels = 1\np = np.array([[-size,-size-(size/size)],[0,0],[size,-size-(size/size)]])\n \nfig, ax = plt.subplots()\n\nfigure3(ax,levels,p,1/3) \n\nax.set_aspect(1.0)\nax.axis('off')\nplt.show()\nfig.savefig('figure3.png')", "sub_path": "drawTrees.py", "file_name": "drawTrees.py", "file_ext": "py", "file_size_in_byte": 755, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "matplotlib.pyplot.close", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "603811308", "text": "#!/Users/divyaganapathisankaran/anaconda/bin/python2.7\nimport numpy as np\nfrom PIL import Image\nfrom PIL import ImageFilter\nfrom scipy.ndimage.measurements import label\nfrom scipy.signal import convolve2d\nimport matplotlib\nimport tifffile\nimport matplotlib.pyplot as plt\nfrom collections import Counter\nimport os\nimport sys\nimport argparse\n\ndef imwrite(filename, image):\n image = (image*255)/np.max(image)\n\n image = Image.fromarray(image.astype(np.uint8))\n image.save(filename)\n\ndef median_filter(eb3):\n eb3 = eb3*255\n eb3 = eb3.astype(np.uint8)\n eb3 = Image.fromarray(eb3).filter(ImageFilter.MedianFilter(3))\n eb3 = np.array(eb3).astype(float)/255.\n return eb3\n\ndef box_filter(eb3, radius):\n k = np.ones((radius, radius))/(float(radius**2))\n return convolve2d(eb3, k, 'same')\n\n\ndef get_all_linesegments(eb3, rel_thresh):\n #blur the image\n eb3_blurred = box_filter(eb3,10)\n\n #look for image locations that are more intense than nearby pixels by rel_thresh\n eb3_sharp = (eb3-eb3_blurred > rel_thresh)\n\n 
#identify connected components\n labeled_segments, _ = label(eb3_sharp, np.ones((3,3)))\n return labeled_segments\n\ndef preprocess(eb3_raw, rel_thresh):\n eb3 = eb3_raw.astype(float)\n eb3 = eb3/np.max(eb3)\n eb3 = median_filter(eb3)\n labeled_segments = get_all_linesegments(eb3, rel_thresh)\n return eb3, labeled_segments\n\ndef label_linesegments_with_distance_count(labeled_segments, center):\n #find image locations on eb3 comets\n indices = np.where(labeled_segments!=0)\n y = indices[0]\n x = indices[1]\n\n #estimate their distance from center\n pixel_distances = np.sqrt((y-center[1])**2 + (x-center[0])**2)\n\n #get unique segments\n unique_segments = np.unique(labeled_segments[labeled_segments!=0])\n\n #run through pixels and record the farthest distance for each\n segment_distances = {x:-1 for x in unique_segments}\n farthest_pixel = {x:(0,0) for x in unique_segments}\n\n for i in range(y.size):\n label = labeled_segments[y[i],x[i]]\n if pixel_distances[i]>segment_distances[label]:\n segment_distances[label] = pixel_distances[i]\n farthest_pixel[label] = (y[i],x[i])\n\n #Count the number of pixels in each segment\n segment_counts = Counter(labeled_segments[labeled_segments!=0].tolist())\n\n return unique_segments, segment_counts, segment_distances, farthest_pixel\n\ndef find_foci_around_center(unique_segments, segment_counts, segment_distances, farthest_pixel, length_thresh, radii):\n foci = []\n for i,r in enumerate(radii):\n foci_this = [x for x in unique_segments if segment_counts[x]>length_thresh and segment_distances[x]abs_thresh\n\n\n #Compute distance to center for each pixel\n x,y = np.meshgrid(np.arange(labeled_segments.shape[1]), np.arange(labeled_segments.shape[0]))\n dist = np.sqrt((y-center[1])**2 + (x-center[0])**2)\n\n #compute cell areas\n areas = []\n for r in radii:\n areas.append(np.sum((dist=radius-2),:]=0.5\n\n return vis_area\ndef get_labeled_segments_image(labeled_segments, centrosome, foci, farthest_pixel, dist, radius):\n vis_labeled_segs = np.zeros((labeled_segments.shape[0], labeled_segments.shape[1], 3))\n vis_labeled_segs[:,:,1] = (labeled_segments!=0)\n vis_labeled_segs[:,:,0] = (centrosome-np.min(centrosome))/(np.max(centrosome)-np.min(centrosome))\n vis_labeled_segs[np.logical_and(dist<=radius+2, dist>=radius-2),:]=0.5\n points = [farthest_pixel[x] for x in foci]\n boxes = [(x-3,y-3,x+3,y+3) for (y,x) in points]\n for b in boxes:\n xmin, ymin, xmax, ymax = b\n xmin = max(0,xmin)\n ymin = max(0,ymin)\n xmax = min(labeled_segments.shape[1],xmax)\n ymax = min(labeled_segments.shape[0],ymax)\n vis_labeled_segs[ymin:ymax,xmin:xmax,2] = 1\n vis_labeled_segs[ymin:ymax,xmin:xmax,1] = 0\n return vis_labeled_segs\n", "sub_path": "eb3.py", "file_name": "eb3.py", "file_ext": "py", "file_size_in_byte": 5682, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.max", "line_number": 16, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 18, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 23, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 24, "usage_type": "name"}, {"api_name": "PIL.ImageFilter.MedianFilter", "line_number": 24, "usage_type": "call"}, {"api_name": "PIL.ImageFilter", "line_number": 24, "usage_type": 
"name"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 29, "usage_type": "call"}, {"api_name": "scipy.signal.convolve2d", "line_number": 30, "usage_type": "call"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 61, "usage_type": "call"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 68, "usage_type": "name"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 69, "usage_type": "name"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 70, "usage_type": "name"}, {"api_name": "scipy.ndimage.measurements.label", "line_number": 71, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 140, "usage_type": "call"}]} +{"seq_id": "64674365", "text": "import pdf_name_generator\nfrom constants import *\nimport dummy_outside\nimport data_frame\nimport graphing\nimport paths\nimport utility\nreload(dummy_outside)\nreload(data_frame)\nreload(graphing)\nreload(paths)\nreload(utility)\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport itertools as it\nfrom matplotlib.backends.backend_pdf import PdfPages\n\ncits = ['LA', 'Miami', 'NYC', 'DC', 'Boston']\ncities = data_frame.Data(\"2017\", cits)\n\ncategories = cits# + ['Medscape', 'myCME'] \ncats = len(categories)\n\npre_inits = [8,16,24,32,40,48,56]\npost_inits = [12,20,28,36,44,52,60]\n\npath = paths.get_input_path('2017', 'Questions.csv')\ndf = pd.read_csv(path, header = 0, index_col = 0, na_values = '-')\n\ntitles = [None] + df.ix[0].tolist()\ntopics = [None] + df.ix[1].tolist()\ntimes = [None] + df.ix[2].tolist()\nanswers = [None] + [utility.map_answer(ans) for ans in df.ix[3].tolist()]\nnumber_of_columns = [None] + df.ix[4].tolist()\nlegends = [None] + [df.iloc[5:,i].dropna().tolist() for i in range(63)]\n\n\nif __name__ == \"__main__\":\n \n for i in range(7):\n prenum = pre_inits[i]\n postnum = post_inits[i]\n num = postnum-prenum\n \n layout = 
it.repeat((1,2))\n \n topic = topics[prenum]\n filename = pdf_name_generator.generate_pdf_name('2016_' + topic.upper())\n pdf = PdfPages(paths.get_output_path(filename))\n \n for j in range(num):\n \n parity = j%2\n \n if parity == 0: \n plt.figure(figsize=(11,8.5), dpi=100)\n plt.figtext(0.01, .97, GRAPH_TEXT_1, fontsize=8)\n plt.figtext(0.01, .96, GRAPH_TEXT_2, fontsize=8, color='lime')\n \n plt.subplot(2, 1, 1+parity)\n data = graphing.Graph_Data(\n cities = cities,\n partition_type = CITY, \n outside = dummy_outside,\n pre_num = prenum + j,\n post_num = postnum + j,\n categories = categories,\n title = utility.format_title(titles[prenum + j]),\n response_list = legends[prenum + j],\n correct_index = answers[prenum + j],\n num_columns = int(number_of_columns[prenum + j])\n )\n graphing.make_graph(data)\n \n if parity == 1 or j==num-1:\n plt.tight_layout()\n pdf.savefig()\n \n plt.close('all')\n pdf.close()", "sub_path": "src/2017 all by city.py", "file_name": "2017 all by city.py", "file_ext": "py", "file_size_in_byte": 2530, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "data_frame.Data", "line_number": 19, "usage_type": "call"}, {"api_name": "paths.get_input_path", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "utility.map_answer", "line_number": 33, "usage_type": "call"}, {"api_name": "itertools.repeat", "line_number": 45, "usage_type": "call"}, {"api_name": "pdf_name_generator.generate_pdf_name", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_pdf.PdfPages", "line_number": 49, "usage_type": "call"}, {"api_name": "paths.get_output_path", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figtext", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figtext", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "graphing.Graph_Data", "line_number": 61, "usage_type": "call"}, {"api_name": "utility.format_title", "line_number": 68, "usage_type": "call"}, {"api_name": "graphing.make_graph", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "170734539", "text": "from django.urls import include, path\nfrom commonuser.views import (CommonUserSignupView, CommonUserProfileView,\n CommonUserListView, CommonUserBanView,\n CommonUserUnBanView,\n CommonUserDeleteView, BannedCommonUserListView,\n MesssageSendingView, EmailSendingView,\n CommonUserUpdateView, UserConfirmView, TwoMinWaitView,\n CommonUserDashboardView,)\n\napp_name ='commonuser'\nurlpatterns = [\n path('signup/', CommonUserSignupView, name='signup'),\n path('profile/<int:pk>/',CommonUserProfileView,\n name = 'profile'),\n path('list/', CommonUserListView.as_view(), name='list'),\n path('confirm/', UserConfirmView, name='confirmation'),\n path('two-min-wait/', TwoMinWaitView, name='twominwait'),\n path('bannedlist/', BannedCommonUserListView.as_view(), name='bannedlist'),\n path('<int:pk>/dashboard/', CommonUserDashboardView, name='dashboard'),\n path('ban/<int:pk>/', CommonUserBanView, name='ban'),\n path('unban/<int:pk>/', CommonUserUnBanView, name='unban'),\n path('delete/<int:pk>/', CommonUserDeleteView, name='delete'),\n path('sendsms/<int:pk>/',MesssageSendingView, name='sendsms'),\n path('sendemail/<int:pk>/',EmailSendingView, name='sendemail'),\n path('update/<int:pk>/', CommonUserUpdateView, name='update'),\n\n]\n", "sub_path": "commonuser/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1457, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "commonuser.views.CommonUserSignupView", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "commonuser.views.CommonUserProfileView", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "commonuser.views.CommonUserListView.as_view", "line_number": 15, "usage_type": "call"}, {"api_name": "commonuser.views.CommonUserListView", "line_number": 15, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "commonuser.views.UserConfirmView", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "commonuser.views.TwoMinWaitView", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "commonuser.views.BannedCommonUserListView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "commonuser.views.BannedCommonUserListView", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "commonuser.views.CommonUserDashboardView", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "commonuser.views.CommonUserBanView", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "commonuser.views.CommonUserUnBanView", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "commonuser.views.CommonUserDeleteView", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "commonuser.views.MesssageSendingView", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "commonuser.views.EmailSendingView", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "commonuser.views.CommonUserUpdateView", "line_number": 25, "usage_type": "argument"}]} +{"seq_id": "258329323", "text": "import os\nfrom setuptools import setup, find_packages\n\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\ndef read(fname):\n return 
open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(name='book-python-wrappers',\n version='0.0.0',\n install_requires=requirements,\n description='ZohoBooks Python Client Library',\n long_description=read('README.md'),\n url='https://github.com/zoho/books-python-wrappers',\n download_url='',\n author='SahayaRamesh',\n license='MIT',\n packages=find_packages(),\n include_package_data=True,\n )\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 617, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "setuptools.setup", "line_number": 11, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "282994075", "text": "import logging\n\nimport numpy as np\nimport pandas as pd\nimport uproot\n\nimport strax\nfrom straxen.common import get_resource\nfrom straxen import get_to_pe\nimport wfsim\nfrom immutabledict import immutabledict\n\nexport, __all__ = strax.exporter()\n__all__ += ['instruction_dtype', 'truth_extra_dtype']\n\n\n#recoil refers to 1:ER, 2=NR, 3=Alpha\ninstruction_dtype = [(('Waveform simulator event number.', 'event_number'), np.int32),\n (('Quanta type (S1 photons or S2 electrons)', 'type'), np.int8),\n (('Time of the interaction [ns]', 'time'), np.int64),\n (('End time of the interaction [ns]', 'endtime'), np.int64),\n (('X position of the cluster[cm]', 'x'), np.float32),\n (('Y position of the cluster[cm]', 'y'), np.float32),\n (('Z position of the cluster[cm]', 'z'), np.float32),\n (('Number of quanta', 'amp'), np.int32),\n (('Recoil type of interaction.', 'recoil'), np.int8),\n (('Energy deposit of interaction', 'e_dep'), np.float32),\n (('Eventid like in geant4 output rootfile', 'g4id'), np.int32),\n (('Volume id giving the detector subvolume', 'vol_id'), np.int32)\n ]\n\ntruth_extra_dtype = [\n ('n_electron', np.float),\n ('n_photon', np.float), ('n_photon_bottom', np.float),\n ('t_first_photon', np.float), ('t_last_photon', np.float), \n ('t_mean_photon', np.float), ('t_sigma_photon', np.float), \n ('t_first_electron', np.float), ('t_last_electron', np.float), \n ('t_mean_electron', np.float), ('t_sigma_electron', np.float)]\n\nlog = logging.getLogger('SimulationCore')\n\ndef rand_instructions(c):\n n = c['nevents'] = c['event_rate'] * c['chunk_size'] * c['nchunk']\n c['total_time'] = c['chunk_size'] * c['nchunk']\n\n instructions = np.zeros(2 * n, dtype=instruction_dtype)\n uniform_times = c['total_time'] * (np.arange(n) + 0.5) / n\n instructions['time'] = np.repeat(uniform_times, 2) * int(1e9)\n instructions['event_number'] = np.digitize(instructions['time'],\n 1e9 * np.arange(c['nchunk']) * c['chunk_size']) - 1\n instructions['type'] = np.tile([1, 2], n)\n instructions['recoil'] = [7 for i in range(n * 2)] #Use nest ids for ER\n\n r = np.sqrt(np.random.uniform(0, c['tpc_radius']**2, n))\n t = np.random.uniform(-np.pi, np.pi, n)\n instructions['x'] = np.repeat(r * np.cos(t), 2)\n instructions['y'] = np.repeat(r * np.sin(t), 2)\n instructions['z'] = np.repeat(np.random.uniform(-c['tpc_length'], 0, n), 2)\n\n nphotons = np.random.uniform(2000, 2050, n)\n nelectrons = 10 ** (np.random.uniform(3, 4, n))\n instructions['amp'] = np.vstack([nphotons, 
nelectrons]).T.flatten().astype(int)\n\n return instructions\n\ndef read_optical(c):\n file = c['fax_file']\n data = uproot.open(file)\n try:\n e = data.get('events')\n except:\n raise Exception(\"Are you using mc version >4?\")\n\n event_id = e['eventid'].array(library=\"np\")\n n_events = len(event_id)\n # lets separate the events in time by a constant time difference\n time = np.arange(1, n_events+1)\n\n if c['neutron_veto']:\n nV_pmt_id_offset = 2000\n channels = [[channel - nV_pmt_id_offset for channel in array if channel >=2000] for array in e[\"pmthitID\"].array(library=\"np\")]\n timings = e[\"pmthitTime\"].array(library=\"np\")*1e9\n else:\n # TPC\n channels = e[\"pmthitID\"].array(library=\"np\")\n timings = e[\"pmthitTime\"].array(library=\"np\")*1e9\n\n # Events should be in the TPC\n ins = np.zeros(n_events, dtype=instruction_dtype)\n ins['x'] = e[\"xp_pri\"].array(library=\"np\").flatten() / 10.\n ins['y'] = e[\"yp_pri\"].array(library=\"np\").flatten() / 10.\n ins['z'] = e[\"zp_pri\"].array(library=\"np\").flatten() / 10.\n ins['time']= 1e7 * time.flatten()\n ins['event_number'] = np.arange(n_events)\n ins['g4id'] = event_id\n ins['type'] = np.repeat(1, n_events)\n ins['recoil'] = np.repeat(1, n_events)\n ins['amp'] = [len(t) for t in timings]\n\n # cut interactions without electrons or photons\n ins = ins[ins[\"amp\"] > 0]\n\n return ins, channels, timings\n\ndef instruction_from_csv(filename):\n \"\"\"\n Return wfsim instructions from a csv\n \n :param filename: Path to csv file\n \"\"\"\n df = pd.read_csv(filename)\n \n recs = np.zeros(len(df),\n dtype=instruction_dtype\n )\n for column in df.columns:\n recs[column]=df[column]\n \n expected_dtype = np.dtype(instruction_dtype)\n assert recs.dtype == expected_dtype, \\\n f\"CSV {filename} produced wrong dtype. 
Got {recs.dtype}, expected {expected_dtype}.\"\n return recs\n\n\n@export\nclass ChunkRawRecords(object):\n def __init__(self, config):\n self.config = config\n self.rawdata = wfsim.RawData(self.config)\n self.record_buffer = np.zeros(5000000,\n dtype=strax.raw_record_dtype(samples_per_record=strax.DEFAULT_RECORD_LENGTH)) # 2*250 ms buffer\n self.truth_buffer = np.zeros(10000, dtype=instruction_dtype + truth_extra_dtype + [('fill', bool)])\n\n self.blevel = buffer_filled_level = 0\n\n def __call__(self, instructions, **kwargs):\n samples_per_record = strax.DEFAULT_RECORD_LENGTH\n dt = self.config['sample_duration']\n buffer_length = len(self.record_buffer)\n rext = int(self.config['right_raw_extension'])\n cksz = int(self.config['chunk_size'] * 1e9)\n\n # Save the constants as privates\n self.blevel = buffer_filled_level = 0\n self.chunk_time_pre = np.min(instructions['time']) - rext\n self.chunk_time = self.chunk_time_pre + cksz # Starting chunk\n self.current_digitized_right = self.last_digitized_right = 0\n for channel, left, right, data in self.rawdata(instructions=instructions,\n truth_buffer=self.truth_buffer,\n **kwargs):\n pulse_length = right - left + 1\n records_needed = int(np.ceil(pulse_length / samples_per_record))\n\n if self.rawdata.left * dt > self.chunk_time:\n self.chunk_time = self.last_digitized_right * dt\n yield from self.final_results()\n self.chunk_time_pre = self.chunk_time\n self.chunk_time += cksz\n\n if self.blevel + records_needed > buffer_length:\n log.warning('Chunk size too large, insufficient record buffer')\n yield from self.final_results()\n\n if self.blevel + records_needed > buffer_length:\n log.warning('Pulse length too large, insufficient record buffer, skipping pulse')\n continue\n\n # WARNING baseline and area fields are zeros before final_results\n s = slice(self.blevel, self.blevel + records_needed)\n self.record_buffer[s]['channel'] = channel\n self.record_buffer[s]['dt'] = dt\n self.record_buffer[s]['time'] = dt * (left + samples_per_record * np.arange(records_needed))\n self.record_buffer[s]['length'] = [min(pulse_length, samples_per_record * (i+1)) \n - samples_per_record * i for i in range(records_needed)]\n self.record_buffer[s]['pulse_length'] = pulse_length\n self.record_buffer[s]['record_i'] = np.arange(records_needed)\n self.record_buffer[s]['data'] = np.pad(data, \n (0, records_needed * samples_per_record - pulse_length), 'constant').reshape((-1, samples_per_record))\n self.blevel += records_needed\n if self.rawdata.right != self.current_digitized_right:\n self.last_digitized_right = self.current_digitized_right\n self.current_digitized_right = self.rawdata.right\n\n self.last_digitized_right = self.current_digitized_right\n yield from self.final_results()\n\n def final_results(self):\n records = self.record_buffer[:self.blevel] # A view into the buffer, not a copy\n maska = records['time'] <= self.last_digitized_right * self.config['sample_duration']\n records = records[maska]\n\n records = strax.sort_by_time(records) # Do NOT remove this line\n\n # Yield an appropriate amount of stuff from the truth buffer\n # and mark it as available for writing again\n\n maskb = (\n self.truth_buffer['fill'] &\n # This condition will always be false if self.truth_buffer['t_first_photon'] == np.nan\n ((self.truth_buffer['t_first_photon']\n <= self.last_digitized_right * self.config['sample_duration']) |\n # Hence, we need to use this trick to also save these cases (this\n # is what we set the end time to for np.nans)\n 
(np.isnan(self.truth_buffer['t_first_photon']) &\n (self.truth_buffer['time']\n <= self.last_digitized_right * self.config['sample_duration'])\n )))\n truth = self.truth_buffer[maskb] # This is a copy, not a view!\n\n # Careful here: [maskb]['fill'] = ... does not work\n # numpy creates a copy of the array on the first index.\n # The assignment then goes to the (unused) copy.\n # ['fill'][maskb] leads to a view first, then the advanced\n # assignment works into the original array as expected.\n self.truth_buffer['fill'][maskb] = False\n\n truth.sort(order='time')\n # Return truth without 'fill' field\n _truth = np.zeros(len(truth), dtype=instruction_dtype + truth_extra_dtype)\n for name in _truth.dtype.names:\n _truth[name] = truth[name]\n _truth['time'][~np.isnan(_truth['t_first_photon'])] = \\\n _truth['t_first_photon'][~np.isnan(_truth['t_first_photon'])].astype(int)\n _truth.sort(order='time')\n\n # Okay, this will be a bit ugly but it's easy\n if self.config['detector']=='XENON1T':\n yield dict(raw_records=records,\n truth=_truth)\n if self.config['neutron_veto']:\n yield dict(raw_records_nv=records[records['channel'] < self.config['channel_map']['he'][0]],\n truth=_truth)\n elif self.config['detector']=='XENONnT':\n yield dict(raw_records=records[records['channel'] < self.config['channel_map']['he'][0]],\n raw_records_he=records[(records['channel'] >= self.config['channel_map']['he'][0]) &\n (records['channel'] <= self.config['channel_map']['he'][-1])],\n raw_records_aqmon=records[records['channel']==800],\n truth=_truth)\n\n\n self.record_buffer[:np.sum(~maska)] = self.record_buffer[:self.blevel][~maska]\n self.blevel = np.sum(~maska)\n\n def source_finished(self):\n return self.rawdata.source_finished\n\n\n@export\nclass ChunkRawRecordsOptical(ChunkRawRecords):\n def __init__(self, config):\n self.config = config\n self.rawdata = wfsim.RawDataOptical(self.config)\n self.record_buffer = np.zeros(5000000, dtype=strax.raw_record_dtype()) # 2*250 ms buffer\n self.truth_buffer = np.zeros(10000, dtype=instruction_dtype + truth_extra_dtype + [('fill', bool)])\n\n\n@strax.takes_config(\n strax.Option('optical',default=False, track=True,\n help=\"Flag for using optical mc for instructions\"),\n strax.Option('seed',default=False, track=True,\n help=\"Option for setting the seed of the random number generator used for \"\n \"generation of the instructions\"),\n strax.Option('fax_file', default=None, track=False,\n help=\"Directory with fax instructions\"),\n strax.Option('fax_config_override', default=None,\n help=\"Dictionary with configuration option overrides\"),\n strax.Option('event_rate', default=5, track=False,\n help=\"Average number of events per second\"),\n strax.Option('chunk_size', default=100, track=False,\n help=\"Duration of each chunk in seconds\"),\n strax.Option('nchunk', default=1, track=False,\n help=\"Number of chunks to simulate\"),\n strax.Option('right_raw_extension', default=50000),\n strax.Option('timeout', default=1800,\n help=\"Terminate processing if any one mailbox receives \"\n \"no result for more than this many seconds\"),\n strax.Option('fax_config',\n default='https://raw.githubusercontent.com/XENONnT/private_nt_aux_files/master/sim_files/fax_config_nt.json?token=AHCU5AZMPZABYSGVRLDACR3ABAZUA'),\n strax.Option('gain_model',\n default=('to_pe_per_run', 'https://github.com/XENONnT/private_nt_aux_files/blob/master/sim_files/to_pe_nt.npy?raw=true'),\n help='PMT gain model. 
Specify as (model_type, model_config).'),\n strax.Option('detector', default='XENONnT', track=True),\n strax.Option('channel_map', track=False, type=immutabledict,\n help=\"immutabledict mapping subdetector to (min, max) \"\n \"channel number. Provided by context\"),\n strax.Option('n_tpc_pmts', track=False,\n help=\"Number of pmts in tpc. Provided by context\"),\n strax.Option('n_top_pmts', track=False,\n help=\"Number of pmts in top array. Provided by context\"),\n strax.Option('neutron_veto', default=False, track=True,\n help=\"Flag for nVeto optical simulation instead of TPC\"),\n)\nclass FaxSimulatorPlugin(strax.Plugin):\n depends_on = tuple()\n\n # Cannot arbitrarily rechunk records inside events\n rechunk_on_save = False\n\n # Simulator uses iteration semantics, so the plugin has a state\n # TODO: this seems avoidable...\n parallel = False\n\n # TODO: this state is needed for sorting checks,\n # but it prevents parallelization\n last_chunk_time = -999999999999999\n\n # A very very long input timeout, our simulator takes time\n input_timeout = 3600 # as an hour\n\n def setup(self):\n c = self.config\n c.update(get_resource(c['fax_config'], fmt='json'))\n # Update gains to the nT defaults\n self.to_pe = get_to_pe(self.run_id, c['gain_model'],\n c['channel_map']['tpc'][1]+1)\n c['gains'] = 1 / self.to_pe * (1e-8 * 2.25 / 2**14) / (1.6e-19 * 10 * 50)\n c['gains'][self.to_pe==0] = 0\n if c['seed'] != False:\n np.random.seed(c['seed'])\n\n overrides = self.config['fax_config_override']\n if overrides is not None:\n c.update(overrides)\n\n #We hash the config to load resources. Channel map is immutable and cannot be hashed\n self.config['channel_map'] = dict(self.config['channel_map'])\n self.config['channel_map']['sum_signal']=800\n self.config['channels_bottom'] = np.arange(self.config['n_top_pmts'],self.config['n_tpc_pmts'])\n \n self.get_instructions()\n self.check_instructions()\n self._setup()\n \n def _setup(self):\n #Set in inheriting class\n pass\n\n def get_instructions(self):\n #Set in inheriting class\n pass\n\n def check_instructions(self):\n #Set in inheriting class\n pass\n\n def _sort_check(self, result):\n if len(result) == 0: return\n if result['time'][0] < self.last_chunk_time + 1000:\n raise RuntimeError(\n \"Simulator returned chunks with insufficient spacing. 
\"\n f\"Last chunk's max time was {self.last_chunk_time}, \"\n f\"this chunk's first time is {result['time'][0]}.\")\n if np.diff(result['time']).min() < 0:\n raise RuntimeError(\"Simulator returned non-sorted records!\")\n self.last_chunk_time = result['time'].max()\n\n def is_ready(self, chunk_i):\n \"\"\"Overwritten to mimic online input plugin.\n Returns False to check source finished;\n Returns True to get next chunk.\n \"\"\"\n if 'ready' not in self.__dict__: self.ready = False\n self.ready ^= True # Flip\n return self.ready\n\n def source_finished(self):\n \"\"\"Return whether all instructions has been used.\"\"\"\n return self.sim.source_finished()\n\n\n@export\nclass RawRecordsFromFaxNT(FaxSimulatorPlugin):\n provides = ('raw_records', 'raw_records_he', 'raw_records_aqmon', 'truth')\n data_kind = immutabledict(zip(provides, provides))\n\n def _setup(self):\n self.sim = ChunkRawRecords(self.config)\n self.sim_iter = self.sim(self.instructions)\n\n def get_instructions(self):\n if self.config['fax_file']:\n assert self.config['fax_file'][-5:] != '.root', 'None optical g4 input is deprecated use EPIX instead'\n self.instructions = instruction_from_csv(self.config['fax_file'])\n self.config['nevents'] = np.max(self.instructions['event_number'])\n\n else:\n self.instructions = rand_instructions(self.config)\n\n def check_instructions(self):\n # Let below cathode S1 instructions pass but remove S2 instructions\n m = (self.instructions['z'] < -self.config['tpc_length']) & (self.instructions['type'] == 2)\n self.instructions = self.instructions[~m]\n\n assert np.all(self.instructions['x']**2 + self.instructions['y']**2 < self.config['tpc_radius']**2), \\\n \"Interation is outside the TPC\"\n assert np.all(self.instructions['z'] < 0.25), \\\n \"Interation is outside the TPC\"\n assert np.all(self.instructions['amp'] > 0), \\\n \"Interaction has zero size\"\n\n\n def infer_dtype(self):\n dtype = {data_type:strax.raw_record_dtype(samples_per_record=strax.DEFAULT_RECORD_LENGTH) \n for data_type in self.provides if data_type is not 'truth'}\n dtype['truth']=instruction_dtype + truth_extra_dtype\n return dtype\n\n\n def compute(self):\n try:\n result = next(self.sim_iter)\n except StopIteration:\n raise RuntimeError(\"Bug in chunk count computation\")\n self._sort_check(result[self.provides[0]])#To accomodate nveto raw records, should be the first in provide.\n\n return {data_type:self.chunk(\n start=self.sim.chunk_time_pre,\n end=self.sim.chunk_time,\n data=result[data_type],\n data_type=data_type) for data_type in self.provides}\n\n\n@export\nclass RawRecordsFromFaxEpix(RawRecordsFromFaxNT):\n depends_on = 'epix_instructions'\n\n def _setup(self):\n self.sim = ChunkRawRecords(self.config)\n \n def compute(self,wfsim_instructions):\n self.sim_iter = self.sim(wfsim_instructions)\n\n try:\n result = next(self.sim_iter)\n except StopIteration:\n raise RuntimeError(\"Bug in chunk count computation\")\n self._sort_check(result['raw_records'])\n\n return {data_type:result[data_type] for data_type in self.provides}\n\n def get_instructions(self):\n pass\n\n def check_instructions(self):\n pass\n\n def is_ready(self,chuck_i):\n \"\"\"Overwritten to mimic online input plugin.\n Returns False to check source finished;\n Returns True to get next chunk.\n \"\"\"\n return True\n\n\n@export\nclass RawRecordsFromFax1T(RawRecordsFromFaxNT):\n provides = ('raw_records', 'truth')\n\n\n@export\nclass RawRecordsFromFaxOptical(RawRecordsFromFaxNT):\n def _setup(self):\n self.sim = 
ChunkRawRecordsOptical(self.config)\n self.sim_iter = self.sim(instructions=self.instructions, \n channels=self.channels, \n timings=self.timings)\n\n def get_instructions(self):\n self.instructions, self.channels, self.timings = read_optical(self.config)\n self.config['nevents']=len(self.instructions['event_number'])\n\n\n@export\nclass RawRecordsFromFaxnVeto(RawRecordsFromFaxOptical):\n provides = ('raw_records_nv', 'truth')\n data_kind = immutabledict(zip(provides, provides))\n # Why does the data_kind need to be repeated? Overriding only the\n # provides tuple apparently does not update the inherited data_kind.\n\n def compute(self):\n result = super().compute()\n result['raw_records_nv'].data['channel'] += 2000 # nVeto PMT ID offset\n return result\n\n\n def check_instructions(self):\n # Are there some nveto boundaries we need to include?\n pass\n", "sub_path": "wfsim/strax_interface.py", "file_name": "strax_interface.py", "file_ext": "py", "file_size_in_byte": 20412, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "strax.exporter", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.int8", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 24, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.int8", "line_number": 26, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 27, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 28, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 29, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 34, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 38, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.digitize", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 54, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.repeat", "line_number": 56, "usage_type": "call"}, 
{"api_name": "numpy.cos", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 58, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 62, "usage_type": "call"}, {"api_name": "uproot.open", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 97, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 119, "usage_type": "call"}, {"api_name": "wfsim.RawData", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 130, "usage_type": "call"}, {"api_name": "strax.raw_record_dtype", "line_number": 131, "usage_type": "call"}, {"api_name": "strax.DEFAULT_RECORD_LENGTH", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 132, "usage_type": "call"}, {"api_name": "strax.DEFAULT_RECORD_LENGTH", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 177, "usage_type": "call"}, {"api_name": "strax.sort_by_time", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 222, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 242, "usage_type": "call"}, {"api_name": "wfsim.RawDataOptical", "line_number": 252, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 253, "usage_type": "call"}, {"api_name": "strax.raw_record_dtype", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 254, "usage_type": "call"}, {"api_name": "strax.Plugin", "line_number": 293, "usage_type": "attribute"}, {"api_name": "straxen.common.get_resource", "line_number": 312, "usage_type": "call"}, {"api_name": "straxen.get_to_pe", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 319, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 319, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 328, "usage_type": 
"call"}, {"api_name": "numpy.diff", "line_number": 353, "usage_type": "call"}, {"api_name": "strax.takes_config", "line_number": 257, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 258, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 260, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 263, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 265, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 267, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 269, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 271, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 273, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 274, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 277, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 279, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 282, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 283, "usage_type": "call"}, {"api_name": "immutabledict.immutabledict", "line_number": 283, "usage_type": "name"}, {"api_name": "strax.Option", "line_number": 286, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 288, "usage_type": "call"}, {"api_name": "strax.Option", "line_number": 290, "usage_type": "call"}, {"api_name": "immutabledict.immutabledict", "line_number": 374, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 394, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 396, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 398, "usage_type": "call"}, {"api_name": "strax.raw_record_dtype", "line_number": 403, "usage_type": "call"}, {"api_name": "strax.DEFAULT_RECORD_LENGTH", "line_number": 403, "usage_type": "attribute"}, {"api_name": "immutabledict.immutabledict", "line_number": 476, "usage_type": "call"}]} +{"seq_id": "443254756", "text": "#!/usr/bin/env python3\n\n# This file is used purely to debug logic within Visual Studio Code. 
\n# It should not be used in any customer facing workshops\nimport os, sys\nsys.path.append('resources/')\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\nfrom multiprocessing import Process\nfrom pymongo import MongoClient\nimport datetime\nfrom settings import DB_NAME, CLUSTER_URL, GROUP_1_PROCESSES, GROUP_2_PROCESSES, GROUP_3_PROCESSES\nfrom faker import Faker\n\nAUTH_COL = \"authorisations\"\n\n# Global variables used in Main and Run functions\ntotalNumberOfProcesses = 0\nprocessesList = []\ntotalDocs = 0\n\ndef run():\n client = MongoClient(CLUSTER_URL)\n db = client[DB_NAME]\n coll = db[AUTH_COL]\n faker = Faker()\n\n # Generate a random date to select from the collection\n search_date = faker.date_time_between(start_date='-2y', end_date='-1y')\n\n # find a random document using the date, start and end times \n start_time = datetime.datetime(search_date.year, search_date.month, search_date.day, 4, 0, 0)\n end_time = datetime.datetime(search_date.year, search_date.month, search_date.day, 10, 0, 0)\n\n result = coll.find_one({\n 'posting_date': { \"$gte\": start_time , \"$lt\": end_time } \n })\n print(result)\n\nif __name__ == '__main__':\n run()", "sub_path": "base_data_loading/test_queries.py", "file_name": "test_queries.py", "file_ext": "py", "file_size_in_byte": 1389, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.path.append", "line_number": 6, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 24, "usage_type": "call"}, {"api_name": "settings.CLUSTER_URL", "line_number": 24, "usage_type": "argument"}, {"api_name": "settings.DB_NAME", "line_number": 25, "usage_type": "name"}, {"api_name": "faker.Faker", "line_number": 27, "usage_type": "call"}, {"api_name": "faker.date_time_between", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "183523013", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/transmart/__init__.py\n# Compiled at: 2019-08-27 08:41:54\n# Size of source mod 2**32: 1464 bytes\n\"\"\"\n* Copyright (c) 2015-2017 The Hyve B.V.\n* This code is licensed under the GNU General Public License,\n* version 3.\n\"\"\"\nfrom itertools import chain\nimport logging\nlogger = logging.getLogger('tm-api')\nminimal = ('requests', 'click')\nbackend = ('pandas', 'google.protobuf', 'arrow')\nfull = ('whoosh', 'ipywidgets', 'IPython', 'bqplot')\nmissing_dependencies = set()\nfor dependency in chain(minimal, backend, full):\n try:\n __import__(dependency)\n except ImportError as e:\n try:\n missing_dependencies.add(dependency)\n finally:\n e = 
None\n            del e\n\nif missing_dependencies:\n    msg = 'Missing dependencies: {}'.format(', '.join(missing_dependencies))\n    logger.warning(msg)\n\n# _hard and dependency_mode must be bound on every path; _hard is deleted below\n_hard = missing_dependencies.intersection(minimal)\nif _hard:\n    raise ImportError('Missing required dependencies {}'.format(_hard))\nelif missing_dependencies.intersection(backend):\n    logger.warning('Running in minimal dependency mode. Only administrative calls are available.')\n    dependency_mode = 'MINIMAL'\nelif missing_dependencies.intersection(full):\n    logger.warning('No Javascript dependencies found. Running in headless mode.')\n    dependency_mode = 'BACKEND'\nelse:\n    dependency_mode = 'FULL'\ndel minimal\ndel backend\ndel full\ndel _hard\ndel missing_dependencies\ndel chain\ndel dependency\ndel logging\nfrom .main import get_api\n__version__ = '0.2.6'\n__author__ = 'The Hyve'", "sub_path": "pycfiles/transmart-0.2.6-py3.7/__init__.cpython-37.py", "file_name": "__init__.cpython-37.py", "file_ext": "py", "file_size_in_byte": 1818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 20, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "293678390", "text": "\"\"\"\nSimpleBot\n\"\"\"\nimport hlt\nfrom collections import OrderedDict\nimport logging\n\ngame = hlt.Game(\"SimpleBot\")\n\nlogging.getLogger().setLevel(logging.INFO)\nlogging.info(\"SimpleBot Starting!\")\n\nwhile True:\n\n    game_map = game.update_map()\n    command_queue = []\n\n    for ship in game_map.get_me().all_ships():\n\n        move_made = False\n\n        # If the ship is docked\n        if ship.docking_status != ship.DockingStatus.UNDOCKED:\n            continue\n\n        nearby_entities = OrderedDict(sorted(game_map.nearby_entities_by_distance(ship).items()))\n\n        for distance, entities in nearby_entities.items():\n\n            if move_made:\n                break\n\n            for entity in entities:\n\n                if move_made:\n                    break\n\n                if isinstance(entity, hlt.entity.Planet):\n\n                    # If the planet is owned\n                    if entity.is_owned()\\\n                        and ((entity.owner.id == game_map.my_id\n                        and len(entity.all_docked_ships()) >= min(4, entity.num_docking_spots))\n                        or entity.owner.id != game_map.my_id):\n                        continue\n\n                    if ship.can_dock(entity):\n                        # We add the command by appending it to the command_queue\n                        command_queue.append(ship.dock(entity))\n                        move_made = True\n                    else:\n                        nav_command = ship.navigate(ship.closest_point_to(entity), game_map,\n                                                    max_corrections=18,\n                                                    
angular_step=5,\n speed=hlt.constants.MAX_SPEED,\n ignore_ships=False)\n if nav_command:\n command_queue.append(nav_command)\n move_made = True\n\n # Send our set of commands to the Halite engine for this turn\n game.send_command_queue(command_queue)", "sub_path": "bots/SimpleBot.py", "file_name": "SimpleBot.py", "file_ext": "py", "file_size_in_byte": 3818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "hlt.Game", "line_number": 8, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 10, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 11, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 26, "usage_type": "call"}, {"api_name": "hlt.entity", "line_number": 38, "usage_type": "attribute"}, {"api_name": "hlt.constants", "line_number": 55, "usage_type": "attribute"}, {"api_name": "hlt.entity", "line_number": 76, "usage_type": "attribute"}, {"api_name": "hlt.constants", "line_number": 85, "usage_type": "attribute"}, {"api_name": "hlt.entity", "line_number": 91, "usage_type": "attribute"}, {"api_name": "hlt.constants", "line_number": 100, "usage_type": "attribute"}]} +{"seq_id": "186703151", "text": "\"\"\"\nTests of Custom User Model\n\"\"\"\n\n# coding: utf-8\n\nfrom __future__ import unicode_literals, absolute_import\n\nfrom django.test import TestCase\nfrom django.db import IntegrityError\n\nfrom apps_data.course.models.course import CourseOwner\nfrom apps_data.courseevent.models.courseevent import CourseEventParticipation\nfrom ..models import User\nfrom .factories import UserFactory, CourseFactory, CourseEventFactory\n\n\nclass MentorsProfileTest(TestCase):\n \"\"\"\n Test the Custom User Model\n \"\"\"\n def setUp(self):\n \"\"\"\n 3 Users and 3 Courses are created\n \"\"\"\n self.user1 = UserFactory(email='test1@gmail.com',\n username='username1',\n first_name='vorname1',\n last_name='nachname1')\n self.user2 = UserFactory(email='test2@gmail.com',\n username='username2',\n first_name='vorname2',\n last_name='nachname2')\n self.user3 = UserFactory(email='test3@gmail.com',\n username='username3',\n first_name='vorname3',\n last_name='nachname3')\n self.course1 = CourseFactory(slug='slug1')\n self.course2 = CourseFactory(slug='slug2')\n self.course3 = CourseFactory(slug='slug3')\n self.courseevent1 = CourseEventFactory(slug='slug1',course=self.course1)\n self.courseevent2 = CourseEventFactory(slug='slug2',course=self.course2)\n self.courseevent3 = CourseEventFactory(slug='slug3',course=self.course1)\n self.ownership1 = CourseOwner(course=self.course1, user=self.user1)\n self.ownership1.save()\n self.ownership2 = CourseOwner(course=self.course1, user=self.user2)\n self.ownership2.save()\n self.ownership3 = CourseOwner(course=self.course2, user=self.user2)\n self.ownership3.save()\n self.participation1 = CourseEventParticipation(\n courseevent=self.courseevent1, user=self.user1)\n self.participation1.save()\n self.participation2 = CourseEventParticipation(\n courseevent=self.courseevent3, user=self.user1)\n self.participation2.save()\n self.participation3 = CourseEventParticipation(\n courseevent=self.courseevent2, user=self.user3)\n self.participation3.save()\n self.participation4 = CourseEventParticipation(\n courseevent=self.courseevent3, user=self.user3)\n self.participation4.save()\n\n\n def test_get_short_name(self):\n \"\"\"\n The shortname of a User is his first name: 
user1 has shortname vorname1\n \"\"\"\n self.assertEquals(self.user1.get_short_name(), 'vorname1')\n\n def test_get_full_name(self):\n \"\"\"\n The fullname of a User is his firstname and his lastname seperated by a\n blank\n \"\"\"\n self.assertEquals(self.user1.get_full_name(), 'vorname1 nachname1')\n\n def test_teaching(self):\n \"\"\"\n What Courses does a user teach?\n user1 teaches course1\n user2 teaches course1 and course2\n user3 teaches nothing\n \"\"\"\n self.assertQuerysetEqual(self.user1.teaching(),\n [repr(self.course1)],\n ordered=False)\n self.assertQuerysetEqual(self.user2.teaching(),\n [repr(self.course1),\n repr(self.course2)],\n ordered=False)\n self.assertQuerysetEqual(self.user3.teaching(),\n [],\n ordered=False)\n def test_studying(self):\n \"\"\"\n What Courses does a user study?\n user1 studies courseevent1 and courseevent3\n user2 studies nothing\n user3 studies courseevent2 and courseevent3\n \"\"\"\n self.assertQuerysetEqual(self.user1.studying(),\n [repr(self.courseevent1),\n repr(self.courseevent3)],\n ordered=False)\n self.assertQuerysetEqual(self.user2.studying(),\n [],\n ordered=False)\n self.assertQuerysetEqual(self.user3.studying(),\n [repr(self.courseevent2),\n repr(self.courseevent3)],\n ordered=False)\n\n def test_create_user(self):\n \"\"\"\n create a regular user with username, email, first and lastname.\n Check whether the databasecount of users is raised one afterwards.\n :return:\n \"\"\"\n usercount = User.objects.all().count()\n User.objects.create_user(\n username='username4',\n email='test4@gmail.com',\n first_name='vorname4',\n last_name='nachname4',\n password=None)\n usercount_after = usercount + 1\n self.assertEquals(User.objects.all().count(), usercount_after)\n\n def test_create_user_username_required(self):\n \"\"\"\n creating user without username should not be allowed\n \"\"\"\n with self.assertRaises(ValueError):\n User.objects.create_user(\n email='test5@gmail.com',\n username='',\n first_name='vorname5',\n last_name='nachname5',\n password=None)\n\n def test_create_user_email_required(self):\n \"\"\"\n creating user without email is not allowed\n \"\"\"\n with self.assertRaises(ValueError):\n User.objects.create_user(\n username='username5',\n email='',\n first_name='vorname5',\n last_name='nachname5',\n password=None)\n\n def test_no_double_user_email(self):\n with self.assertRaises(IntegrityError):\n User.objects.create_user(\n username='username5',\n email='test1@gmail.com',\n first_name='vorname5',\n last_name='nachname5',\n password=None)\n\n def test_no_double_username(self):\n with self.assertRaises(IntegrityError):\n User.objects.create_user(\n username='username1',\n email='test5@gmail.com',\n first_name='vorname5',\n last_name='nachname5',\n password=None)\n", "sub_path": "apps_accountdata/userprofiles/tests/test_models.py", "file_name": "test_models.py", "file_ext": "py", "file_size_in_byte": 6525, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.test.TestCase", "line_number": 18, "usage_type": "name"}, {"api_name": "factories.UserFactory", "line_number": 26, "usage_type": "call"}, {"api_name": "factories.UserFactory", "line_number": 30, "usage_type": "call"}, {"api_name": "factories.UserFactory", "line_number": 34, "usage_type": "call"}, {"api_name": "factories.CourseFactory", "line_number": 38, "usage_type": "call"}, {"api_name": "factories.CourseFactory", "line_number": 39, "usage_type": "call"}, {"api_name": "factories.CourseFactory", 
"line_number": 40, "usage_type": "call"}, {"api_name": "factories.CourseEventFactory", "line_number": 41, "usage_type": "call"}, {"api_name": "factories.CourseEventFactory", "line_number": 42, "usage_type": "call"}, {"api_name": "factories.CourseEventFactory", "line_number": 43, "usage_type": "call"}, {"api_name": "apps_data.course.models.course.CourseOwner", "line_number": 44, "usage_type": "call"}, {"api_name": "apps_data.course.models.course.CourseOwner", "line_number": 46, "usage_type": "call"}, {"api_name": "apps_data.course.models.course.CourseOwner", "line_number": 48, "usage_type": "call"}, {"api_name": "apps_data.courseevent.models.courseevent.CourseEventParticipation", "line_number": 50, "usage_type": "call"}, {"api_name": "apps_data.courseevent.models.courseevent.CourseEventParticipation", "line_number": 53, "usage_type": "call"}, {"api_name": "apps_data.courseevent.models.courseevent.CourseEventParticipation", "line_number": 56, "usage_type": "call"}, {"api_name": "apps_data.courseevent.models.courseevent.CourseEventParticipation", "line_number": 59, "usage_type": "call"}, {"api_name": "models.User.objects.all", "line_number": 119, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 119, "usage_type": "name"}, {"api_name": "models.User.objects.create_user", "line_number": 120, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 120, "usage_type": "name"}, {"api_name": "models.User.objects.all", "line_number": 127, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 127, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 127, "usage_type": "name"}, {"api_name": "models.User.objects.create_user", "line_number": 134, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 134, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 134, "usage_type": "name"}, {"api_name": "models.User.objects.create_user", "line_number": 146, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 146, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 154, "usage_type": "argument"}, {"api_name": "models.User.objects.create_user", "line_number": 155, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 155, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 163, "usage_type": "argument"}, {"api_name": "models.User.objects.create_user", "line_number": 164, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 164, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 164, "usage_type": "name"}]} +{"seq_id": "247310396", "text": "from rest_framework import status\nfrom rest_framework import permissions\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import PermissionDenied\n\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\n\nfrom models import Badge\nfrom models import Profile\nfrom models import LabelSet\nfrom models import BadgeImage\nfrom models import BadgeTemplate\n\nfrom forms import AddLabelSetForm\n\nfrom serializers import 
BadgeSerializer\nfrom serializers import ProfileSerializer\nfrom serializers import LabelSetSerializer\nfrom serializers import NewProfileSerializer\nfrom serializers import BadgeImageSerializer\nfrom serializers import LabelSearchSerializer\nfrom serializers import BadgeUpdateSerializer\nfrom serializers import BadgeTemplateSerializer\nfrom serializers import NewBadgeImageSerializer\nfrom serializers import ProfileDetailSerializer\n\nSEARCH_PARAM = 's'\nMAX_SEARCH_RESULTS = 20\n\nclass ActiveAndAuthenticatedPermission(permissions.BasePermission):\n\tdef has_permission(self, request, view):\n\t\tif request.user.is_anonymous:\n\t\t\treturn False\n\t\treturn request.user.is_active\n\nclass BadgeTemplateListView(APIView):\n\tpermission_classes = (ActiveAndAuthenticatedPermission,)\n\n\tdef get(self, request, format=None):\n\t\tsearch_param = request.query_params.get(SEARCH_PARAM, None)\n\t\tif search_param:\n\t\t\tsearch_tokens = search_param.split(',')\n\t\t\tqueryset = BadgeTemplate.objects.search(search_tokens, 0, MAX_SEARCH_RESULTS)\n\t\telse:\n\t\t\tqueryset = BadgeTemplate.objects.filter()[:MAX_SEARCH_RESULTS]\n\t\tserializer = BadgeTemplateSerializer(queryset, many=True, context={'request':request})\n\t\treturn Response(serializer.data)\n\nclass LabelSetListView(APIView):\n\tpermission_classes = (ActiveAndAuthenticatedPermission,)\n\n\tdef get(self, request, format=None):\n\t\tsearch_param = request.query_params.get(SEARCH_PARAM, None)\n\n\t\tif search_param:\n\t\t\tsearch_tokens = search_param.split(',')\n\t\t\tqueryset = LabelSet.objects.search(search_tokens, 0, MAX_SEARCH_RESULTS)\n\t\telse:\n\t\t\tqueryset = LabelSet.objects.filter()[:MAX_SEARCH_RESULTS]\n\n\t\tserializer = LabelSetSerializer(queryset, many=True, context={'request':request})\n\t\treturn Response(serializer.data)\n\n\tdef post(self, request):\n\t\tadd_label_form = AddLabelSetForm(request.data)\n\t\tif add_label_form.is_valid():\n\t\t\tif Profile.objects.get(pk=add_label_form.cleaned_data['profile']).user != request.user:\n\t\t\t\treturn Response({'error-type':'not-owner', 'message': 'You do not own that profile'}, status=status.HTTP_403_FORBIDDEN)\n\t\t\tadd_label_form.save()\n\t\t\treturn Response('OK')\n\t\treturn Response(add_label_form.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass LabelSearchView(APIView):\n\tpermission_classes = (ActiveAndAuthenticatedPermission,)\n\n\tdef get(self, request, format=None):\n\t\tsearch_param = request.query_params.get(SEARCH_PARAM, None)\n\n\t\tif search_param:\n\t\t\tsearch_tokens = search_param.split(',')\n\t\t\tlabel_sets = LabelSet.objects.search(search_tokens, 0, MAX_SEARCH_RESULTS)\n\t\t\tbadge_templates = BadgeTemplate.objects.search(search_tokens, 0, MAX_SEARCH_RESULTS)\n\t\telse:\n\t\t\tlabel_sets = LabelSet.objects.all()[:MAX_SEARCH_RESULTS]\n\t\t\tbadge_templates = BadgeTemplate.objects.all()[:MAX_SEARCH_RESULTS]\n\n\t\tserializer = LabelSearchSerializer({\n\t\t\t'label_sets': label_sets,\n\t\t\t'badge_templates': badge_templates\n\t\t}, context={'request':request})\n\t\treturn Response(serializer.data)\n\nclass ProfileListView(APIView):\n\tpermission_classes = (ActiveAndAuthenticatedPermission,)\n\n\tdef get(self, request, format=None):\n\t\tqueryset = Profile.objects.filter(user=request.user)\n\t\tserializer = ProfileSerializer(queryset, many=True, context={'request':request})\n\t\treturn Response(serializer.data)\n\n\tdef post(self, request, format=None):\n
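\t\tif request.user.profiles.all().count() >= settings.MAX_PROFILE_COUNT:  # enforce the per-user profile cap configured in settings\n\t\t\treturn 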
Response({'error-type':'too-many-profiles', 'message': 'Too many profiles'}, status=status.HTTP_403_FORBIDDEN)\n\n\t\trequest.data['user'] = request.user.id\n\t\tserializer = NewProfileSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tprofile = serializer.save()\n\t\t\tprofile.update_positions()\n\t\t\treturn Response(ProfileSerializer(profile, context={'request':request}).data, status=status.HTTP_201_CREATED)\n\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ProfileView(APIView):\n\tpermission_classes = (ActiveAndAuthenticatedPermission,)\n\n\tdef check_object_permissions(self, request, obj):\n\t\tif request.user != obj.user:\n\t\t\traise PermissionDenied\n\n\tdef get_object(self, id):\n\t\tobj = get_object_or_404(Profile, id=id)\n\t\tself.check_object_permissions(self.request, obj)\n\t\treturn obj\n\n\tdef get(self, request, id, format=None):\n\t\tprofile = self.get_object(id)\n\t\treturn Response(ProfileDetailSerializer(profile, context={'request':request}).data)\n\n\tdef delete(self, request, id, format=None):\n\t\tprofile = self.get_object(id)\n\t\tprofile.delete()\n\t\treturn Response(ProfileDetailSerializer(profile, context={'request':request}).data)\n\nclass BadgeView(APIView):\n\tpermission_classes = (ActiveAndAuthenticatedPermission,)\n\n\tdef check_object_permissions(self, request, obj):\n\t\tif request.user != obj.profile.user:\n\t\t\traise PermissionDenied\n\n\tdef get_object(self, id):\n\t\tobj = get_object_or_404(Badge, id=id)\n\t\tself.check_object_permissions(self.request, obj)\n\t\treturn obj\n\n\tdef get(self, request, id, format=None):\n\t\tobj = self.get_object(id)\n\t\treturn Response(BadgeSerializer(obj).data)\n\n\tdef put(self, request, id, format=None):\n\t\tbadge = self.get_object(id)\n\t\tself.check_object_permissions(request, badge)\n\t\tserializer = BadgeUpdateSerializer(badge, data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tbadge = serializer.save()\n\t\t\tbadge.update_positions()\n\t\t\treturn Response(BadgeSerializer(badge, context={'request':request}).data, status=status.HTTP_201_CREATED)\n\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\tdef delete(self, request, id, format=None):\n\t\tbadge = self.get_object(id)\n\t\tself.check_object_permissions(request, badge)\n\t\tbadge.delete()\n\t\treturn Response('Badge deleted', status=status.HTTP_200_OK)\n\nclass BadgeListView(APIView):\n\tpermission_classes = (ActiveAndAuthenticatedPermission,)\n\n\tdef check_object_permissions(self, request, obj):\n\t\tif request.user != obj.user:\n\t\t\traise PermissionDenied\n\n\tdef get_object(self, id):\n\t\tobj = get_object_or_404(Profile, id=id)\n\t\tself.check_object_permissions(self.request, obj)\n\t\treturn obj\n\n\tdef get(self, request, id, format=None):\n\t\tprofile = self.get_object(id)\n\t\tserializer = BadgeSerializer(profile.all_badges.all(), many=True, context={'request':request})\n\t\treturn Response(serializer.data)\n\n\tdef post(self, request, id, format=None):\n\t\tserializer = BadgeUpdateSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tprofile = self.get_object(serializer.data['profile'])\n\t\t\tself.check_object_permissions(request, profile)\n\t\t\tbadge = serializer.save()\n\t\t\tbadge.update_positions()\n\t\t\treturn Response(BadgeSerializer(badge, context={'request':request}).data, status=status.HTTP_201_CREATED)\n\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass 
BadgeImageListView(APIView):\n\tpermission_classes = (ActiveAndAuthenticatedPermission,)\n\n\tdef post(self, request, format=None):\n\t\trequest.data['source_user'] = request.user.id\n\t\tserializer = NewBadgeImageSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tbadge_image = serializer.save()\n\t\t\treturn Response(BadgeImageSerializer(badge_image, context={'request':request}).data, status=status.HTTP_201_CREATED)\n\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass BadgeImageView(APIView):\n\tpermission_classes = (ActiveAndAuthenticatedPermission,)\n\n\tdef get_object(self, id):\n\t\tobj = get_object_or_404(BadgeImage, id=id)\n\t\treturn obj\n\n\tdef get(self, request, id, format=None):\n\t\tobj = self.get_object(id)\n\t\treturn Response(BadgeImageSerializer(obj).data)\n\n\tdef put(self, request, id, format=None):\n\t\tbadge_image = self.get_object(id)\n\t\tif request.user != badge_image.source_user:\n\t\t\traise PermissionDenied\n\n\t\trequest.data['source_user'] = badge_image.source_user.id\n\t\tserializer = NewBadgeImageSerializer(badge_image, data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tbadge_image = serializer.save()\n\t\t\treturn Response(BadgeImageSerializer(badge_image, context={'request':request}).data, status=status.HTTP_201_CREATED)\n\n\t\treturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n", "sub_path": "profiles/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 8446, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "rest_framework.permissions.BasePermission", "line_number": 32, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 32, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 38, "usage_type": "name"}, {"api_name": "models.BadgeTemplate.objects.search", "line_number": 45, "usage_type": "call"}, {"api_name": "models.BadgeTemplate.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "models.BadgeTemplate", "line_number": 45, "usage_type": "name"}, {"api_name": "models.BadgeTemplate.objects.filter", "line_number": 47, "usage_type": "call"}, {"api_name": "models.BadgeTemplate.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.BadgeTemplate", "line_number": 47, "usage_type": "name"}, {"api_name": "serializers.BadgeTemplateSerializer", "line_number": 48, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 49, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 51, "usage_type": "name"}, {"api_name": "models.LabelSet.objects.search", "line_number": 59, "usage_type": "call"}, {"api_name": "models.LabelSet.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.LabelSet", "line_number": 59, "usage_type": "name"}, {"api_name": "models.LabelSet.objects.filter", "line_number": 61, "usage_type": "call"}, {"api_name": "models.LabelSet.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.LabelSet", "line_number": 61, "usage_type": "name"}, {"api_name": "serializers.LabelSetSerializer", "line_number": 63, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 64, "usage_type": "call"}, {"api_name": "forms.AddLabelSetForm", "line_number": 67, "usage_type": "call"}, {"api_name": "models.Profile.objects.get", "line_number": 69, "usage_type": "call"}, 
{"api_name": "models.Profile.objects", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.Profile", "line_number": 69, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 70, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 70, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 70, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 72, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 73, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 73, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 73, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 75, "usage_type": "name"}, {"api_name": "models.LabelSet.objects.search", "line_number": 83, "usage_type": "call"}, {"api_name": "models.LabelSet.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.LabelSet", "line_number": 83, "usage_type": "name"}, {"api_name": "models.BadgeTemplate.objects.search", "line_number": 84, "usage_type": "call"}, {"api_name": "models.BadgeTemplate.objects", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.BadgeTemplate", "line_number": 84, "usage_type": "name"}, {"api_name": "models.LabelSet.objects.all", "line_number": 86, "usage_type": "call"}, {"api_name": "models.LabelSet.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "models.LabelSet", "line_number": 86, "usage_type": "name"}, {"api_name": "models.BadgeTemplate.objects.all", "line_number": 87, "usage_type": "call"}, {"api_name": "models.BadgeTemplate.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.BadgeTemplate", "line_number": 87, "usage_type": "name"}, {"api_name": "serializers.LabelSearchSerializer", "line_number": 89, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 93, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 95, "usage_type": "name"}, {"api_name": "models.Profile.objects.filter", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Profile.objects", "line_number": 99, "usage_type": "attribute"}, {"api_name": "models.Profile", "line_number": 99, "usage_type": "name"}, {"api_name": "serializers.ProfileSerializer", "line_number": 100, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 101, "usage_type": "call"}, {"api_name": "django.conf.settings.MAX_PROFILE_COUNT", "line_number": 104, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 104, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 105, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 105, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 105, "usage_type": "name"}, {"api_name": "serializers.NewProfileSerializer", "line_number": 108, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 112, "usage_type": "call"}, {"api_name": "serializers.ProfileSerializer", "line_number": 112, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 112, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 112, "usage_type": "name"}, {"api_name": 
"rest_framework.response.Response", "line_number": 114, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 114, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 114, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 117, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 122, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 125, "usage_type": "call"}, {"api_name": "models.Profile", "line_number": 125, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 131, "usage_type": "call"}, {"api_name": "serializers.ProfileDetailSerializer", "line_number": 131, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 136, "usage_type": "call"}, {"api_name": "serializers.ProfileDetailSerializer", "line_number": 136, "usage_type": "call"}, {"api_name": "rest_framework.views.APIView", "line_number": 138, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 143, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 146, "usage_type": "call"}, {"api_name": "models.Badge", "line_number": 146, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 152, "usage_type": "call"}, {"api_name": "serializers.BadgeSerializer", "line_number": 152, "usage_type": "call"}, {"api_name": "serializers.BadgeUpdateSerializer", "line_number": 157, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 161, "usage_type": "call"}, {"api_name": "serializers.BadgeSerializer", "line_number": 161, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 161, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 161, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 163, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 163, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 163, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 169, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 169, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 169, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 171, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 176, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 179, "usage_type": "call"}, {"api_name": "models.Profile", "line_number": 179, "usage_type": "argument"}, {"api_name": "serializers.BadgeSerializer", "line_number": 185, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 186, "usage_type": "call"}, {"api_name": "serializers.BadgeUpdateSerializer", "line_number": 189, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 195, "usage_type": "call"}, {"api_name": "serializers.BadgeSerializer", "line_number": 195, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 195, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 195, "usage_type": "name"}, 
{"api_name": "rest_framework.response.Response", "line_number": 197, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 197, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 197, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 199, "usage_type": "name"}, {"api_name": "serializers.NewBadgeImageSerializer", "line_number": 204, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 207, "usage_type": "call"}, {"api_name": "serializers.BadgeImageSerializer", "line_number": 207, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 207, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 207, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 209, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 209, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 209, "usage_type": "name"}, {"api_name": "rest_framework.views.APIView", "line_number": 211, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 215, "usage_type": "call"}, {"api_name": "models.BadgeImage", "line_number": 215, "usage_type": "argument"}, {"api_name": "rest_framework.response.Response", "line_number": 220, "usage_type": "call"}, {"api_name": "serializers.BadgeImageSerializer", "line_number": 220, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.PermissionDenied", "line_number": 225, "usage_type": "name"}, {"api_name": "serializers.NewBadgeImageSerializer", "line_number": 228, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 231, "usage_type": "call"}, {"api_name": "serializers.BadgeImageSerializer", "line_number": 231, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 231, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 231, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 233, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 233, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 233, "usage_type": "name"}]} +{"seq_id": "77144232", "text": "import tensorflow as tf\nimport numpy as np\nimport argparse\n\nimport data_loader_recsys as data_loader\nimport utils\nimport shutil\nimport time\nimport eval\nimport math\n\n\ndef main(data_path):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--batch_size', type=int, default=320,\n help='Learning Rate')\n parser.add_argument('--top_k', type=int, default=5,\n help='Sample from top k predictions')\n parser.add_argument('--max_epochs', type=int, default=1000,\n help='Max Epochs')\n parser.add_argument('--text_dir', type=str, default=data_path,\n # parser.add_argument('--text_dir', type=str, default='Data/Session/user-filter-20000items-session5.csv',\n help='Directory containing text files')\n parser.add_argument('--seed', type=str,\n default='f78c95a8-9256-4757-9a9f-213df5c6854e,1151b040-8022-4965-96d2-8a4605ce456c,4277434f-e3c2-41ae-9ce3-23fd157f9347,fb51d2c4-cc69-4128-92f5-77ec38d66859,4e78efc4-e545-47af-9617-05ff816d86e2',\n help='Seed for text generation')\n parser.add_argument('--sample_percentage', type=float, default=0.5,\n help='sample_percentage from whole data, e.g.0.2= 80% 
training 20% testing')\n\n    args = parser.parse_args()\n\n    dl = data_loader.Data_Loader({'model_type': 'generator', 'dir_name': args.text_dir})\n    all_samples = dl.item\n    # items = dl.item_dict\n\n\n    # Randomly shuffle data\n    np.random.seed(10)\n    shuffle_indices = np.random.permutation(np.arange(len(all_samples)))\n    text_samples = all_samples[shuffle_indices]\n\n    # Split train/test set\n    # TODO: This is very crude, should use cross-validation\n    dev_sample_index = -1 * int(args.sample_percentage * float(len(text_samples)))\n    x_train, x_dev = text_samples[:dev_sample_index], text_samples[dev_sample_index:]\n\n    item_dic = {}\n    for bi in x_train:\n        for item in bi:\n            if item in item_dic:\n                item_dic[item] = item_dic.get(item) + 1\n            else:\n                item_dic[item] = 1\n    sorted_names_5 = sorted(item_dic.items(), key=lambda v: v[1], reverse=True)[:args.top_k]  # top_k=5\n    print(\"sorted_names_5\", sorted_names_5)\n    toplist_5 = [pair[0] for pair in sorted_names_5]  # same order as sorted_names\n\n    sorted_names_20 = sorted(item_dic.items(), key=lambda v: v[1], reverse=True)[:(args.top_k + 15)]  # top_k + 15 = 20\n    print(\"sorted_names_20\", sorted_names_20)\n    toplist_20 = [pair[0] for pair in sorted_names_20]  # same order as sorted_names\n\n    # predictmap=[tuple for tuple in sorted_names]\n    predictmap_5 = dict(sorted_names_5)\n    predictmap_20 = dict(sorted_names_20)\n\n    batch_no_test = 0\n    batch_size_test = args.batch_size * 1\n    curr_preds_5 = []\n    rec_preds_5 = []  # 1\n    ndcg_preds_5 = []  # 1\n    curr_preds_20 = []\n    rec_preds_20 = []  # 1\n    ndcg_preds_20 = []  # 1\n    maxmrr5, maxmrr20, maxhit5, maxhit20, maxndcg5, maxndcg20 = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n\n    while (batch_no_test + 1) * batch_size_test < x_dev.shape[0]:\n        # if (batch_no_test > 100):\n        #     break\n        test_batch = x_dev[batch_no_test * batch_size_test: (batch_no_test + 1) * batch_size_test, :]\n        for bi in range(batch_size_test):\n            # predictmap = sorted_names\n            true_word = test_batch[bi][-1]\n            rank_5 = predictmap_5.get(true_word)\n            rank_20 = predictmap_20.get(true_word)\n            if rank_5 is None:\n                curr_preds_5.append(0.0)\n                rec_preds_5.append(0.0)  # 2\n                ndcg_preds_5.append(0.0)  # 2\n            else:\n                rank_5 = toplist_5.index(true_word)\n                MRR_5 = 1.0 / (rank_5 + 1)\n                Rec_5 = 1.0  # 3\n                ndcg_5 = 1.0 / math.log(rank_5 + 2, 2)  # 3\n                curr_preds_5.append(MRR_5)\n                rec_preds_5.append(Rec_5)  # 4\n                ndcg_preds_5.append(ndcg_5)  # 4\n            if rank_20 is None:\n                curr_preds_20.append(0.0)\n                rec_preds_20.append(0.0)  # 2\n                ndcg_preds_20.append(0.0)  # 2\n            else:\n                rank_20 = toplist_20.index(true_word)\n                MRR_20 = 1.0 / (rank_20 + 1)\n                Rec_20 = 1.0  # 3\n                ndcg_20 = 1.0 / math.log(rank_20 + 2, 2)  # 3\n                curr_preds_20.append(MRR_20)\n                rec_preds_20.append(Rec_20)  # 4\n                ndcg_preds_20.append(ndcg_20)  # 4\n\n        thismrr5 = np.mean(curr_preds_5)\n        thismrr20 = np.mean(curr_preds_20)\n        thishit5 = np.mean(rec_preds_5)\n        thishit20 = np.mean(rec_preds_20)\n        thisndcg5 = np.mean(ndcg_preds_5)\n        thisndcg20 = np.mean(ndcg_preds_20)\n\n        # print(\"\\t\\t\\t\\t\\t\\t\\tthis_hit_5=%.4f this_mrr_5=%.4f this_ndcg_5=%.4f\" %\n        #       (thishit5, thismrr5, thisndcg5))  # 5\n        # print(\"\\t\\t\\t\\t\\t\\t\\tthis_hit_20=%.4f this_mrr_20=%.4f this_ndcg_20=%.4f\\n\" %\n        #       (thishit20, thismrr20, thisndcg20))  # 20\n\n        if thisndcg5 > maxndcg5 or thisndcg20 > maxndcg20:\n            maxndcg5 = thisndcg5\n            maxmrr5 = thismrr5\n            maxhit5 = thishit5\n            maxndcg20 = thisndcg20\n            maxmrr20 = thismrr20\n            maxhit20 = thishit20\n\n
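        # report the best-so-far metrics; the maxima above update whenever either NDCG improves\n        print(\"\\t\\t\\t\\t\\t\\t\\tmax_hit_5=%.4f max_mrr_5=%.4f max_ndcg_5=%.4f\" %\n              (maxhit5, maxmrr5, maxndcg5))  # 5\n        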
print(\"\\t\\t\\t\\t\\t\\t\\tmax_hit_20=%.4f max_mrr_20=%.4f max_ndcg_20=%.4f\\n\" %\n (maxhit20, maxmrr20, maxndcg20)) # 20\n\n batch_no_test += 1\n\n\nif __name__ == '__main__':\n # paths = ['movie5', 'movie10', 'movie20', 'musicl_5', 'musicl_10', 'musicl_20', 'musicl_50', 'musicl_100']\n # for path in paths:\n # datapath = 'Data/Session/'+path+'.csv'\n # print(datapath+\"\\n\\n\")\n # main(datapath)\n main('Data/Session/user-filter-20000items-session5.csv')\n print(\"all done\")\n", "sub_path": "MostPop.py", "file_name": "MostPop.py", "file_ext": "py", "file_size_in_byte": 6018, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "data_loader_recsys.Data_Loader", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 39, "usage_type": "attribute"}, {"api_name": "numpy.random.permutation", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 40, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 40, "usage_type": "call"}, {"api_name": "math.log", "line_number": 94, "usage_type": "call"}, {"api_name": "math.log", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "43082742", "text": "\"\"\"\n This spider is a JobsInAustralia spider created on top of the ATSSpider\n scrapy crawl jobsinaustralia -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://jobs.com.au/search/?s=777&sarea=1\"\n\n sample job url:\n http://jobs.com.au/mobile-plant-operator-limestone-9593868.html?s=777\n\"\"\"\n\nfrom re import compile\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, NormalizedJoin, Strip\n\nRef_Num = compile(r\"(\\d+)\")\nDATE_REF = compile(r'\\s([^\\S]+)$')\n\n\nclass JobsInAustralia(ATSSpider):\n\n name = \"jobsinaustralia\"\n\n def parse(self, response):\n '''\n Iterating over a list of jobs present.\n '''\n selector = Selector(response)\n job_count = selector.xpath('//p[@class=\"cN-indicatorResults\"]/em/text()').extract()\n if not self.expected_job_count_set and job_count:\n self.expected_job_count = int(job_count[0])\n\n jobs = selector.xpath('//ul[@class=\"cT-searchResults\"]/li[@class=\"cT-listingJob\"]/div[@class=\"listing-item\"]/h3/a')\n for job in jobs:\n yield Request(\n callback=self.parse_job_callback(),\n meta={\n 'title': job.xpath('./text()').extract()\n },\n url=urljoin(response.url, job.xpath('./@href').extract()[0])\n )\n\n next_page_url = selector.xpath('//ul[@class=\"cN-pagination cfix\"]//ul/li/a[contains(text(), \"Next\")]/@href').extract()\n if next_page_url:\n yield Request(\n callback=self.parse,\n url=urljoin(response.url, next_page_url[0])\n )\n\n def parse_job(self, response):\n '''\n Gathering required information related to each and 
every job.\n '''\n selector = Selector(response)\n loader = BrightcorpItemLoader(selector=selector)\n\n loader.add_xpath('date',\n '//dl[@class=\"cT-reviewDetails\"]/dt[contains(text(), \"Date Posted\")]/following-sibling::dd[1]//text()',\n re=DATE_REF)\n\n loader.add_xpath('apply_url',\n '//span[@class=\"button-medium-wrap\"]//a[contains(text(), \"Apply Now\")]/@href')\n if not loader.get_output_value('apply_url'):\n loader.add_value('apply_url', response.url)\n\n loader.add_xpath('baseSalary',\n '//dl[@class=\"cT-reviewDetails\"]/dt[contains(text(), \"Salary\")]/following-sibling::dd/span/text()')\n\n loader.add_xpath('company',\n '//dl[@class=\"cT-reviewDetails\"]/dt[contains(text(), \"Ad Placed By\")]/following-sibling::dd[1]//text()')\n\n loader.add_xpath('description', [\n '//div[@class=\"job-details\"]//div[@class=\"adContainer\"]/div[@class=\"adContent\"][not(self::h1) or (self::div[@class=\"adFooter\"])]',\n '//tr/td/div[2]//text()',\n '//div[@class=\"templatetext\"]',\n '//tr/td[@valign=\"top\"]/blockquote',\n '//tr/td[@valign=\"top\"]//tr',\n '//tr/td/div[@class=\"jobdetailsgenbg\"]',\n '//div[@class=\"detail-container\"]/div[@class=\"detail-content\"]',\n '//tr/td/div[contains(@style, \"padding\")]//text()',\n ])\n\n norm_join = NormalizedJoin(', ')\n\n loader.add_xpath('jobcategory',\n '//dl[@class=\"cT-reviewDetails\"]/dt[contains(text(), \"Subsector\")]/following-sibling::dd[@class=\"sector-dd\"]//text()',\n Strip(',: '), norm_join)\n\n loader.add_xpath('jobtype',\n '//dl[@class=\"cT-reviewDetails\"]/dt[contains(text(), \"Work Type\")]/following-sibling::dd[1]//text()',\n norm_join)\n\n loader.add_xpath('location',\n '//dl[@class=\"cT-reviewDetails\"]/dt[contains(text(), \"Location\")]/following-sibling::dd[1]//text()',\n Strip(', '), norm_join)\n\n loader.add_value('referencenumber', response.url, Prefix('%s-' % self.name), re=Ref_Num)\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('url', response.url)\n\n yield loader.load_item()\n", "sub_path": "brightcorp/brightcorp/spiders/jobsinaustralia.py", "file_name": "jobsinaustralia.py", "file_ext": "py", "file_size_in_byte": 4190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 20, "usage_type": "call"}, {"api_name": "brightcorp.base.atsspiders.ATSSpider", "line_number": 23, "usage_type": "name"}, {"api_name": "scrapy.selector.Selector", "line_number": 31, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 38, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 43, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 48, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 50, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 57, "usage_type": "call"}, {"api_name": "brightcorp.items.BrightcorpItemLoader", "line_number": 58, "usage_type": "call"}, {"api_name": "brightcorp.processors.NormalizedJoin", "line_number": 86, "usage_type": "call"}, {"api_name": "brightcorp.processors.Strip", "line_number": 90, "usage_type": "call"}, {"api_name": "brightcorp.processors.Strip", "line_number": 98, "usage_type": "call"}, {"api_name": "brightcorp.processors.Prefix", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "476646686", "text": "#!/usr/bin/env python\n# -*- coding:utf8 -*-\n# e-mail: gotochenglong@gmail.com\n# two jump http 
proxy server\n# browser <--> this proxy server <--> another proxy server <--> web server\n\nimport os\nPARENT_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nimport sys\nsys.path.insert(0, PARENT_DIR)\n\nimport logging\nimport logging.config\nimport conf.proxy_conf as proxy_conf\nimport module.data_store as data_store\n\nimport socket\nimport thread\nimport urlparse\nimport select\n\nclass Proxy(object):\n def __init__(self, client_conn, proxy_conn):\n self.BUFSIZE=4096\n self.client_conn = client_conn\n self.proxy_conn = proxy_conn\n self.run()\n \n def run(self):\n \"\"\" start to exchange info \"\"\"\n inputs = [self.client_conn, self.proxy_conn]\n loop = True\n while loop:\n readable,_,errs=select.select(inputs,[], inputs)\n if errs:\n break\n for soc in readable:\n data=soc.recv(self.BUFSIZE)\n if data and soc is self.client_conn:\n self.proxy_conn.send(data)\n elif data and soc is self.proxy_conn:\n self.client_conn.send(data)\n else:\n loop = False\n self.client_conn.close()\n self.proxy_conn.close()\n\nclass Server(object):\n def __init__(self, conf, handler=Proxy):\n self.logger = logging.getLogger('proxy_svr')\n self.host = conf[\"host\"]\n self.port = conf[\"port\"]\n self.proxy_data = data_store.getStore('proxy_reader')\n self.handler = handler\n\n def start(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.host, self.port))\n self.server.listen(5)\n self.logger.info(\"proxy server bind: %s:%d\", self.host, self.port)\n while True:\n try:\n client_conn, client_addr = self.server.accept()\n self.logger.info(\"proxy request from %s\" % client_addr[0])\n proxy_addr = self.proxy_data.get('baidu')[-1][1].split(':')\n proxy_conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n proxy_addr = (proxy_addr[0], int(proxy_addr[1]))\n proxy_conn.connect(proxy_addr)\n thread.start_new_thread(self.handler, (client_conn, proxy_conn))\n except:\n pass\n\nif __name__ == '__main__':\n logging.config.dictConfig(proxy_conf.LOGGING)\n data_store.dictConfig(proxy_conf.DATA_STORE)\n server = Server(conf=proxy_conf.PROXY_SERVER)\n server.start()", "sub_path": "proxy/proxy_server.py", "file_name": "proxy_server.py", "file_ext": "py", "file_size_in_byte": 2695, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "select.select", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 50, "usage_type": "call"}, {"api_name": "conf.proxy_conf", "line_number": 51, "usage_type": "name"}, {"api_name": "conf.proxy_conf", "line_number": 52, "usage_type": "name"}, {"api_name": "module.data_store.getStore", "line_number": 53, "usage_type": "call"}, {"api_name": "module.data_store", "line_number": 53, "usage_type": "name"}, {"api_name": "socket.socket", "line_number": 57, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 57, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 57, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 58, "usage_type": 
"attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 58, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 67, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 67, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 67, "usage_type": "attribute"}, {"api_name": "thread.start_new_thread", "line_number": 70, "usage_type": "call"}, {"api_name": "logging.config.dictConfig", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 75, "usage_type": "attribute"}, {"api_name": "conf.proxy_conf.LOGGING", "line_number": 75, "usage_type": "attribute"}, {"api_name": "conf.proxy_conf", "line_number": 75, "usage_type": "name"}, {"api_name": "module.data_store.dictConfig", "line_number": 76, "usage_type": "call"}, {"api_name": "module.data_store", "line_number": 76, "usage_type": "name"}, {"api_name": "conf.proxy_conf.DATA_STORE", "line_number": 76, "usage_type": "attribute"}, {"api_name": "conf.proxy_conf", "line_number": 76, "usage_type": "name"}, {"api_name": "conf.proxy_conf.PROXY_SERVER", "line_number": 77, "usage_type": "attribute"}, {"api_name": "conf.proxy_conf", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "280947073", "text": "# Client for controlling the Robot via server\n\nimport pygame as pg\nimport joystick as joy\nimport zmq\nfrom threading import Thread\n#import threading\n\n#connected = threading.Condition()\n\ndef interrupts(socket, conn):\n\twhile conn < 10:\n\t\ttry:\n\t\t\tif socket.recv_string() == \"i\":\n\t\t\t\tprint(\"Got i\")\n\t\t\t\tconn = conn + 1\n\t#\t\tif socket.recv_string() == \"HereIAm!\":\n\t#\t\t\tconnected.acquire()\n\t#\t\t\tconnected.notify()\n\t#\t\t\tconnected.release()\n\t#\t\t\tconn = True\n\t\texcept KeyboardInterrupt:\n\t\t\tbreak\n\n\nif __name__ == \"__main__\":\n\n\tpg.init()\n\n\tclock = pg.time.Clock()\n\n\t# Init Gamepad\n\tgamepad, buttons = joy.initializeJoystick()\n\tprint(buttons)\n\n\t# Init connection\n\tconnected = 0\n\tcontext = zmq.Context()\n\tsocket = context.socket(zmq.PAIR)\n\tsocket.connect(\"tcp://127.0.0.1:5555\")\n\n\tlisten_thread = Thread(target=interrupts, args=(socket, connected,))\n\tlisten_thread.start()\n\n\t#connected.acquire()\n\t#connected.wait()\n\n\n\trunning = True\n\n\tscreen = pg.display.set_mode([40,40])\n\n\n\twhile running:\n\t\tbutton = \"\"\n\t\tsomethingPressed = -1\n\t\tfor event in pg.event.get():\n\t\t\tif event.type == pg.QUIT:\n\t\t\t\trunning = False\n\t\t\tif event.type == pg.KEYDOWN:\n\t\t\t\tif event.key == pg.K_ESCAPE:\n\t\t\t\t\trunning = False\n\t\tsomethingPressed = joy.printPressedButton(gamepad,buttons)\n\t\tif somethingPressed == 15:\n\t\t\trunning = False\n\n\t\tif (somethingPressed > -1):\n\t\t\tif somethingPressed in buttons.keys():\n\t\t\t\tsocket.send_string(\"Pressed: \" + buttons[somethingPressed])\n\t\t\telse:\n\t\t\t\tsocket.send_string(\"Unknown key pressed!\")\n\n\t\tprint(connected)\n\t\tpg.display.flip()\n\t\tclock.tick(1)\n\n", "sub_path": "COM/client_connected.py", "file_name": "client_connected.py", "file_ext": "py", "file_size_in_byte": 1540, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pygame.init", "line_number": 28, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 30, "usage_type": "attribute"}, {"api_name": "joystick.initializeJoystick", "line_number": 33, "usage_type": "call"}, 
{"api_name": "zmq.Context", "line_number": 38, "usage_type": "call"}, {"api_name": "zmq.PAIR", "line_number": 39, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 42, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 51, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 61, "usage_type": "attribute"}, {"api_name": "joystick.printPressedButton", "line_number": 63, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 74, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 74, "usage_type": "attribute"}]} +{"seq_id": "233708395", "text": "from Bio import SeqIO\nimport matplotlib.pyplot as plt\n\n\nrecord = SeqIO.read() #read fasta-file\n\n# https://doi.org/10.1016/0022-2836(82)90515-0 values above 1.6 indicate membranespanning sequences (using a window of about 20 aminoacids)\nKyte_Doolittle = {\n \"A\": 1.8, \"C\": 2.5, \"D\": -3.5, \"E\": -3.5, \"F\": 2.8, \n \"G\": -0.4, \"H\": -3.2, \"I\": 4.5, \"K\": -3.9, \"L\": 3.8,\n \"M\": 1.9, \"N\": -3.5, \"P\": -1.6, \"Q\": -3.5, \"R\": -4.5,\n \"S\": -0.8, \"T\": -0.7, \"V\": 4.2, \"W\": -0.9, \"Y\": -1.3\n }\n\nlength = len(record.seq)\nx = []\ny = []\n\nfor idx, aa in enumerate(record.seq):\n s = 0.0\n if idx < length-19:\n x.append(idx)\n for i in record.seq[idx:idx+20]:\n s = s + Kyte_Doolittle[i]\n y.append(s)\n\n elif idx == length-19:\n x.append(idx)\n for i in record.seq[idx:idx+20]:\n s = s + Kyte_Doolittle[i]\n y.append(s)\n break\n\nplt.figure()\nax = plt.axes()\nplt.rcParams[\"font.family\"] = \"Arial\"\nplt.plot(x,y)\nplt.plot([0,length],[1.6, 1.6], \"r\")\nax.set(xlim=(0, length-19))\nplt.ylabel(r\"$\\sum_{i=0}^{20}\\ hydropathy\\ of\\ AA_i$\", fontsize=16, math_fontfamily='stixsans')\nplt.xlabel(r\"$AA_i$\", fontsize=16, math_fontfamily='stixsans')\nplt.show()", "sub_path": "hydropathy_plot.py", "file_name": "hydropathy_plot.py", "file_ext": "py", "file_size_in_byte": 1265, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "Bio.SeqIO.read", "line_number": 5, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 5, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 36, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "101254751", "text": "import random\nimport logging\nfrom urllib.request import urlopen\nimport requests\nfrom PIL import Image\nfrom imagehash import phash, hex_to_hash\nfrom peewee import DoesNotExist\nfrom models import Annonce\nfrom trello_module import TrelloModule\n\n\nclass Search:\n HASH_SIMILAR_TRESHOLD = 8\n\n def __init__(self, parameters, proxies=[]):\n self.proxies = proxies\n self.parameters = parameters\n self.header = \"\"\n\n def request(self, method, url, params=None, data=None, raiseException=True):\n proxy_index = 0\n\n # change proxy in case of connection error\n while True:\n proxy_dict = None\n if self.proxies:\n proxy_dict = {'http': self.proxies[proxy_index], 'https': self.proxies[proxy_index]}\n try:\n response = requests.request(method,\n url,\n params=params,\n data=data,\n headers=self.header,\n proxies=proxy_dict,\n timeout=5)\n # got 200 code\n if response.ok:\n return response\n # got HTTP error code (40X,50X...)\n if self.proxies:\n raise Exception()\n break\n\n except Exception as e:\n if self.proxies:\n logging.info(\"Error connecting to API with proxy : \" + self.proxies[proxy_index])\n logging.debug(\"Error : \" + e.__str__())\n proxy_index = self.__next_proxy_index(proxy_index)\n else:\n break\n if raiseException:\n raise ConnectionError(\"Cannot connect to API\")\n\n def save(self, uid, site, created, title, city, link, price, surface,\n description=None, telephone=None, rooms=None, bedrooms=None, picture=None):\n is_duplicate = False\n similar_ad = None\n\n # ad already exists ?\n try:\n Annonce.get_by_id(uid)\n return False\n except DoesNotExist:\n pass\n\n # ad exists as similar ad ?\n if picture is not None:\n for pic in picture:\n similar_ad = self.__find_similar_ad_from_pic(pic)\n if similar_ad:\n logging.info(\n \"(\" + site + \") ad for \" + title + \" already exists : \" +\n link + \" = \" + similar_ad.link\n )\n is_duplicate = True\n if similar_ad.posted2trello:\n TrelloModule().add_new_link(similar_ad, link)\n break\n else:\n # the similar ad is not yet on trello, will process and save this similar ad the next launch\n return False\n\n annonce = Annonce.create(\n id=uid,\n site=site,\n created=created,\n title=title,\n description=description,\n telephone=telephone,\n price=price,\n surface=surface,\n rooms=rooms,\n bedrooms=bedrooms,\n city=city,\n link=link,\n picture=picture,\n picturehash=phash(Image.open(urlopen(picture[0]))) if (picture is not None and len(picture) > 0) else None,\n posted2trello=is_duplicate,\n isduplicate=is_duplicate,\n trelloid=similar_ad.idtrello if similar_ad else None\n )\n\n logging.info(\"(\" + site + \") new ad saved : \" + title + (\"(duplicate)\" if is_duplicate else \"\"))\n annonce.save()\n return True\n\n def __next_proxy_index(self, proxy_index):\n self.proxies.pop(proxy_index)\n if len(self.proxies) == 0:\n return -1\n return random.randint(0, len(self.proxies) - 1)\n\n def __find_similar_ad_from_pic(self, picture):\n new_hash = phash(Image.open(urlopen(picture)))\n hashes = [ad.picturehash for ad in Annonce.select()]\n for old_hash in hashes:\n if old_hash is not None and hex_to_hash(old_hash) - new_hash < self.HASH_SIMILAR_TRESHOLD:\n return 
Annonce.get(Annonce.picturehash == old_hash)\n else:\n return False\n\n\n", "sub_path": "scrapping_modules/search.py", "file_name": "search.py", "file_ext": "py", "file_size_in_byte": 4419, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "requests.request", "line_number": 29, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 46, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Annonce.get_by_id", "line_number": 61, "usage_type": "call"}, {"api_name": "models.Annonce", "line_number": 61, "usage_type": "name"}, {"api_name": "peewee.DoesNotExist", "line_number": 63, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 71, "usage_type": "call"}, {"api_name": "trello_module.TrelloModule", "line_number": 77, "usage_type": "call"}, {"api_name": "models.Annonce.create", "line_number": 83, "usage_type": "call"}, {"api_name": "models.Annonce", "line_number": 83, "usage_type": "name"}, {"api_name": "imagehash.phash", "line_number": 97, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 97, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 97, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 97, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 103, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 111, "usage_type": "call"}, {"api_name": "imagehash.phash", "line_number": 114, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 114, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 114, "usage_type": "name"}, {"api_name": "urllib.request.urlopen", "line_number": 114, "usage_type": "call"}, {"api_name": "models.Annonce.select", "line_number": 115, "usage_type": "call"}, {"api_name": "models.Annonce", "line_number": 115, "usage_type": "name"}, {"api_name": "imagehash.hex_to_hash", "line_number": 117, "usage_type": "call"}, {"api_name": "models.Annonce.get", "line_number": 118, "usage_type": "call"}, {"api_name": "models.Annonce", "line_number": 118, "usage_type": "name"}, {"api_name": "models.Annonce.picturehash", "line_number": 118, "usage_type": "attribute"}]} +{"seq_id": "197828845", "text": "\nfrom controllers.controller import Controller\nfrom events.events_base import (BasicEvents, EventManager, EventType,\n InputEvent, InventorySelectionEvent,\n InventoryTransferEvent)\nfrom models.scenes.inventory_scene import (InventoryScene, SlotHeaderInfo,\n SlotRowInfo)\n\n\nclass InventoryController(Controller):\n \"\"\"Handles user inputs for the inventory scene\"\"\"\n\n def __init__(self, scene: InventoryScene) -> None:\n super().__init__()\n self._scene = scene\n\n def _notify(self, event: EventType) -> None:\n if isinstance(event, InputEvent):\n if event.event_type == BasicEvents.MOUSE_CLICK:\n self._handle_mouse_click(event)\n\n def _handle_mouse_click(self, event: InputEvent) -> None:\n x = event.mouse[0]\n y = event.mouse[1]\n\n # Handle clicks on mod slots\n clicked_obj = self._scene.layout.object_at(x, y)\n # Player clicks on a mod.\n if isinstance(clicked_obj, SlotRowInfo):\n EventManager.post(InventorySelectionEvent(clicked_obj.mod))\n # Player clicks on a slot category -> attempt mod transfer.\n elif isinstance(clicked_obj, SlotHeaderInfo):\n EventManager.post(InventoryTransferEvent(clicked_obj.slot))\n EventManager.post(InventorySelectionEvent(None))\n else:\n 
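EventManager.post(InventorySelectionEvent(None))  # clicks that hit nothing selectable clear the selection\n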
", "sub_path": "src/controllers/inventory_controller.py", "file_name": "inventory_controller.py", "file_ext": "py", "file_size_in_byte": 1476, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "controllers.controller.Controller", "line_number": 10, "usage_type": "name"}, {"api_name": "models.scenes.inventory_scene.InventoryScene", "line_number": 13, "usage_type": "name"}, {"api_name": "events.events_base.EventType", "line_number": 17, "usage_type": "name"}, {"api_name": "events.events_base.InputEvent", "line_number": 18, "usage_type": "argument"}, {"api_name": "events.events_base.BasicEvents.MOUSE_CLICK", "line_number": 19, "usage_type": "attribute"}, {"api_name": "events.events_base.BasicEvents", "line_number": 19, "usage_type": "name"}, {"api_name": "events.events_base.InputEvent", "line_number": 22, "usage_type": "name"}, {"api_name": "models.scenes.inventory_scene.SlotRowInfo", "line_number": 29, "usage_type": "argument"}, {"api_name": "events.events_base.EventManager.post", "line_number": 30, "usage_type": "call"}, {"api_name": "events.events_base.EventManager", "line_number": 30, "usage_type": "name"}, {"api_name": "events.events_base.InventorySelectionEvent", "line_number": 30, "usage_type": "call"}, {"api_name": "models.scenes.inventory_scene.SlotHeaderInfo", "line_number": 32, "usage_type": "argument"}, {"api_name": "events.events_base.EventManager.post", "line_number": 33, "usage_type": "call"}, {"api_name": "events.events_base.EventManager", "line_number": 33, "usage_type": "name"}, {"api_name": "events.events_base.InventoryTransferEvent", "line_number": 33, "usage_type": "call"}, {"api_name": "events.events_base.EventManager.post", "line_number": 34, "usage_type": "call"}, {"api_name": "events.events_base.EventManager", "line_number": 34, "usage_type": "name"}, {"api_name": "events.events_base.InventorySelectionEvent", "line_number": 34, "usage_type": "call"}, {"api_name": "events.events_base.EventManager.post", "line_number": 36, "usage_type": "call"}, {"api_name": "events.events_base.EventManager", "line_number": 36, "usage_type": "name"}, {"api_name": "events.events_base.InventorySelectionEvent", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "566292539", "text": "import os\r\nimport cv2\r\nimport random\r\n\r\n\r\ndef read_kitti_anno(label_file):\r\n    \"\"\"\r\n    Read vehicle object annotations from the KITTI dataset\r\n    :param label_file: path to a KITTI label file\r\n    :return:\r\n    \"\"\"\r\n    labels = [line.rstrip().split(' ') for line in open(label_file)]\r\n    rect_list = \"\"\r\n    for label in labels:\r\n        if not (label[0] == 'Car' or label[0] == 'Van' or label[0] == 'Truck'):\r\n            continue\r\n        class_id = 1\r\n        rect_list = \"\".join((rect_list, str(class_id), \" \", str(float(label[4])), \" \",\r\n                             str(float(label[5])), \" \", str(float(label[6])), \" \", str(float(label[7])), \" \"))\r\n\r\n    return rect_list\r\n\r\n\r\ndef get_image_path(filepath, trainpath, valpath):\r\n    image_paths = []\r\n    label_paths = []\r\n    for line in open(filepath):\r\n        line_ = line.split()\r\n        path0 = os.path.join(\"F:\\Kitti\\KittiBox\", line_[0])\r\n        path1 = os.path.join(\"F:\\Kitti\\KittiBox\", line_[1])\r\n        image_paths.append(path0)\r\n        label_paths.append(path1)\r\n\r\n    image = cv2.imread(image_paths[0])\r\n\r\n    shape = image.shape[0:2]\r\n\r\n    index1 = 0\r\n    index2 = 0\r\n    with open(trainpath, 'w') as f1:\r\n        with open(valpath, 'w') as f2:\r\n            for (image_path, label_path) in zip(image_paths, label_paths):\r\n                detection_objects = read_kitti_anno(label_path)\r\n                if detection_objects == \"\":\r\n
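                    continue # skip frames whose labels contain no Car/Van/Truck boxes\r\n                if 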
random.random() < 0.8:\n result_line = \"\".join((str(index1), ' ', image_path, ' ',\n str(shape[0]), ' ', str(shape[1]), ' ', detection_objects))\n index1 += 1\n print(result_line)\n f1.write(result_line)\n f1.write('\\n')\n else:\n result_line = \"\".join((str(index2), ' ', image_path, ' ',\n str(shape[0]), ' ', str(shape[1]), ' ', detection_objects))\n index2 += 1\n print(result_line)\n f2.write(result_line)\n f2.write('\\n')\n\n\nif __name__ == \"__main__\":\n filepath = r\"F:\\Kitti\\KittiBox\\train.txt\"\n trainpath = os.path.join(os.getcwd(), \"../data/my_data\", \"train.txt\")\n valpath = os.path.join(os.getcwd(), \"../data/my_data\", \"val.txt\")\n get_image_path(filepath, trainpath, valpath)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "utils/data_process.py", "file_name": "data_process.py", "file_ext": "py", "file_size_in_byte": 2387, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 34, "usage_type": "call"}, {"api_name": "random.random", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path", "line_number": 64, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 64, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "252276473", "text": "#!/usr/bin/env python3\n\"\"\"\nGiven a collection of intervals, merge all overlapping intervals.\n\nEXAMPLES:\n\n Input: [[1,3], [2,6], [8,10], [15,18]]\n Output: [[1,6], [8,10], [15,18]]\n Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].\n\n Input: [[1,4], [4,5]]\n Output: [[1,5]]\n Explanation: Intervals [1,4] and [4,5] are considered overlapping.\n\nREFERENCE:\n - https://leetcode.com/problems/merge-intervals/ (Medium)\n\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def merge_v1(self, intervals: List[List[int]]) -> List[List[int]]:\n \"\"\"Sort.\n A simple method will take O(N^2), comparing each element with the other.\n If we sort the intervals first, it will be O(N log(N)) + O(N).\n Here we use the built-in sort method. 
Alternatively, we may write\n our own sort method (like merge sort) and make some modifications.\n \"\"\"\n ans = []\n if not intervals: # validate input\n return ans\n\n # Get the first element\n it = iter(sorted(intervals)) # will sort on the first value!\n curr = next(it) \n\n # Iterate through the rest\n for x in it:\n # Compare two consecutive intervals.\n if x[0] <= curr[1]: \n if x[1] > curr[1]:\n curr[1] = x[1]\n else:\n ans.append(curr)\n curr = x\n ans.append(curr)\n return ans\n\n def merge_v2(self, intervals: List[List[int]]) -> List[List[int]]:\n \"\"\"Simple O(N^2) method.\"\"\"\n visited = set()\n ans = []\n for i, x in enumerate(intervals):\n if i in visited:\n continue\n visited.add(i)\n for j in range(i+1, len(intervals)):\n if j in visited:\n continue\n y = intervals[j]\n\n # Check if x and y overlap\n if y[0] > x[1] or y[1] < x[0]:\n continue\n else:\n x[0] = min(x[0], y[0])\n x[1] = max(x[1], y[1])\n visited.add(j)\n ans.append(x)\n return ans\n\n\ndef main():\n test_data = [\n [[1, 3], [8, 10], [2, 6], [15, 18], [2, 4], [9, 11]],\n [[1, 4], [4, 5]],\n [[1, 4], [2, 3]],\n [[2, 3]],\n [],\n ]\n\n sol = Solution()\n for intervals in test_data:\n print(\"# Input : {}\".format(intervals))\n print(\" - Output v1: {}\".format(sol.merge_v1(intervals)))\n print(\" - Output v2: {}\".format(sol.merge_v2(intervals)))\n print()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "python3/sorting_and_search/merge_intervals.py", "file_name": "merge_intervals.py", "file_ext": "py", "file_size_in_byte": 2640, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "435619771", "text": "#script for downloading wykop.pl avatars\nimport os\nimport sys\nimport shutil\nimport requests\nimport bs4 as bs\nimport lxml\nimport urllib.parse\n\ndef script_path():\n '''change current path to script one'''\n path = os.path.realpath(os.path.dirname(sys.argv[0]))\n os.chdir(path) #it seems to be quite important\n return path\n \ndef get_avatar(nick):\n base_url = \"https://www.wykop.pl/ludzie/\"\n nick_url = urllib.parse.urljoin(base_url, nick)\n content, status = get_content(nick_url)\n if status == 404:\n return \"\"\n soup = bs.BeautifulSoup(content, 'lxml')\n hrefs = soup.find_all('img', {'title': nick}) #this is useful\n if hrefs:\n avatar_url = hrefs[0]['src']\n else:\n avatar_url = \"\"\n return avatar_url\n\ndef make_dir(new_dir):\n path = os.path.realpath(os.path.dirname(sys.argv[0]))\n os.chdir(path)\n if not os.path.exists(new_dir):\n os.makedirs(new_dir)\n new_path = os.path.join(path, new_dir)\n return new_path\n \ndef save_avatars(nick_list):\n new_path = make_dir('avatars') #create dir if not exists\n for nick in nick_list:\n avatar_url = get_avatar(nick)\n file_path = os.path.join(new_path, nick + \".jpg\")\n if download_image(avatar_url, file_path):\n print(\"avatar saved as: {}\".format(file_path))\n else:\n print(\"failed to download from: '{}' nick: '{}'\".format(avatar_url, nick))\n return True\n\ndef read_file(file_name, rmnl=False):\n try:\n with open(file_name, \"r\") as file:\n if rmnl:\n fileContent = file.read().splitlines()\n else:\n fileContent = file.readlines()\n except:\n fileContent = []\n return fileContent\n \ndef get_content(url):\n res = requests.get(url)\n content = res.text\n status = res.status_code\n return content, status\n\ndef 
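convert_nick_str_to_list(nick_str): # normalize nicks: drop '@' and commas, split on whitespace; also returns an \"@nick1 @nick2 ...\" call string\n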
convert_nick_str_to_list(nick_str):\n nick_str = nick_str.replace(\"@\", \" \")\n nick_str = nick_str.replace(\",\", \" \")\n nick_list = [item.strip() for item in nick_str.split() if item]\n to_call = \" \".join([\"@\" + item for item in nick_list])\n return nick_list, to_call \n \ndef download_image(url, file_name):\n '''download image from specified url and save it to specified file_name'''\n try:\n response = requests.get(url, stream=True)\n with open(file_name, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\n return True\n except:\n return False\n\ndef usage():\n print(\"usage:\")\n print(\" from console:\")\n print(\" -run script and put nicknames into console window\")\n print(\" from file:\")\n print(\" -run script with .txt file with nicnames as argument. Example:\")\n print(\" python wykop_avatars.py nicnames.txt\")\n print(\"tip: you can put nicnames separeted with spaces, comas and with '@' in front of. It doesn't matter\")\n print(\"--\"*20)\n print()\n \n \nif __name__ == \"__main__\":\n path = script_path()\n args = sys.argv[1:]\n if not args:\n usage()\n nick_str = input(\"put vikop nicnames here:\\n\")\n else:\n file = args[0]\n try:\n nick_str = \" \".join(read_file(file, rmnl=True))\n except:\n nick_str = \"\"\n nick_list, to_call = convert_nick_str_to_list(nick_str)\n save_avatars(nick_list)\n ", "sub_path": "wykop/wykop_avatars.py", "file_name": "wykop_avatars.py", "file_ext": "py", "file_size_in_byte": 3364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.realpath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 13, "usage_type": "call"}, {"api_name": "urllib.parse.parse.urljoin", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 18, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 18, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 31, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 61, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 76, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 78, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 97, "usage_type": "attribute"}]} +{"seq_id": "101160706", "text": "\"\"\"\n\n \n \n \n \n \n \n\n\"\"\"\nimport Domoticz\nfrom enum import IntEnum\n\nimport sys\nif not 
'/usr/lib/python3.7/dist-packages' in sys.path:\n sys.path.append('/usr/lib/python3.7/dist-packages')\n\n\nfrom pysnmp.hlapi import *\n\n\nclass SNMPCommand(IntEnum):\n ON = 1\n OFF = 0\n\nclass SNMPPlugin:\n\n oidPrefix = '1.3.6.1.4.1.38783'\n oidRelay1 = oidPrefix + '.3.2.0'\n oidRelay2 = oidPrefix + '.3.3.0'\n oidRelay3 = oidPrefix + '.3.4.0'\n oidRelay4 = oidPrefix + '.3.5.0'\n oidRelay5 = oidPrefix + '.3.6.0'\n oidRelay6 = oidPrefix + '.3.7.0'\n oidRelay7 = oidPrefix + '.3.8.0'\n oidRelay8 = oidPrefix + '.3.9.0'\n\n oidSaveConfiguration = oidPrefix + '.6.0'\n\n availableRelays = [oidRelay1, oidRelay2, oidRelay3, oidRelay4, oidRelay5, oidRelay6, oidRelay7, oidRelay8]\n relaysCount = len(availableRelays)\n \n deviceType = 17 # Lighting 2 - AC\n switchType = 0 # On/Off Switch\n\n # Plugin configuration\n snmpAddress = ''\n snmpPort = 161\n snmpPublicCommunity = ''\n snmpPrivateCommunity = ''\n\n def __init__(self):\n return\n\n def onStart(self):\n self.snmpAddress = Parameters[\"Address\"]\n self.snmpPort = Parameters[\"Port\"]\n self.snmpPublicCommunity = Parameters[\"Mode1\"]\n self.snmpPrivateCommunity = Parameters[\"Mode2\"]\n\n if (len(Devices) < self.relaysCount):\n Domoticz.Log('Create ' + str(self.relaysCount) + ' devices')\n\n # Range starts by 1 for name and unit (unit cannot be 0)\n for i in range(1, self.relaysCount + 1):\n Domoticz.Device(Name = 'Relay ' + str(i), Unit = i, Type = self.deviceType, Switchtype = self.switchType).Create()\n\n def onCommand(self, Unit, Command, Level, Hue):\n # Secure onCommand call to avoid array index outbound\n if Unit > self.relaysCount:\n return\n\n commandStr = str(Command)\n snmpCommand = SNMPCommand.ON if commandStr == 'On' else SNMPCommand.OFF\n\n resultCommand = self.writeSnmpCommand(self.availableRelays[Unit - 1], snmpCommand)\n resultSave = self.writeSnmpCommand(self.oidSaveConfiguration, SNMPCommand.ON)\n \n if (resultCommand == 0 and resultSave == 0):\n Devices[Unit].Update(nValue = int(snmpCommand), sValue = commandStr)\n\n def onHeartbeat(self):\n for unit in Devices:\n relayStatus = int(self.readSnmpCommand(self.availableRelays[unit - 1]))\n Devices[unit].Update(nValue = relayStatus, sValue = 'On' if relayStatus == 0 else 'Off')\n\n\n def readSnmpCommand(self, Oid):\n errorIndication, errorStatus, errorIndex, varBinds = next(\n getCmd(\n SnmpEngine(),\n CommunityData(self.snmpPublicCommunity),\n UdpTransportTarget((self.snmpAddress, self.snmpPort)),\n ContextData(),\n ObjectType(ObjectIdentity(Oid))\n )\n )\n\n if errorIndication:\n Domoticz.Error(str(errorIndication))\n return '0'\n elif errorStatus:\n Domoticz.Error('%s at %s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))\n return '0'\n else:\n return varBinds[0][1].prettyPrint()\n\n def writeSnmpCommand(self, Oid, SnmpCommand):\n\n errorIndication, errorStatus, errorIndex, varBinds = next(\n setCmd(\n SnmpEngine(),\n CommunityData(self.snmpPrivateCommunity, mpModel = 0),\n UdpTransportTarget((self.snmpAddress, self.snmpPort)),\n ContextData(),\n ObjectType(ObjectIdentity(Oid), Integer(int(SnmpCommand)))\n )\n )\n\n if errorIndication:\n Domoticz.Error(str(errorIndication))\n return -1\n elif errorStatus:\n Domoticz.Error('%s at %s' % (errorStatus.prettyPrint(), errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))\n return -1\n\n return 0\n\nglobal _plugin\n_plugin = SNMPPlugin()\n\ndef onStart():\n global _plugin\n _plugin.onStart()\n\ndef onCommand(Unit, Command, Level, Hue):\n global _plugin\n _plugin.onCommand(Unit, Command, 
Level, Hue)\n\ndef onHeartbeat():\n global _plugin\n _plugin.onHeartbeat()", "sub_path": "TCW181B-SNMP/plugin.py", "file_name": "plugin.py", "file_ext": "py", "file_size_in_byte": 4734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "enum.IntEnum", "line_number": 22, "usage_type": "name"}, {"api_name": "Domoticz.Log", "line_number": 62, "usage_type": "call"}, {"api_name": "Domoticz.Device", "line_number": 66, "usage_type": "call"}, {"api_name": "Domoticz.Error", "line_number": 100, "usage_type": "call"}, {"api_name": "Domoticz.Error", "line_number": 103, "usage_type": "call"}, {"api_name": "Domoticz.Error", "line_number": 121, "usage_type": "call"}, {"api_name": "Domoticz.Error", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "161969752", "text": "import cv2, argparse\nimport numpy as np\n\ndef oldify(original):\n b, g, r = cv2.split(original)\n output = np.copy(original)\n\n output[:,:,0] = np.clip(0.272 * r + 0.534 * g + 0.131 * b, 0, 255)\n output[:,:,1] = np.clip(0.349 * r + 0.686 * g + 0.168 * b, 0, 255)\n output[:,:,2] = np.clip(0.393 * r + 0.769 * g + 0.189 * b, 0, 255)\n return output\n\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--filename\")\nargs = vars(ap.parse_args())\n\n# Read the image\nfilename = \"images/lupita.jpg\"\nif args['filename']:\n filename = args['filename']\n\nimg = cv2.imread(filename)\n\noutput = oldify(img)\n\ncombined = np.hstack([img,output])\ncv2.namedWindow(\"Original Image -- Clarendon Filter output\", cv2.WINDOW_AUTOSIZE)\ncv2.imshow(\"Original Image -- Clarendon Filter output\", combined)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\ncv2.imwrite(\"results/oldify.jpg\",output)\n", "sub_path": "week2/myOldify.py", "file_name": "myOldify.py", "file_ext": "py", "file_size_in_byte": 882, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "cv2.split", "line_number": 5, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 9, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 10, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 27, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.WINDOW_AUTOSIZE", "line_number": 28, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 29, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "29181066", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 11 16:45:36 2020\n\n@author: Illusion\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport numpy.fft as fft\n\nx = np.linspace(-2 * np.pi, 2 * np.pi, 1000)\n\nn = 1000\ny = np.zeros(x.size)\nfor i in range(1, n + 1):\n y += 4 * np.pi / (2 * i - 1) * np.sin((2 * i - 
1) * x)\n\ncomplex_array = fft.fft(y)\nprint(complex_array.shape) # (1000,) \nprint(complex_array.dtype) # complex128 \nprint(complex_array[0]) # (-2.1458390619955026e-12+0j)\ny_new = fft.ifft(complex_array)\n\nplt.subplot(311)\nplt.grid(linestyle=':')\nplt.plot(x, y, label='y') # y is the sum of the 1000 sine terms\nplt.subplot(312)\nplt.plot(x, y_new, label='y_new', color='orangered') # the sequence recovered by the inverse FFT\n\n# Get the frequency bins of the component waves\nfreqs = fft.fftfreq(x.size, x[1] - x[0])\n# The modulus of each complex value is the signal amplitude (energy) at that frequency\ncomplex_array = fft.fft(y)\npows = np.abs(complex_array)\n\nplt.subplot(313)\nplt.title('Frequency Domain', fontsize=16)\nplt.xlabel('Frequency', fontsize=12)\nplt.ylabel('Power', fontsize=12)\nplt.tick_params(labelsize=10)\nplt.grid(linestyle=':')\nplt.plot(freqs[freqs > 0], pows[freqs > 0], c='orangered', label='Frequency')\nplt.legend()\nplt.tight_layout()\nplt.show()", "sub_path": "Script/fourier_transform_demo.py", "file_name": "fourier_transform_demo.py", "file_ext": "py", "file_size_in_byte": 1216, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.linspace", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 12, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.fft.fft", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 19, "usage_type": "name"}, {"api_name": "numpy.fft.ifft", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "numpy.fft.fftfreq", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 32, "usage_type": "name"}, {"api_name": "numpy.fft.fft", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.fft", "line_number": 34, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tick_params", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}]}
+{"seq_id": "75972519", "text": "from flask_script import Manager\nfrom flask_graphql import GraphQLView\nfrom flask_migrate import MigrateCommand\nfrom flask import Flask\nfrom flask_uwsgi_websocket import WebSocket\nfrom flask import send_from_directory\nfrom api.models import database_session, schema, AccountSchema\nfrom api.core import create_response\n\napp = Flask(__name__)\n\nws = WebSocket(app)\n\nmanager = Manager(app)\n\nmanager.add_command(\"db\", MigrateCommand)\n\napp.config['RESULT_STATIC_PATH'] = \"./static\"\n\n\n@app.route(\"/accounts\", methods=[\"GET\"])\ndef get_accounts():\n    query = '''\n    query {\n        accounts {\n            account_id,\n            zone_id,\n            character_name,\n            email,\n            password_hash,\n            level\n        }\n    }\n    '''\n\n    result = schema.execute(query, context_value={'session': database_session})\n    return create_response(dict(result.data))\n\n\n@app.route('/')\ndef index():\n    return send_from_directory(app.config['RESULT_STATIC_PATH'], 'index.html')\n\n\n@app.route('/static/basketri.js')\ndef serve_file():\n    return send_from_directory(app.config['RESULT_STATIC_PATH'], 'basketri.js')\n\n\napp.add_url_rule('/acczone', view_func=GraphQLView.as_view('acczone', schema=AccountSchema, graphiql=True))\n\n\n@ws.route('/ws')\ndef echo(ws):\n    while True:\n        msg = ws.receive()\n        if msg is not None:\n            ws.send(msg)\n        else: return\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1443, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_uwsgi_websocket.WebSocket", "line_number": 12, "usage_type": "call"}, {"api_name": "flask_script.Manager", "line_number": 14, "usage_type": "call"}, {"api_name": "flask_migrate.MigrateCommand", "line_number": 16, "usage_type": "argument"}, {"api_name": "api.models.schema.execute", "line_number": 36, "usage_type": "call"}, {"api_name": "api.models.schema", "line_number": 36, "usage_type": "name"}, {"api_name": "api.models.database_session", "line_number": 36, "usage_type": "name"}, {"api_name": "api.core.create_response", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 47, "usage_type": "call"}, {"api_name": "flask_graphql.GraphQLView.as_view", "line_number": 50, "usage_type": "call"}, {"api_name": "flask_graphql.GraphQLView", "line_number": 50, "usage_type": "name"}, {"api_name": "api.models.AccountSchema", "line_number": 50, "usage_type": "name"}]}
+{"seq_id": "553748084", "text": "import time\n\nimport psycopg2\nimport requests\nimport schedule\n\nfrom scheduler.db import AbstractStorage, PostgresDB\nfrom scheduler.settings import Settings, logger\n\nsettings = Settings()\nSERVICE_URL = settings.SERVICE_URL\n\n\nclass Scheduler:\n    def __init__(self, db: AbstractStorage):\n        self.db = db\n\n    def check_processing_orders(self):\n        \"\"\" Get orders in state Draft, In progress and send them for update to Billing API \"\"\"\n        processing_orders = self.db.get_processing_orders()\n        for order in processing_orders:\n            self.send_order_for_update(order.id)\n            time.sleep(settings.REQUEST_DELAY)\n\n    def check_overdue_orders(self):\n        \"\"\"\n        Get orders in state Draft, In progress that are not processed for more than 10 days and send them\n        for cancelling to Billing API\n        \"\"\"\n        overdue_orders = self.db.get_overdue_orders()\n        for overdue_order in overdue_orders:\n            self.send_order_for_cancel(overdue_order.id)\n            time.sleep(settings.REQUEST_DELAY)\n\n    def check_subscriptions(self):\n        \"\"\"\n        Runner for gathering subscriptions once a day and\n        sending requests to Billing API to update or cancel a subscription.\n        \"\"\"\n        self.check_active_subscriptions()\n        self.check_overdue_subscriptions()\n\n    def check_active_subscriptions(self):\n        \"\"\"\n        Get Active subscriptions which need to be repaid.\n        Send them to Billing API for update.\n        \"\"\"\n        subscriptions = self.db.get_active_subscriptions()\n        for subscription in subscriptions:\n            self.send_subscription_for_update(subscription.id)\n            time.sleep(settings.REQUEST_DELAY)\n\n    def check_overdue_subscriptions(self):\n        \"\"\"\n        Get Active subscriptions which need to be cancelled.\n        Send them to Billing API for deactivation.\n        \"\"\"\n        overdue_subscriptions = self.db.get_overdue_subscriptions()\n        for overdue_subscription in overdue_subscriptions:\n            self.send_subscription_for_cancel(overdue_subscription.id)\n            time.sleep(settings.REQUEST_DELAY)\n\n    def check_pre_active_subscriptions(self):\n        \"\"\"\n        Get Pre Active subscriptions which need to be Activated.\n        Send them to Billing API for activation.\n        \"\"\"\n        pre_active_subscriptions = self.db.get_pre_active_subscriptions()\n        for pre_active_subscription in pre_active_subscriptions:\n            self.send_order_for_activate(pre_active_subscription.id)\n            time.sleep(settings.REQUEST_DELAY)\n\n    def check_pre_deactivate_subscriptions(self):\n        \"\"\"\n        Get Pre Deactivated subscriptions which need to be Deactivated.\n        Send them to Billing API for deactivation.\n        \"\"\"\n        pre_deactivate_subscriptions = self.db.get_pre_deactivate_subscriptions()\n        for subscription in pre_deactivate_subscriptions:\n            self.send_subscription_for_cancel(subscription.id)\n            time.sleep(settings.REQUEST_DELAY)\n\n    @staticmethod\n    def send_subscription_for_update(subscription_id: str) -> None:\n        \"\"\"\n        Send request for payment to Billing API\n        :param subscription_id: UUID of subscription\n        :return: None\n        \"\"\"\n        try:\n            logger.info(\n                f\"Sending request to Billing API to update a subscription with id {subscription_id}\"\n            )\n            requests.post(\n                f\"{SERVICE_URL}/subscription/{subscription_id}/recurring_payment\"\n            )\n        except Exception as e:\n            logger.error(\n                \"Error while sending a request to update a subscription to Billing API: %s\"\n                % e\n            )\n\n    @staticmethod\n    def send_order_for_activate(subscription_id: str) -> None:\n        \"\"\"\n        Send request for activation of the subscription to the Billing API\n        :param subscription_id: UUID of subscription\n        :return: None\n        \"\"\"\n        try:\n            logger.info(\n                f\"Sending request to Billing API to activate a subscription with id {subscription_id}\"\n            )\n            requests.post(f\"{SERVICE_URL}/subscription/{subscription_id}/activate\")\n\n        except Exception as e:\n            logger.error(\n                \"Error while sending a request to activate a subscription to Billing API: %s\"\n                % e\n            )\n\n    @staticmethod\n    def send_subscription_for_cancel(subscription_id: str) -> None:\n        \"\"\"\n        Send request for cancelling a user subscription to Billing API\n        :param subscription_id: UUID of subscription\n        :return: None\n        \"\"\"\n        try:\n            logger.info(\n                f\"Sending request to Billing API to cancel a subscription with id {subscription_id}\"\n            )\n            requests.post(f\"{SERVICE_URL}/subscription/{subscription_id}/deactivate\")\n        except Exception as e:\n            logger.error(\n                \"Error while sending a request to cancel a subscription to Billing API: %s\"\n                % e\n            )\n\n    @staticmethod\n    def send_order_for_update(order_id: str) -> None:\n        \"\"\"\n        Send request for Billing API to update an order\n        :param order_id: UUID of order\n        :return: None\n        \"\"\"\n        try:\n            logger.info(\n                f\"Sending request to Billing API to update an order with id {order_id}\"\n            )\n            requests.post(f\"{SERVICE_URL}/order/{order_id}/update_info\")\n        except Exception as e:\n            logger.error(\n                f\"Error while sending a request to update an order to Billing API: {e}\"\n            )\n\n    @staticmethod\n    def send_order_for_cancel(order_id: str) -> None:\n        \"\"\"\n        Send request for Billing API to set an Error state on order\n        :param order_id: UUID of order\n        :return: None\n        \"\"\"\n        try:\n            logger.info(\n                f\"Sending request to Billing API to cancel an order with id {order_id}\"\n            )\n            requests.post(f\"{SERVICE_URL}/order/{order_id}/cancel\")\n        except Exception as e:\n            logger.error(\n                f\"Error while sending a request to cancel an order to Billing API: {e}\"\n            )\n\n\nif __name__ == \"__main__\":\n    logger.info(\"Billing scheduler is starting\")\n    pg_connection = PostgresDB(\n        psycopg2.connect(\n            database=settings.DB_NAME,\n            user=settings.DB_USER,\n            password=settings.DB_PASSWORD,\n            host=settings.DB_HOST,\n            port=settings.DB_PORT,\n            options=f\"-c search_path={settings.DB_SCHEMA}\",\n        )\n    )\n    scheduler = Scheduler(pg_connection)\n\n    schedule.every().day.at(\"10:30\").do(scheduler.check_subscriptions)\n    schedule.every().day.at(\"10:30\").do(scheduler.check_overdue_orders)\n    schedule.every(5).seconds.do(scheduler.check_processing_orders)\n    schedule.every(6).seconds.do(scheduler.check_pre_active_subscriptions)\n    schedule.every(7).seconds.do(scheduler.check_pre_deactivate_subscriptions)\n\n    logger.info(\"Billing scheduler is running\")\n\n    while True:\n        schedule.run_pending()\n        time.sleep(1)\n", "sub_path": "scheduler/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 7171, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "scheduler.settings.Settings", "line_number": 10, "usage_type": "call"}, {"api_name": "scheduler.db.AbstractStorage", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 61, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 81, "usage_type": "call"}, {"api_name": "scheduler.settings.logger.info", "line_number": 91, "usage_type": 
"call"}, {"api_name": "scheduler.settings.logger", "line_number": 91, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 94, "usage_type": "call"}, {"api_name": "scheduler.settings.logger.error", "line_number": 98, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 98, "usage_type": "name"}, {"api_name": "scheduler.settings.logger.info", "line_number": 111, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 111, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 114, "usage_type": "call"}, {"api_name": "scheduler.settings.logger.error", "line_number": 117, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 117, "usage_type": "name"}, {"api_name": "scheduler.settings.logger.info", "line_number": 130, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 130, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 133, "usage_type": "call"}, {"api_name": "scheduler.settings.logger.error", "line_number": 135, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 135, "usage_type": "name"}, {"api_name": "scheduler.settings.logger.info", "line_number": 148, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 148, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 151, "usage_type": "call"}, {"api_name": "scheduler.settings.logger.error", "line_number": 153, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 153, "usage_type": "name"}, {"api_name": "scheduler.settings.logger.info", "line_number": 165, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 165, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 168, "usage_type": "call"}, {"api_name": "scheduler.settings.logger.error", "line_number": 170, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 170, "usage_type": "name"}, {"api_name": "scheduler.settings.logger.info", "line_number": 176, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 176, "usage_type": "name"}, {"api_name": "scheduler.db.PostgresDB", "line_number": 177, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 178, "usage_type": "call"}, {"api_name": "scheduler.db", "line_number": 187, "usage_type": "name"}, {"api_name": "schedule.every", "line_number": 189, "usage_type": "call"}, {"api_name": "scheduler.db.check_subscriptions", "line_number": 189, "usage_type": "attribute"}, {"api_name": "scheduler.db", "line_number": 189, "usage_type": "name"}, {"api_name": "schedule.every", "line_number": 190, "usage_type": "call"}, {"api_name": "scheduler.db.check_overdue_orders", "line_number": 190, "usage_type": "attribute"}, {"api_name": "scheduler.db", "line_number": 190, "usage_type": "name"}, {"api_name": "schedule.every", "line_number": 191, "usage_type": "call"}, {"api_name": "scheduler.db.check_processing_orders", "line_number": 191, "usage_type": "attribute"}, {"api_name": "scheduler.db", "line_number": 191, "usage_type": "name"}, {"api_name": "schedule.every", "line_number": 192, "usage_type": "call"}, {"api_name": "scheduler.db.check_pre_active_subscriptions", "line_number": 192, "usage_type": "attribute"}, {"api_name": "scheduler.db", "line_number": 192, "usage_type": "name"}, {"api_name": "schedule.every", "line_number": 193, "usage_type": "call"}, {"api_name": 
"scheduler.db.check_pre_deactivate_subscriptions", "line_number": 193, "usage_type": "attribute"}, {"api_name": "scheduler.db", "line_number": 193, "usage_type": "name"}, {"api_name": "scheduler.settings.logger.info", "line_number": 195, "usage_type": "call"}, {"api_name": "scheduler.settings.logger", "line_number": 195, "usage_type": "name"}, {"api_name": "schedule.run_pending", "line_number": 198, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 199, "usage_type": "call"}]} +{"seq_id": "194223742", "text": "from telethon.sync import TelegramClient,events\r\nimport configparser\r\nfrom telethon import utils\r\n\r\nfrom pydrive.auth import GoogleAuth\r\nfrom pydrive.drive import GoogleDrive\r\n\r\nconfig = configparser.ConfigParser()\r\nconfig.read(\"config-sample.ini\")\r\n\r\n# Setting configuration values\r\napi_id = config['Telegram']['api_id']\r\napi_hash = config['Telegram']['api_hash']\r\n\r\napi_hash = str(api_hash)\r\n\r\nphone = config['Telegram']['phone']\r\nusername = config['Telegram']['username']\r\n\r\nwith TelegramClient(username,api_id,api_hash) as client:\r\n messages=client.get_messages('https://t.me/UPSC_Prelims_Mains_PDF_Materials',limit=2)\r\n for msg in messages:\r\n try:\r\n pdf_name=msg.media.document.attributes[0].file_name\r\n except:\r\n continue\r\n ans=input(\"Do you want to download pdf: \"+msg.media.document.attributes[0].file_name +\" ? [Y/N]\"+\"\\n\"+\"Answer: \")\r\n print()\r\n if ans=='Y' or ans=='y':\r\n print(\"download--> : \",msg.media.document.attributes[0].file_name)\r\n client.download_media(msg,\"files/\")\r\n print()\r\n print()\r\n else:\r\n continue\r\n\r\nimport os\r\n\r\ndirectory=r'C:\\Users\\User\\Desktop\\telegram-data-retrieve/files/'\r\n\r\ngauth=GoogleAuth()\r\ngauth.LocalWebserverAuth()\r\ndrive=GoogleDrive(gauth)\r\n\r\nfor x in os.listdir(directory): \r\n f = drive.CreateFile({'title': x}) \r\n f.SetContentFile(os.path.join(directory, x)) \r\n f.Upload() \r\n print(\"Uploading to google drive....\")\r\n\r\n \r\n\r\n", "sub_path": "data_retrieve.py", "file_name": "data_retrieve.py", "file_ext": "py", "file_size_in_byte": 1509, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "configparser.ConfigParser", "line_number": 8, "usage_type": "call"}, {"api_name": "telethon.sync.TelegramClient", "line_number": 20, "usage_type": "call"}, {"api_name": "pydrive.auth.GoogleAuth", "line_number": 41, "usage_type": "call"}, {"api_name": "pydrive.drive.GoogleDrive", "line_number": 43, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "529602971", "text": "import attr\nfrom api.jira_client.constants.IssueTypes import IssueTypes\nfrom api.jira_client.services.IssueService import IssueService\n\n\n@attr.s(auto_attribs=True)\nclass IssueClient:\n issue_service: IssueService\n\n def create_bug(self, project, summary, **kwargs):\n bug = {\n \"project\": {\n \"key\": project\n },\n \"summary\": summary,\n \"issuetype\": {\n \"id\": IssueTypes.BUG\n }\n }\n bug.update(kwargs)\n response = self.issue_service.create_issue(bug)\n json = response.json()\n return json\n\n def delete_issue(self, issue_id):\n return self.issue_service.delete_issue(issue_id)\n\n def get_issue(self, issue_id):\n return self.issue_service.get_issue(issue_id).json()\n", "sub_path": 
"api/jira_client/clients/IssueClient.py", "file_name": "IssueClient.py", "file_ext": "py", "file_size_in_byte": 804, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "api.jira_client.services.IssueService.IssueService", "line_number": 8, "usage_type": "name"}, {"api_name": "api.jira_client.constants.IssueTypes.IssueTypes.BUG", "line_number": 17, "usage_type": "attribute"}, {"api_name": "api.jira_client.constants.IssueTypes.IssueTypes", "line_number": 17, "usage_type": "name"}, {"api_name": "attr.s", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "398427189", "text": "import pygame\r\nfrom random import random\r\npygame.init()\r\n\r\nwin_width = 1000\r\nwin_height = 600\r\n\r\nwin = pygame.display.set_mode((win_width, win_height))\r\n\r\nwidth = 64\r\nheight = 64\r\nx = 60\r\ny = win_height - height - 100\r\nspeed = 5\r\n\r\ny_constant = y\r\n\r\njumpSpeed = 100\r\njump = False\r\n\r\nred = 255\r\ngreen = 255\r\nblue = 100\r\n\r\nleft = False\r\nright = False\r\nup = False\r\n\r\n#Keep delayLimit even number\r\ndelayLimit = 28\r\ndelay = -delayLimit\r\n\r\n\r\npygame.display.set_caption(\"Fast Legs\")\r\nradius= 128\r\ncx=win_width\r\ncy= y_constant - (128/3)\r\n\r\nscore = 0\r\nhigh_score = 0\r\nspeed_base = 3\r\nspeed_multiplier = 2\r\ns = int(random()*speed_multiplier+speed_base)\r\n\r\nwalkRight = [pygame.image.load('R1.png'), pygame.image.load('R2.png'), pygame.image.load('R3.png'), pygame.image.load('R4.png'), pygame.image.load('R5.png'), pygame.image.load('R6.png'), pygame.image.load('R7.png'), pygame.image.load('R8.png'), pygame.image.load('R9.png')]\r\nwalkLeft = [pygame.image.load('L1.png'), pygame.image.load('L2.png'), pygame.image.load('L3.png'), pygame.image.load('L4.png'), pygame.image.load('L5.png'), pygame.image.load('L6.png'), pygame.image.load('L7.png'), pygame.image.load('L8.png'), pygame.image.load('L9.png')]\r\nstand = pygame.image.load('standing.png')\r\nleftCount = 0\r\nrightCount = 0\r\n\r\nrun_t = True\r\nCircles = []\r\nClouds = []\r\nclass Circle:\r\n def __init__(self,radius, dx, x, y):\r\n self.radius = radius\r\n self.dx = dx\r\n self.x = x\r\n self.y = y\r\n def draw_circle(self):\r\n win.blit(pygame.image.load('b.png'), (self.x, self.y))\r\n self.reset_circle()\r\n self.x -= self.dx\r\n def add_circle(self):\r\n Circles.append(self)\r\n def reset_circle(self):\r\n if self.x + 128 < 0:\r\n global score\r\n global speed_base\r\n global s\r\n score += 1\r\n self.x = cx\r\n self.dx = int(random()*speed_multiplier+speed_base)\r\n s = self.dx\r\n if score% 3==0 and score<=55:\r\n speed_base +=1\r\n\r\n\r\ncloud_x = win_width\r\ncloud_y = 0\r\ncloud_length = 0\r\ncloud_width = 0\r\n\r\n\r\n\r\nclass Cloud:\r\n def __init__(self, x, y, length, width, cloud_speed):\r\n\r\n self.x=x\r\n self.y=y\r\n self.length = length\r\n self.width = width\r\n self.cloud_speed = cloud_speed\r\n self.generate_values()\r\n def generate_values(self):\r\n while (self.length <= 50):\r\n self.length = int(random() * 350)\r\n while (self.width <= 10):\r\n self.width = int(random() * 55)\r\n self.y = int((random()*300)+1)\r\n def draw_cloud(self):\r\n pygame.draw.ellipse(win,(255,255,255),[self.x,self.y,self.length,self.width])\r\n self.x -= s\r\n def add_cloud(self):\r\n Clouds.append(self)\r\n\r\n\r\ntemp = Circle(radius, s, cx, cy)\r\ntemp.add_circle()\r\n\r\ncloud = Cloud(cloud_x,cloud_y,cloud_length,cloud_width,s)\r\ncloud.add_cloud()\r\n\r\nfont = pygame.font.SysFont(None, 200)\r\nfont2 = 
pygame.font.SysFont(None, 50)\r\ndef text_score():\r\n screen_text = font.render(str(score),True,(0,0,0))\r\n win.blit(screen_text,[win_width/2,win_height/4])\r\n\r\n screen_text2 = font2.render(str(high_score), True, (0, 255, 0))\r\n win.blit(screen_text2, [win_width -50, 50])\r\n\r\nrun = True\r\n\r\nwhile run:\r\n pygame.time.delay(7)\r\n\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n key = pygame.key.get_pressed()\r\n\r\n if (key[pygame.K_LEFT] or key[pygame.K_a]):\r\n x -= speed\r\n left = True\r\n if (key[pygame.K_RIGHT] or key[pygame.K_d]):\r\n x += speed\r\n right = True\r\n\r\n\r\n #For Jumping mechanics\r\n if key[pygame.K_UP] or key[pygame.K_w]:\r\n jump = True\r\n\r\n if jump:\r\n if delay < 0:\r\n y -= (delay*delay)/50\r\n delay += 1\r\n\r\n elif delay == 0:\r\n delay += 1\r\n\r\n elif delay <= delayLimit:\r\n y += (delay*delay)/50\r\n delay += 1\r\n elif delay == delayLimit+1:\r\n jump = False\r\n delay = -delayLimit\r\n\r\n\r\n win.fill((0, 0, 0))\r\n\r\n #sky\r\n pygame.draw.rect(win, (46, 132, 244), (0, 0, win_width, y_constant + height))\r\n # Background ground\r\n pygame.draw.rect(win, (2, 204, 18), (0, y_constant + height - 5, win_width, 10))\r\n pygame.draw.rect(win, (97, 26, 9), (0, y_constant + height - 5 +10, win_width, win_height))\r\n # obstacle\r\n\r\n for i in range(len(Circles)):\r\n Circles[i].draw_circle()\r\n\r\n for i in range(len(Clouds)):\r\n Clouds[i].draw_cloud()\r\n\r\n if(random()>0.99):\r\n cloud = Cloud(cloud_x, cloud_y, cloud_length, cloud_width, s)\r\n cloud.add_cloud()\r\n\r\n #Rectangle character\r\n if(left):\r\n win.blit(walkLeft[leftCount], (x, y))\r\n leftCount +=1\r\n if x<=0:\r\n x = 0\r\n elif(right):\r\n win.blit(walkRight[rightCount], (x, y))\r\n rightCount+=1\r\n if x >= win_width -45:\r\n x = win_width -45\r\n else:\r\n win.blit(stand, (x, y))\r\n\r\n left = False\r\n right = False\r\n\r\n text_score()\r\n\r\n if(leftCount>8):\r\n leftCount=0\r\n if (rightCount > 8):\r\n rightCount = 0\r\n if x>=Circles[0].x-35 and x<=(Circles[0].x+Circles[0].radius-35) and y>=Circles[0].y:\r\n explosion = [pygame.image.load('regularExplosion00.png'),pygame.image.load('regularExplosion01.png'),pygame.image.load('regularExplosion02.png'),pygame.image.load('regularExplosion03.png'),pygame.image.load('regularExplosion04.png'),pygame.image.load('regularExplosion05.png'),pygame.image.load('regularExplosion06.png'),pygame.image.load('regularExplosion07.png'),pygame.image.load('regularExplosion08.png')]\r\n\r\n for i in range(9):\r\n win.blit(explosion[i], (x, y))\r\n pygame.time.delay(100)\r\n if i == 8:\r\n run_t = False\r\n leave = font.render(\"D:\", True, (int(random()*255), int(random()*255), int(random()*255)))\r\n win.blit(leave, (win_width/2, win_height / 2))\r\n speed_base = 3\r\n s=3\r\n Circles[0].dx = s\r\n score = 0\r\n run_t = True\r\n Circles[0].x = win_width\r\n x = 60\r\n\r\n if run_t:\r\n pygame.display.update()\r\n\r\n\r\n\r\n\r\n\r\n if score>= high_score:\r\n high_score = score\r\n pygame.display.update()\r\npygame.quit()\r\n\r\n\r\n", "sub_path": "fastLegs.py", "file_name": "fastLegs.py", "file_ext": "py", "file_size_in_byte": 6421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pygame.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 8, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 8, "usage_type": "attribute"}, {"api_name": 
"pygame.display.set_caption", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 34, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 45, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 61, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 73, "usage_type": "call"}, {"api_name": "random.random", "line_number": 97, "usage_type": "call"}, {"api_name": "random.random", "line_number": 99, "usage_type": "call"}, {"api_name": "random.random", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.draw.ellipse", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 114, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 114, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 115, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 126, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 129, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 133, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 166, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 166, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 168, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 169, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 169, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 206, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 206, "usage_type": "attribute"}, {"api_name": "pygame.time.delay", "line_number": 210, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 210, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 213, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 224, 
"usage_type": "call"}, {"api_name": "pygame.display", "line_number": 224, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 232, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 232, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 233, "usage_type": "call"}]} +{"seq_id": "472657885", "text": "from matplotlib import pyplot as plt\nimport time\n\n\ndef fib_iter(n):\n assert n >= 0\n f0, f1 = 0, 1\n for i in range(n - 1):\n f0, f1 = f1, f0 + f1\n return f1\n\n\ndef fib_rec(n):\n assert n >= 0\n return n if n <= 1 else fib_rec(n - 1) + fib_rec(n - 2)\n\n\ndef timed(f, *args, n_iter=100):\n acc = float(\"inf\")\n for i in range(n_iter):\n t0 = time.perf_counter()\n f(*args)\n t1 = time.perf_counter()\n acc = min(acc, t1 - t0)\n return acc\n\n\ndef plotting(fs, args):\n for f in fs:\n plt.plot(args, [timed(f, arg) for arg in args], label=f.__name__)\n plt.legend()\n plt.grid(True)\n plt.show()\n\n\ndef main():\n n = 20\n plotting([fib_rec, fib_iter], list(range(n)))\n plotting([fib_rec], list(range(n)))\n plotting([fib_iter], list(range(n)))\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "methods-comparison.py", "file_name": "methods-comparison.py", "file_ext": "py", "file_size_in_byte": 858, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "time.perf_counter", "line_number": 21, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "181836668", "text": "from PIL import Image, ImageFilter, ImageEnhance, ImageOps\nimport sys\nimport click\nimport datetime\nimport os.path\nfrom progress.bar import ChargingBar\nimport time\nfrom art import *\nimport emoji\n\ndefaultThreshold = 100\n@click.command()\n@click.argument(\"filename\")\n@click.option('--threshold', default=defaultThreshold, help='Threshold value between 0 and 255. Default=100')\n@click.option('--out', '-o', default='', help='Output path. 
Default afterscan-[filename] in pwd')\n@click.option('--invert/--no-invert', '-i', default=False, help='Invert the image')\n@click.option('--force/--no-force', '-f', default=False, help='Overwrite existing file without asking')\ndef main(filename, threshold, out, invert, force):\n    Art = text2art(\"Afterscan\")\n    print(Art)\n    print(emoji.emojize(\n        ':thumbs_up: Turn sloppy photoscans into crisp black/white masterpieces :thumbs_up:'))\n    print(' ')\n    print('------------------')\n    print(' ')\n    if len(filename) == 0:\n        sys.exit()\n    # Read image\n    im = Image.open(filename)\n\n    # bar = ChargingBar('goiiinnng', fill='@',\n    #     suffix='%(percent)d%%')\n    # time.sleep(3)\n    text = 'Threshold (--threshold): '\n    bar = ChargingBar(text, max=255, suffix=str(threshold) + '/255')\n    for i in range(threshold):\n        bar.next()\n    bar.finish()\n\n\n\n    transition = threshold + 20\n\n    def ttt(value):\n        if value < threshold:\n            return 0\n        elif value < transition:\n            return (value - threshold) / (transition - threshold) * 255\n        else:\n            return 255\n\n    im2 = im.convert('L').point(ttt)\n\n    if invert:\n        im2 = ImageOps.invert(im2)\n\n    if len(out) == 0:\n        out = os.getcwd() + '/afterscan-' + os.path.basename(filename)\n\n    if os.path.exists(out) and not force:\n        print(' ')\n        print('ERROR!!!')\n        print('File ' + out + ' already exists! Use --force to overwrite')\n        print(' ')\n        sys.exit()\n\n    im2.save(out)\n    print(' ')\n    print(emoji.emojize(':heavy_check_mark: Optimized image saved as ' + out))\n    print(' ')\n\n# if __name__ == '__main__':\n#     main()\n", "sub_path": "build/lib/afterscan/afterscan.py", "file_name": "afterscan.py", "file_ext": "py", "file_size_in_byte": 2128, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "emoji.emojize", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 29, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 29, "usage_type": "name"}, {"api_name": "progress.bar.ChargingBar", "line_number": 35, "usage_type": "call"}, {"api_name": "PIL.ImageOps.invert", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.ImageOps", "line_number": 55, "usage_type": "name"}, {"api_name": "os.path.getcwd", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.path.basename", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.path.exists", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 60, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 65, "usage_type": "call"}, {"api_name": "emoji.emojize", "line_number": 69, "usage_type": "call"}, {"api_name": "click.command", "line_number": 12, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 13, "usage_type": "call"}, {"api_name": "click.option", "line_number": 14, "usage_type": "call"}, {"api_name": "click.option", "line_number": 15, "usage_type": "call"}, {"api_name": "click.option", "line_number": 16, "usage_type": "call"}, {"api_name": "click.option", "line_number": 17, "usage_type": "call"}]}
+{"seq_id": "97668190", "text": "__author__ = \"thilo_ilg\"\n\n## source - http://www.nltk.org/howto/parse.html\n## cool pages - http://sentdex.com/\n\nfrom nltk import Nonterminal, nonterminals, Production, CFG ## nltk => natural language toolkit\n\nnt1 = Nonterminal('NP')\nnt2 = Nonterminal('VP')\n\nprint(nt1.symbol())\n\nprint(nt1 == Nonterminal('NP'))\nprint(nt1 == nt2)\n\nS, NP, VP, PP = nonterminals('S, NP, VP, PP')\nN, V, P, DT = nonterminals('N, V, P, DT')\nprod1 = Production(S, [NP, VP])\nprod2 = Production(NP, [DT, NP])\n\nprint(prod1.lhs())\nprint(prod1.rhs())\n\nprint(prod1 == Production(S, [NP, VP]))\nprint(prod1 == prod2)\n\ngrammar = CFG.fromstring(\"\"\"\nS -> NP VP\nPP -> P NP\nNP -> 'the' N | N PP | 'the' N PP\nVP -> V NP | V PP | V NP PP\nN -> 'cat'\nN -> 'dog'\nN -> 'rug'\nV -> 'chased'\nV -> 'sat'\nP -> 'in'\nP -> 'on'\n\"\"\")", "sub_path": "parse.py", "file_name": "parse.py", "file_ext": "py", "file_size_in_byte": 782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "nltk.Nonterminal", "line_number": 8, "usage_type": "call"}, {"api_name": "nltk.Nonterminal", "line_number": 9, "usage_type": "call"}, {"api_name": "nltk.Nonterminal", "line_number": 13, "usage_type": "call"}, {"api_name": "nltk.nonterminals", "line_number": 16, "usage_type": "call"}, {"api_name": "nltk.nonterminals", "line_number": 17, "usage_type": "call"}, {"api_name": "nltk.Production", "line_number": 18, "usage_type": "call"}, {"api_name": "nltk.Production", "line_number": 19, "usage_type": "call"}, {"api_name": "nltk.Production", "line_number": 24, "usage_type": "call"}, {"api_name": "nltk.CFG.fromstring", "line_number": 27, "usage_type": "call"}, {"api_name": "nltk.CFG", "line_number": 27, "usage_type": "name"}]}
+{"seq_id": "468487855", "text": "# coding= utf-8\n\n#>>> instance = someclass() # someclass defines __call__\n#>>> instance(argument) # equivalent to instance.__call__(argument)\n\nimport datetime, io #, cgi\nimport urllib.parse\nfrom xml.sax import saxutils\nfrom wsgiref import util\n\nclass Server(object):\n\tdef __init__(self):\n\t\tself.messages = []\n\t\t\n\t#self: unlike C++ or Ruby, Python takes the calling instance explicitly as an argument\n\tdef __call__(self, environ, start_response):\n\t\tmethod = environ['REQUEST_METHOD']\n\t\tif method == 'GET':\n\t\t\treturn self.listMessages(environ, start_response)\n\t\telif method == 'POST':\n\t\t\treturn self.addMessage(environ, start_response)\n\t\telse:\n\t\t\tstart_response('501 NotImplemented', [('Content-type', 'text/plain')])\n\t\t\treturn '501 NotImplemented'\n\t\t\t\n\tdef addMessage(self, environ, start_response):\n\t\tinpt = environ['wsgi.input']\n\t\tlength = int(environ.get('CONTENT_LENGTH', 0))\n\t\t\n\t\t#Parse the received data and convert it into a dict\n\t\t#Note: without specifying how many bytes to read from wsgi.input, the read() call never returns and execution blocks.\n\t\t\n\t\t#POSTed data can be parsed with parse_qsl/parse_qs from the cgi module -> cgi.parse_qs is deprecated, so urllib.parse.parse_qs is used instead\n\t\t#query = dict(cgi.parse_qs(inpt.read(length))) #dict([(key, value), (key, value)])\n\t\tquery = dict(urllib.parse.parse_qs(inpt.read(length))) #dict([(key, value), (key, value)])\n\t\ttmp = {}\n\t\tfor k, v in query.items():\n\t\t\t#print(k, v)\n\t\t\ttmp[k.decode('utf-8')] = v[0].decode('utf-8')\n\t\tquery = tmp\n\t\tmsg = {\n\t\t\t'name': query['name'],\n\t\t\t'title': query['title'],\n\t\t\t'body': query['body'],\n\t\t\t'date': datetime.datetime.now()}\n\t\tself.messages.append(msg)\n\t\t\n\t\t# Redirect\n\t\t# We could call listMessages directly, but then reloading the page after posting would submit the same message twice, so a redirect is better.\n\t\tstart_response('303 See Other', [\n\t\t\t('Content-type', 'text/plain'), \n\t\t\t('Location', util.request_uri(environ))])\n\t\t\n\t\treturn ''\n\n\tdef listMessages(self, environ, start_response):\n\t\t#Build the response body in an in-memory buffer.\n\t\t#StringIO/BytesIO: objects that provide file-like I/O over an in-memory buffer; the response HTML is written into it with write().\n\t\tfp = io.BytesIO()\n\t\thead = r'''\n<html><head><title>Message Board</title></head>\n<body>\n<table>\n'''\n\t\tfp.write(head.encode('utf-8'))\n\t\tfor msg in reversed(self.messages):\n\t\t\n\t\t\tesc = saxutils.escape #Neutralize HTML tags in posted content (XSS countermeasure).\n\t\t\ttmp={}\n\t\t\tfor key, value in msg.items():\n\t\t\t\tvalue = str(value)\n\t\t\t\ttmp[key] = str(esc(value))\n\t\t\t\tprint(key, str(esc(value)))\n\t\t\tdata = '''
<tr><th>title</th><td>{title}</td></tr>\n<tr><th>name</th><td>{name}</td></tr>\n<tr><th>date</th><td>{date}</td></tr>\n<tr><th>message</th><td>{body}</td></tr>\n'''.format(**tmp)\n\t\t\tfp.write(data.encode('utf-8'))\n\n\t\tdata = '''</table>\n<form method=\"post\" action=\"{0}\">\nname <input type=\"text\" name=\"name\">\ntitle <input type=\"text\" name=\"title\">\nbody <textarea name=\"body\"></textarea>\n<input type=\"submit\">\n</form>\n</body>\n</html>
\n'''.format(util.request_uri(environ))\n\t\tfp.write(data.encode('utf-8'))\n\t\t\n\t\t#Rewind the read position to the start of the buffer\n\t\tfp.seek(0)\n\n\t\tstart_response('200 OK', [('Content-type', 'text/html; charset=utf-8')])\n\t\treturn fp \n\t\t# Like a file object, a StringIO/BytesIO buffer is iterable, so it can be used as the WSGI app's return value. Note that write() advances the buffer's position, so seek() must reset it to the start before the buffer is returned.", "sub_path": "appserver/wsgi/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 4009, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "urllib.parse.parse.parse_qs", "line_number": 35, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 35, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 35, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 45, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 45, "usage_type": "attribute"}, {"api_name": "wsgiref.util.request_uri", "line_number": 52, "usage_type": "call"}, {"api_name": "wsgiref.util", "line_number": 52, "usage_type": "name"}, {"api_name": "io.BytesIO", "line_number": 59, "usage_type": "call"}, {"api_name": "xml.sax.saxutils.escape", "line_number": 69, "usage_type": "attribute"}, {"api_name": "xml.sax.saxutils", "line_number": 69, "usage_type": "name"}, {"api_name": "wsgiref.util.request_uri", "line_number": 98, "usage_type": "call"}, {"api_name": "wsgiref.util", "line_number": 98, "usage_type": "name"}]}
+{"seq_id": "549599072", "text": "from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.forms import UserCreationForm\n\nfrom django.contrib.auth import authenticate, login, logout\n\nfrom django.contrib import messages\n\nfrom django.contrib.auth.decorators import login_required\n\nfrom . 
import urls\n\nfrom .decorators import unauthenticated_user\nfrom django.forms import inlineformset_factory ,formset_factory# multiple form within one form\nfrom django.urls import NoReverseMatch, reverse\nfrom .models import *\nfrom .forms import OrderForm,CreateUserForm\nfrom .filters import *\n\n# Create your views here.\ndef registerPage(request):\n\n\tform = CreateUserForm()\n\tif request.method == 'POST':\n\t\tform = CreateUserForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tuser = form.cleaned_data.get('username')\n\t\t\tmessages.success(request, 'Account was created for ' + user)\n\n\t\t\treturn redirect('accounts:login')\n\n\n\tcontext = {'form':form}\n\treturn render(request, 'accounts/register.html', context)\n\ndef loginPage(request):\n\tif request.method == 'POST':\n\t\tusername = request.POST.get('username')\n\t\tpassword =request.POST.get('password')\n\n\t\tuser = authenticate(request, username=username, password=password)\n\n\t\tif user is not None:\n\t\t\tlogin(request, user)\n\t\t\treturn redirect('accounts:home')\n\t\telse:\n\t\t\tmessages.info(request, 'Username OR password is incorrect')\n\n\tcontext = {}\n\treturn render(request, 'accounts/login.html', context)\n\ndef logoutUser(request):\n\tlogout(request)\n\treturn redirect('accounts:login')\n\n\n@login_required(login_url='accounts:login')\ndef home(request):\n\torders = Order.objects.all()\n\tcustomers = Customer.objects.all()\n\n\ttotal_customers = customers.count()\n\n\ttotal_orders = orders.count()\n\tdelivered = orders.filter(status='Delivered').count()\n\tpending = orders.filter(status='Pending').count()\n\n\tcontext = {'orders':orders, 'customers':customers,\n\t'total_orders':total_orders,'delivered':delivered,\n\t'pending':pending }\n\n\treturn render(request, 'accounts/dashboard.html', context)\n\n# @login_required(login_url='login')\ndef userPage(request):\n context={}\n return render(request,'accounts/user.html',context)\n\n# def home(request):\n# orders=Order.objects.all()\n# customers=Customer.objects.all()\n# total_customer=customers.count()\n# total_orders= orders.count()\n# delivered=orders.filter(status='delivered').count()\n# pending=orders.filter(status='pending').count()\n# context={\n# 'orders':orders,\n# 'customers':customers,\n# 'total_orders':total_orders,\n# 'total_customer':total_customer,\n# 'pending':pending,\n# 'delivered':delivered,\n# }\n# return render(request,'accounts/dashboard.html',context)\n@login_required(login_url='accounts:login')\ndef products(request):\n products=Product.objects.all()\n context={\n 'products':products,\n }\n return render(request,'accounts/products.html',context)\n\n@login_required(login_url='accounts:login')\ndef customer(request, pk_test):\n customer=Customer.objects.get(id=pk_test)\n orders=customer.order_set.all()\n order_count=orders.count()\n myFilter=OrderFilter(request.GET,queryset=orders)\n orders =myFilter.qs\n context={'customer': customer,\n 'orders':orders,\n 'myFilter':myFilter}\n\n return render(request,'accounts/customer.html',context)\n\n@login_required(login_url='accounts:login')\ndef createOrder(request,pk):\n OrderFormSet=inlineformset_factory(Customer,Order, fields=('product','status'))\n #parent child\n customer=Customer.objects.get(id=pk)\n formset=OrderFormSet(queryset=Order.objects.none(),instance=customer)\n # form=OrderForm(initial={'customer':customer})\n\n if request.method == \"POST\":\n # print(\"request :\",request.POST)\n form=OrderForm(request.POST)\n 
formset=OrderFormSet(request.POST,instance=customer)\n if formset.is_valid():\n formset.save()\n return redirect('/')\n context={'form':formset}\n return render(request,'accounts/order_form.html',context)\n\n@login_required(login_url='accounts:login')\n\ndef updateOrder(request,pk):\n # OrderFormSet=formset_factory(Order)\n # #parent child\n # customer=Customer.objects.get(id=pk)\n order=Order.objects.get(id=pk)\n # OrderFormSet=inlineformset_factory(Customer,Order, fields=('product','status'))\n form=OrderForm(instance=order)\n # formset=OrderFormSet(initial=[{'customer':customer,}])\n\n if request.method == \"POST\":\n form=OrderForm(request.POST,instance=order)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n context={'form':form}\n return render(request,'accounts/order_form.html',context)\n\n@login_required(login_url='accounts:login')\ndef deleteOrder(request,pk):\n order=Order.objects.get(id=pk)\n if request.method== \"POST\":\n order.delete()\n return redirect('/')\n\n context={'item':order}\n return render(request,'accounts/delete.html',context)\n", "sub_path": "accounts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "forms.CreateUserForm", "line_number": 23, "usage_type": "call"}, {"api_name": "forms.CreateUserForm", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 29, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 29, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 42, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.messages.info", "line_number": 48, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 48, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 54, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 58, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 78, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 102, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 96, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 115, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 104, "usage_type": "call"}, {"api_name": "django.forms.inlineformset_factory", "line_number": 119, "usage_type": "call"}, {"api_name": "forms.OrderForm", "line_number": 127, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 131, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 133, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 117, "usage_type": 
"call"}, {"api_name": "forms.OrderForm", "line_number": 143, "usage_type": "call"}, {"api_name": "forms.OrderForm", "line_number": 147, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 150, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 153, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 135, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 160, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 163, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 155, "usage_type": "call"}]} +{"seq_id": "458484462", "text": "import pygame\n\nfrom constants import *\n\nclass Block:\n\n def __init__(self, x=0, y=0, color=GREEN, color2=RED, width=100, height=100, text=0):\n self.x = x\n self.y = y\n self.color = color\n self.width = width\n self.height = height\n self.color2 = color2\n self.text = text\n\n self.speed = 5\n self.direction = [0, 0]\n\n def render(self):\n global SCREEN\n pygame.draw.rect(SCREEN, self.color, [self.x, self.y, self.width, self.height])\n pygame.draw.line(SCREEN, self.color2, (self.x, self.y), (self.x + self.width, self.y))\n pygame.draw.line(SCREEN, self.color2, (self.x, self.y), (self.x, self.y+self.height))\n pygame.draw.line(SCREEN, self.color2, (self.x+self.width, self.y), (self.x + self.width, self.y+self.height))\n pygame.draw.line(SCREEN, self.color2, (self.x, self.y+self.height), (self.x + self.width, self.y+self.height))\n\n font = pygame.font.SysFont(None, 72)\n t = font.render(str(self.text), True, self.color2, self.color)\n textRect = t.get_rect()\n textRect.center = (self.x + self.width // 2, self.y + self.height // 2)\n SCREEN.blit(t, textRect)\n\n def update(self):\n\n self.x += self.speed * self.direction[0]\n self.y += self.speed * self.direction[1]\n \n self.render()\n\n def hover(self):\n x,y = pygame.mouse.get_pos()\n\n return (x >= self.x and y >= self.y and x <= self.x + self.width and y <= self.y + self.height)\n \n def clicked(self):\n\n a, b, c = pygame.mouse.get_pressed()\n\n return self.hover() and a\n", "sub_path": "Classes/blocks.py", "file_name": "blocks.py", "file_ext": "py", "file_size_in_byte": 1622, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pygame.draw.rect", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 27, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 27, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 41, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 47, "usage_type": "call"}, 
{"api_name": "pygame.mouse", "line_number": 47, "usage_type": "attribute"}]} +{"seq_id": "468649128", "text": "__author__ = 'dipsy'\n\nimport requests\nimport json\nimport re\nfrom os import listdir\nfrom os.path import isfile, join, exists\n\n'''\nClass to load model information from Github.\nFrom a public Github repo, if you point it to a base models folder, it assumes that the name of the models is the\nfolder name, and inside that folder all models for that source exist with the naming convention *-model.ttl.\nThe root for the corresponding model is assumed to be in a file named *-root.txt\nFor example if you have a models folder:\n\n-models\n---|-google\n---|----|-google-model.ttl\n---|----|-google-root.txt\n---|-yahoo\n---|----|-yahoo1-model.ttl\n---|----|-yahoo1-root.txt\n---|----|-yahoo2-model.ttl\n---|----|-yahoo2-root.txt\n---|----|-yahoo2-model.MD\n---|----|-x.png\n\nIt will extract the model information and return an array as follows:\n[\n{\"name\":\"google\", \"url\":\"absolute url of google-model.ttl\", \"root\":\"contents of google-root.txt file\"},\n{\"name\":\"yahoo\", \"url\":\"absolute url of yahoo1-model.ttl\", \"root\":\"contents of yahoo1-root.txt file\"},\n{\"name\":\"yahoo\", \"url\":\"absolute url of yahoo2-model.ttl\", \"root\":\"contents of yahoo2-root.txt file\"},\n]\n\nSee the main method for usage\n'''\n\nclass GitModelLoader:\n\n def __init__(self, organization, repository_name, branch, local_folder=None):\n self.base_url = \"https://api.github.com/repos/\" + organization + \"/\" + repository_name + \"/contents/\"\n self.raw_url = \"https://raw.githubusercontent.com/\" + organization + \"/\" + repository_name + \"/\" + branch\n self.branch = branch\n self.local_folder = local_folder\n\n def get_models_from_folder(self, folder_path):\n if self.local_folder is not None:\n return self.get_models_from_directory(folder_path)\n else:\n return self.get_models_from_github_folder(folder_path)\n\n def get_models_from_github_folder(self, folder_path):\n models_folder_json_arr = self.__query_github(folder_path)\n models = []\n if models_folder_json_arr is not None:\n for models_folder_json in models_folder_json_arr:\n model_name = models_folder_json[\"name\"]\n if models_folder_json[\"type\"] == \"dir\":\n models_inner_folder_json = self.__query_github(models_folder_json[\"path\"])\n\n #First get all the models\n model_uris = {}\n for model_file_details in models_inner_folder_json:\n file_name = model_file_details['name']\n if re.search(r'.*-model\\.ttl$', file_name):\n model_uris[file_name] = model_file_details[\"download_url\"]\n\n #Now get all roots for the models\n for model_file_name in model_uris:\n root_file_name = model_file_name[0: len(model_file_name)-10] + \"-root.txt\"\n #print \"FInd:\", root_file_name\n\n #Now find the root file and load it\n root = None\n for model_file_details in models_inner_folder_json:\n if model_file_details[\"name\"] == root_file_name:\n root = self.__get_request(model_file_details[\"download_url\"])\n break\n\n if root is not None:\n root = root.strip()\n models.append({\"name\": model_name, \"url\": model_uris[model_file_name], \"root\": root})\n\n return models\n\n def get_models_from_directory(self, folder_path):\n models = []\n folders = [f for f in listdir(self.local_folder + \"/\" + folder_path) if not isfile(join(self.base_url + \"/\" + folder_path, f))]\n for folder in folders:\n folder_name = self.local_folder + \"/\" + folder_path + \"/\" + folder\n if isfile(folder_name):\n continue\n\n model_name = folder\n folder_files 
= [f for f in listdir(folder_name) if isfile(join(folder_name, f))]\n\n #First get all the models\n model_uris = {}\n for f in folder_files:\n if re.search(r'.*-model\\.ttl$', f):\n model_uris[f] = self.raw_url + \"/\" + folder_path + \"/\" + model_name + \"/\" + f\n\n for model_file_name in model_uris:\n root_file_name = model_file_name[0: len(model_file_name)-10] + \"-root.txt\"\n root = self.__read_file(folder_name + \"/\" + root_file_name)\n\n if root is not None:\n root = root.strip()\n models.append({\"name\": model_name, \"url\": model_uris[model_file_name], \"root\": root})\n return models\n\n def __query_github(self, folder_path):\n url = self.base_url + folder_path + \"?ref=\" + self.branch\n print(url)\n response = self.__get_request(url)\n if response is not None:\n return json.loads(response)\n return None\n\n def __get_request(self, url):\n response = requests.get(url, verify=False, timeout=10*60)\n if response.status_code == requests.codes.ok:\n return str(response.content)\n return None\n\n\n def __read_file(self, filename):\n content = None\n if exists(filename):\n with open(filename, 'r') as content_file:\n content = content_file.read()\n\n return content\n\nif __name__ == \"__main__\":\n gitModelLoader = GitModelLoader(\"usc-isi-i2\", \"effect-alignment\", \"master\", \"/Users/dipsy/github-effect/effect/effect-alignment\")\n models = gitModelLoader.get_models_from_folder(\"models\")\n print (json.dumps(models))", "sub_path": "digWorkflow/git_model_loader.py", "file_name": "git_model_loader.py", "file_ext": "py", "file_size_in_byte": 5650, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "re.search", "line_number": 65, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 91, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "re.search", "line_number": 100, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 117, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 121, "usage_type": "call"}, {"api_name": "requests.codes", "line_number": 122, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 129, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "117647386", "text": "import math, pygame\nimport numpy as np\nfrom utils.grid import Grid\nfrom utils.extramath import lerp\nfrom utils import pygame_utils\n\n\nclass FunctionDrawing(Grid):\n\n def __init__(self, window=((-10, 10), (-10, 10)), interval=1, step=.000001, color = (255,0,0)):\n\n self.color = color\n self.step = step\n\n super().__init__(window, interval)\n\n self.vectorized_func = np.vectorize(self.function)\n self.vectorized_draw = np.vectorize(self.draw_point)\n\n self.x_points = np.array([lerp(self.left, self.right, t) for t in np.arange(0, 1+step, step)])\n self.y_points = -self.vectorized_func(self.x_points)\n\n self.scaled_x = pygame_utils.scale_points(self.x_points, self.scale[0], self.screen_center[0])\n self.scaled_y = pygame_utils.scale_points(self.y_points, self.scale[1], self.screen_center[1])\n\n\n def 
function(self, x):\n return x\n\n def draw_point(self, x, y, screen):\n pygame.draw.circle(screen, self.color, (int(x),int(y)), 2)\n\n def draw_function(self, screen):\n self.draw_grid(screen)\n self.vectorized_draw(self.scaled_x, self.scaled_y, screen)", "sub_path": "utils/function.py", "file_name": "function.py", "file_ext": "py", "file_size_in_byte": 1142, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "utils.grid.Grid", "line_number": 8, "usage_type": "name"}, {"api_name": "numpy.vectorize", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.vectorize", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.extramath.lerp", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.pygame_utils.scale_points", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.pygame_utils", "line_number": 23, "usage_type": "name"}, {"api_name": "utils.pygame_utils.scale_points", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.pygame_utils", "line_number": 24, "usage_type": "name"}, {"api_name": "pygame.draw.circle", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 31, "usage_type": "attribute"}]} +{"seq_id": "271941045", "text": "# Copyright (c) 2019 Aiven, Helsinki, Finland. https://aiven.io/\nfrom __future__ import annotations\n\nimport importlib.util\nimport os\nimport subprocess\nimport sys\n\nPROJECT_DIR = os.path.dirname(os.path.realpath(__file__))\n\n\ndef save_version(new_version: str | None, old_version: str | None, version_file_path: str) -> bool:\n if not new_version:\n return False\n if not old_version or new_version != old_version:\n with open(version_file_path, \"w\") as fp:\n fp.write(f'__version__ = \"{new_version}\"\\n')\n return True\n\n\ndef get_project_version(version_file_relpath: str) -> str:\n version_file_abspath = os.path.join(PROJECT_DIR, version_file_relpath)\n file_version = None\n try:\n module_spec = importlib.util.spec_from_file_location(\"verfile\", location=version_file_abspath)\n if module_spec:\n module = importlib.util.module_from_spec(module_spec)\n if module_spec.loader:\n module_spec.loader.exec_module(module)\n file_version = module.__version__\n else:\n print(f\"Could not load module spec from version file location: {version_file_abspath!r}\")\n except IOError:\n print(f\"Could not load version module from spec (file location: {version_file_abspath!r})\")\n\n os.chdir(os.path.dirname(__file__) or \".\")\n try:\n git_out = subprocess.check_output([\"git\", \"describe\", \"--always\"], stderr=getattr(subprocess, \"DEVNULL\", None))\n except (OSError, subprocess.CalledProcessError):\n pass\n else:\n git_version = git_out.splitlines()[0].strip().decode(\"utf-8\")\n if \".\" not in git_version:\n git_version = f\"0.0.1-0-unknown-{git_version}\"\n if save_version(git_version, file_version, version_file_abspath):\n print(f\"Version resolved from git: {git_version}\")\n return git_version\n\n short_version = subprocess.run([\"git\", \"describe\", \"--abbrev=0\"], check=True, text=True, capture_output=True).stderr\n if save_version(short_version, file_version, version_file_abspath):\n print(f\"Short version resolved from git abbrev: {short_version}\")\n return short_version\n\n if not file_version:\n raise Exception(f\"version not available from git or from file 
{version_file_abspath!r}\")\n else:\n print(f\"Version resolved from file: {file_version}\")\n return file_version\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Usage: python3 version.py \")\n sys.exit(1)\n get_project_version(sys.argv[1])\n", "sub_path": "version.py", "file_name": "version.py", "file_ext": "py", "file_size_in_byte": 2548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.dirname", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "importlib.util.util.spec_from_file_location", "line_number": 25, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 25, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 25, "usage_type": "name"}, {"api_name": "importlib.util.util.module_from_spec", "line_number": 27, "usage_type": "call"}, {"api_name": "importlib.util.util", "line_number": 27, "usage_type": "attribute"}, {"api_name": "importlib.util", "line_number": 27, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 38, "usage_type": "call"}, {"api_name": "subprocess.CalledProcessError", "line_number": 39, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 49, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 62, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 64, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 65, "usage_type": "attribute"}]} +{"seq_id": "453487032", "text": "import os\nimport random\nimport numpy as np\nimport time\nimport torch\nfrom tqdm import tqdm\nfrom torch.utils.data.dataset import random_split\nfrom torch.utils.data import DataLoader\nfrom gan_training import text_classification_data\n\nfrom gan_training.models.EmbeddingBag import TextEmbeddingBag\nfrom gan_training.models.BiLSTM import TextBiLSTM\n\nseed = 2020\ntorch.manual_seed(seed)\nrandom.seed(seed)\nnp.random.seed(seed)\n\nNGRAMS = 2\nN_EPOCHS = 5\nBATCH_SIZE = 32\nEMBED_DIM = 64\nHIDDEN_DIM = 64\nN_LAYERS = 1\nDROPOUT = 0.1\n\nmin_valid_loss = float('inf')\n\ndevice = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n\nif not os.path.isdir('../data'):\n os.mkdir('../data')\n\ntrain_dataset, test_dataset = text_classification_data.DATASETS['AG_NEWS'](\n root='../data', ngrams=NGRAMS, vocab=None)\n\n\ntrain_len = int(len(train_dataset) * 0.95)\nsub_train_, sub_valid_ = random_split(train_dataset, [train_len, len(train_dataset) - train_len])\n\nVOCAB_SIZE = len(train_dataset.get_vocab())\nNUN_CLASS = len(train_dataset.get_labels())\n# model = TextEmbeddingBag(VOCAB_SIZE, EMBED_DIM, NUN_CLASS).to(device)\nmodel = TextBiLSTM(VOCAB_SIZE, EMBED_DIM, NUN_CLASS, HIDDEN_DIM, N_LAYERS, DROPOUT).to(device)\n\ncriterion = torch.nn.CrossEntropyLoss().to(device)\noptimizer = torch.optim.SGD(model.parameters(), lr=4.0)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.9)\n\n\ndef train_func(sub_train_):\n\n # Train the model\n train_loss = 0\n 
train_acc = 0\n data = DataLoader(sub_train_, batch_size=BATCH_SIZE, shuffle=True,\n collate_fn=collate_batch_lstm, drop_last=True)\n model.train()\n for i, (text, length, cls) in tqdm(enumerate(data)):\n optimizer.zero_grad()\n text, cls = text.to(device), cls.to(device)\n # output = model(text, offsets)\n output = model(text, length)\n loss = criterion(output, cls)\n train_loss += loss.item()\n loss.backward()\n optimizer.step()\n train_acc += (output.argmax(1) == cls).sum().item()\n\n # Adjust the learning rate\n scheduler.step()\n\n return train_loss / len(sub_train_), train_acc / len(sub_train_)\n\n\ndef test(data_):\n loss = 0\n acc = 0\n data = DataLoader(data_, batch_size=BATCH_SIZE, collate_fn=collate_batch_lstm)\n model.eval()\n for text, length, cls in data:\n text, cls = text.to(device), cls.to(device)\n with torch.no_grad():\n # output = model(text, offsets)\n output = model(text, length)\n # accumulate the scalar loss over batches\n batch_loss = criterion(output, cls)\n loss += batch_loss.item()\n acc += (output.argmax(1) == cls).sum().item()\n\n return loss / len(data_), acc / len(data_)\n\n\ndef generate_batch(batch):\n label = torch.tensor([entry[0] for entry in batch])\n text = [entry[1] for entry in batch]\n offsets = [0] + [len(entry) for entry in text]\n offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)\n text = torch.cat(text)\n return text, offsets, label\n\n\ndef collate_batch_lstm(batch):\n sorted_batch = sorted(batch, key=lambda x: x[1].shape[0], reverse=True)\n sequences = [x[1] for x in sorted_batch]\n sequences_padded = torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True)\n lengths = torch.LongTensor([len(x) for x in sequences])\n labels = torch.LongTensor([x[0] for x in sorted_batch])\n return sequences_padded, lengths, labels\n\n\nfor epoch in range(N_EPOCHS):\n\n start_time = time.time()\n train_loss, train_acc = train_func(sub_train_)\n valid_loss, valid_acc = test(sub_valid_)\n\n secs = int(time.time() - start_time)\n mins = secs / 60\n secs = secs % 60\n\n print('Epoch: %d' %(epoch + 1), \" | time in %d minutes, %d seconds\" %(mins, secs))\n print(f'\\tLoss: {train_loss:.4f}(train)\\t|\\tAcc: {train_acc * 100:.1f}%(train)')\n print(f'\\tLoss: {valid_loss:.4f}(valid)\\t|\\tAcc: {valid_acc * 100:.1f}%(valid)')\n\n\nprint('Checking the results of test dataset...')\ntest_loss, test_acc = test(test_dataset)\nprint(f'\\tLoss: {test_loss:.4f}(test)\\t|\\tAcc: {test_acc * 100:.1f}%(test)')", "sub_path": "gan_training/train/train_cls.py", "file_name": "train_cls.py", "file_ext": "py", "file_size_in_byte": 4036, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.manual_seed", "line_number": 15, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 32, "usage_type": "call"}, {"api_name": "gan_training.text_classification_data.DATASETS", "line_number": 34, "usage_type": "attribute"}, {"api_name": "gan_training.text_classification_data", 
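# Aside: a dataset-free illustration of what collate_batch_lstm above produces
# (dummy tensors only): the batch is sorted longest-first, padded into a
# rectangle, and returned with the true lengths the LSTM needs for packing.
import torch

batch = [(1, torch.tensor([7, 8, 9])), (0, torch.tensor([5])), (2, torch.tensor([3, 4]))]
sorted_batch = sorted(batch, key=lambda x: x[1].shape[0], reverse=True)
padded = torch.nn.utils.rnn.pad_sequence([x[1] for x in sorted_batch], batch_first=True)
print(padded)  # rows ordered longest-first, shorter rows zero-padded
print(torch.LongTensor([len(x[1]) for x in sorted_batch]))  # tensor([3, 2, 1])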
"line_number": 34, "usage_type": "name"}, {"api_name": "torch.utils.data.dataset.random_split", "line_number": 39, "usage_type": "call"}, {"api_name": "gan_training.models.BiLSTM.TextBiLSTM", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 56, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.nn.utils.rnn.pad_sequence", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 105, "usage_type": "attribute"}, {"api_name": "torch.LongTensor", "line_number": 106, "usage_type": "call"}, {"api_name": "torch.LongTensor", "line_number": 107, "usage_type": "call"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "649850818", "text": "import json\nimport math\nfrom threading import Thread, Lock\nfrom time import sleep\n\n\nclass NotInCorrectRange(Exception):\n pass\n\n\nclass Control:\n def __init__(self, pin):\n self.pin = pin\n self.throttle = None\n self.min_freq = None\n self.max_freq = None\n\n def set_pulse_width_range(self, min_freq, max_freq):\n self.min_freq = min_freq\n self.max_freq = max_freq\n\n\nclass ContinuousRotationServo:\n max_freq = 1900\n min_freq = 1100\n middle_point = (max_freq + min_freq) / 2\n power_multiplier = ((max_freq - min_freq) / 2) / 100\n\n def __init__(self, pin):\n self.control = None\n self.pin = abs(int(pin))\n self.pervane = -1 if pin[0] == \"-\" else 1\n self.motor_initialize()\n\n self.running = True\n self.power = 0\n self.lock = Lock()\n self.thread = Thread(target=self.motor_thread)\n self.thread.start()\n\n def motor_thread(self):\n slp = 0.01\n prev_power = 0\n control = self.control\n while self.running:\n self.lock.acquire()\n current_power = self.power\n print(\"Pin:\", self.pin, \"current_power:\", current_power, \"prev_power:\", prev_power)\n if prev_power == current_power:\n continue\n else:\n if current_power - prev_power > 5:\n for i in range(prev_power + 1, current_power + 1, 5):\n control.throttle = i / 100\n sleep(slp)\n elif prev_power - current_power > 5:\n for i in range(prev_power - 1, current_power - 1, -5):\n control.throttle = i / 100\n sleep(slp)\n control.throttle = self.force_to_throttle(current_power * self.pervane)\n prev_power = current_power\n self.power = 0\n\n @staticmethod\n def force_to_throttle(power):\n with open('../../t200.json', 'r') as j:\n sozluk = json.load(j)\n\n if sozluk.get(str(power)):\n return (sozluk.get(str(power)) - 1500) / 400 + 0\n\n p_key = None\n l_key = None\n # sozluk.keys() küçükten büyüğe sıralı olmalı\n for key in sozluk.keys():\n key = 
int(key)\n if power > key:\n p_key = key\n else:\n l_key = key\n break\n\n p_value = sozluk.get(str(p_key))\n l_value = sozluk.get(str(l_key))\n o_value = (l_value - p_value) / (int(l_key) - int(p_key)) * (power - p_key) + p_value\n return (o_value - 1500) / 400 + 0\n\n def motor_initialize(self):\n self.control = Control(self.pin)\n self.control.set_pulse_width_range(1135, 1935)\n\n def _change_power(self, power):\n \"\"\"\n :param power: this parameter takes a value between -100 and 100. Negative values make it work backward,\n positive values make it work forward.\n :return:\n \"\"\"\n if power != self.power:\n self.power = power\n if self.lock.locked():\n self.lock.release()\n\n def run_clockwise(self, power):\n \"\"\"\n Pushes the water backward. The motor tends to move forward.\n :param power:\n :return:\n \"\"\"\n if not 0 <= power <= 100:\n raise NotInCorrectRange(\"Power must be between 0 and 100.\")\n return self._change_power(power)\n\n def run_counterclockwise(self, power):\n \"\"\"\n Pushes the water forward. The motor tends to move backward.\n :param power:\n :return:\n \"\"\"\n if not 0 <= power <= 100:\n raise NotInCorrectRange(\"Power must be between 0 and 100.\")\n return self._change_power(-power)\n\n def run_bidirectional(self, power):\n if power >= 0:\n self.run_clockwise(power)\n else:\n self.run_counterclockwise(-power)\n\n def stop(self):\n print(self.pin, \"motor stop...\")\n if self.running:\n print(self.pin, \"motor shutting down...\")\n self.running = False\n self.power = 0\n if self.lock.locked():\n self.lock.release()\n self.thread.join()\n print(self.pin, \"motor shut down...\")\n\n\nclass RovMovement:\n def __init__(self, xy_lf_pin, xy_rf_pin, xy_lb_pin, xy_rb_pin, z_lf_pin, z_rf_pin, z_lb_pin, z_rb_pin, arm_pin,\n initialize_motors=True):\n self.xy_lf = ContinuousRotationServo(xy_lf_pin)\n self.xy_rf = ContinuousRotationServo(xy_rf_pin)\n self.xy_lb = ContinuousRotationServo(xy_lb_pin)\n self.xy_rb = ContinuousRotationServo(xy_rb_pin)\n self.z_lf = ContinuousRotationServo(z_lf_pin)\n self.z_rf = ContinuousRotationServo(z_rf_pin)\n self.z_lb = ContinuousRotationServo(z_lb_pin)\n self.z_rb = ContinuousRotationServo(z_rb_pin)\n # self.arm = StandardServo(arm_pin)\n self.z_motors_list = [self.z_lf, self.z_rf, self.z_lb, self.z_rb]\n self.xy_motors_list = [self.xy_lf, self.xy_rf, self.xy_lb, self.xy_rb]\n self.all_motors_list = self.z_motors_list + self.xy_motors_list\n self.arm_status = False\n self.open_arm()\n if initialize_motors:\n self._initialize_motors()\n sleep(2)\n\n def _initialize_motors(self):\n print(\"All motors initializing...\")\n mp = 30\n for i in list(range(0, mp)) + list(range(mp, -mp, -1)) + list(range(-mp, 1)):\n print(\"Power:\", i)\n for motor in self.all_motors_list:\n motor.run_bidirectional(i)\n sleep(0.01)\n print(\"All motors initialized...\")\n\n def go_up(self, power):\n power_per_motor = int(power / 4)\n for motor in self.z_motors_list:\n motor.run_clockwise(power_per_motor)\n\n def go_down(self, power):\n power_per_motor = int(power / 4)\n for motor in self.z_motors_list:\n motor.run_counterclockwise(power_per_motor)\n\n def turn_left(self, power):\n power = power / 4\n power_per_motor = int(power / 4)\n self.xy_rf.run_clockwise(power_per_motor)\n self.xy_lf.run_counterclockwise(power_per_motor)\n self.xy_lb.run_clockwise(power_per_motor)\n self.xy_rb.run_counterclockwise(power_per_motor)\n\n def turn_right(self, power):\n power = power / 4\n power_per_motor = int(power / 4)\n self.xy_rf.run_counterclockwise(power_per_motor)\n 
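# Aside: force_to_throttle above linearly interpolates between the two nearest
# entries of the t200.json thrust table, then rescales the pulse width
# (centred on 1500 us, +/-400 us of range) to a -1..1 throttle. The same
# arithmetic in isolation, with a hypothetical two-entry table:
def interpolate_throttle(power, table):
    # table maps thrust -> pulse width in microseconds
    if power in table:
        return (table[power] - 1500) / 400
    lower = max(k for k in table if k < power)
    upper = min(k for k in table if k >= power)
    pulse = (table[upper] - table[lower]) / (upper - lower) * (power - lower) + table[lower]
    return (pulse - 1500) / 400

print(interpolate_throttle(5, {0: 1500, 10: 1700}))  # halfway -> 1600 us -> 0.25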
self.xy_lf.run_clockwise(power_per_motor)\n self.xy_lb.run_counterclockwise(power_per_motor)\n self.xy_rb.run_clockwise(power_per_motor)\n\n def go_xy(self, power, degree):\n \"\"\"\n :param power: Power sent to the vehicle's movement\n :param degree: degree of movement (between 0-360 degrees)\n 0 -> forward\n 90 -> right\n 180 -> backward\n 270 -> left\n :return:\n \"\"\"\n power_per_motor = int(power / 2)\n\n radian_rf = (45 - degree) / 180 * math.pi\n radian_lf = (135 - degree) / 180 * math.pi\n radian_lb = (225 - degree) / 180 * math.pi\n radian_rb = (315 - degree) / 180 * math.pi\n\n pow_rf = int(math.sin(radian_rf) * power_per_motor)\n pow_lf = int(math.sin(radian_lf) * power_per_motor)\n pow_lb = int(math.sin(radian_lb) * power_per_motor)\n pow_rb = int(math.sin(radian_rb) * power_per_motor)\n\n self.xy_rf.run_bidirectional(pow_rf)\n self.xy_lf.run_bidirectional(pow_lf)\n self.xy_lb.run_bidirectional(pow_lb)\n self.xy_rb.run_bidirectional(pow_rb)\n\n def go_xy_and_turn(self, power, degree, turn_power):\n \"\"\"\n :param power: Power sent to the vehicle's movement\n :param degree: degree of movement (between 0-360 degrees)\n 0 -> forward\n 90 -> right\n 180 -> backward\n 270 -> left\n :param turn_power: Turn power\n Positive value -> Turn right\n Negative value -> Turn left\n :return:\n \"\"\"\n turn_power = turn_power / 4\n turn_power_per_motor = int(turn_power / 4)\n go_power_per_motor = int(power / 2)\n\n radian_rf = (45 - degree) / 180 * math.pi\n radian_lf = (135 - degree) / 180 * math.pi\n radian_lb = (225 - degree) / 180 * math.pi\n radian_rb = (315 - degree) / 180 * math.pi\n\n pow_rf = int(math.sin(radian_rf) * go_power_per_motor - turn_power_per_motor)\n pow_lf = int(math.sin(radian_lf) * go_power_per_motor + turn_power_per_motor)\n pow_lb = int(math.sin(radian_lb) * go_power_per_motor - turn_power_per_motor)\n pow_rb = int(math.sin(radian_rb) * go_power_per_motor + turn_power_per_motor)\n\n self.xy_rf.run_bidirectional(pow_rf)\n self.xy_lf.run_bidirectional(pow_lf)\n self.xy_lb.run_bidirectional(pow_lb)\n self.xy_rb.run_bidirectional(pow_rb)\n\n def open_arm(self):\n # self.arm.change_angle(180)\n self.arm_status = True\n\n def close_arm(self):\n # self.arm.change_angle(0)\n self.arm_status = False\n\n def toggle_arm(self, arm_status=None):\n if self.arm_status and (arm_status is None or arm_status == False):\n self.close_arm()\n elif not self.arm_status and (arm_status is None or arm_status == True):\n self.open_arm()\n\n def run_all_motors_cw(self, power):\n for motor in self.all_motors_list:\n motor.run_clockwise(power)\n\n def run_all_motors_ccw(self, power):\n for motor in self.all_motors_list:\n motor.run_counterclockwise(power)\n\n def stop(self):\n for motor in self.all_motors_list:\n motor.stop()\n # self.arm.stop()\n\n def close(self):\n self.stop()\n\n\nif __name__ == '__main__':\n rov_movement = RovMovement(xy_lf_pin=\"-2\", xy_rf_pin=\"0\", xy_lb_pin=\"-1\", xy_rb_pin=\"6\",\n z_lf_pin=\"-5\", z_rf_pin=\"3\", z_lb_pin=\"-7\", z_rb_pin=\"4\", arm_pin=8,\n initialize_motors=True\n )\n rov_movement.stop()\n", "sub_path": "denemeler/thread_S/motor_thread.py", "file_name": "motor_thread.py", "file_ext": "py", "file_size_in_byte": 10228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "threading.Lock", "line_number": 37, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 38, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": 
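# Aside: the go_xy mixing above is plain trigonometry: each horizontal
# thruster sits at a 45/135/225/315-degree offset, and its share of the power
# is the sine of (offset - heading). A quick check of those phase offsets,
# independent of the motor classes:
import math

def xy_mix(power, degree):
    per_motor = power / 2
    offsets = {'rf': 45, 'lf': 135, 'lb': 225, 'rb': 315}
    return {name: round(math.sin(math.radians(off - degree)) * per_motor, 1)
            for name, off in offsets.items()}

print(xy_mix(100, 0))   # forward: front pair positive, back pair negative
print(xy_mix(100, 90))  # strafe right: the signs rotate by one quadrant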
"time.sleep", "line_number": 59, "usage_type": "call"}, {"api_name": "json.load", "line_number": 67, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 160, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 169, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 210, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 211, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 212, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 213, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 215, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 216, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 217, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 218, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 242, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 243, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 244, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 245, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 247, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 248, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 249, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "81657288", "text": "import datetime\nimport random\nimport copy\nimport pathlib\n\nimport math\n\nfrom seqscout.utils import read_data, read_data_kosarak, \\\n sequence_mutable_to_immutable, \\\n read_data_sc2, k_length, \\\n compute_first_zero_mask, compute_last_ones_mask, \\\n count_target_class_data, extract_items, compute_quality, compute_quality_vertical, \\\n sequence_immutable_to_mutable, encode_items, encode_data, \\\n print_results_decode, read_jmlr, print_results\n\nfrom seqscout.priorityset import PrioritySet, PrioritySetUCB\nimport seqscout.conf as conf\n\nVERTICAL_TOOLS = {}\nVERTICAL_RPZ = False\n\n\ndef filter_target_class(data, target_class):\n filter_data = []\n for line in data:\n if line[0] == target_class:\n filter_data.append(line)\n\n return filter_data\n\n\ndef get_itemset_memory(data):\n memory = set()\n for line in data:\n for itemset in line[1:]:\n memory.add(frozenset(itemset))\n return memory\n\n\ndef is_included(pattern, pattern_set):\n if pattern in pattern_set:\n return True\n else:\n for x in pattern_set:\n if pattern.issubset(x):\n return True\n return False\n\n\ndef compute_variations_better_quality(sequence, items, data, itemsets_memory, target_class, target_quality, enable_i=True, quality_measure=conf.QUALITY_MEASURE):\n '''\n Compute variations until quality increases\n :param sequence:\n :param items: the list of all possible items\n :return: the best new element (sequence, quality), or None if we are on a local optimum\n '''\n variations = []\n\n if VERTICAL_RPZ:\n bitset_slot_size = VERTICAL_TOOLS['bitset_slot_size']\n itemsets_bitsets = VERTICAL_TOOLS['itemsets_bitsets']\n class_data_count = VERTICAL_TOOLS['class_data_count']\n first_zero_mask = VERTICAL_TOOLS['first_zero_mask']\n last_ones_mask = VERTICAL_TOOLS['last_ones_mask']\n\n for itemset_i, itemset in enumerate(sequence):\n # i_extension\n if enable_i:\n for item_possible in items:\n new_variation_i_extension = copy.deepcopy(sequence)\n new_variation_i_extension[itemset_i].add(item_possible)\n\n # we check if created pattern is present in data before\n if is_included(new_variation_i_extension, itemsets_memory):\n if 
VERTICAL_RPZ:\n new_variation_i_quality, new_variation_i_bitset = compute_quality_vertical(data,\n new_variation_i_extension,\n target_class,\n bitset_slot_size,\n itemsets_bitsets,\n class_data_count,\n first_zero_mask,\n last_ones_mask,\n quality_measure=quality_measure)\n else:\n new_variation_i_quality = compute_quality(data, new_variation_i_extension, target_class)\n\n variations.append(\n (new_variation_i_extension, new_variation_i_quality))\n\n if new_variation_i_quality > target_quality:\n return variations[-1]\n\n # s_extension\n for item_possible in items:\n new_variation_s_extension = copy.deepcopy(sequence)\n new_variation_s_extension.insert(itemset_i, {item_possible})\n\n if VERTICAL_RPZ:\n new_variation_s_quality, new_variation_s_bitset = compute_quality_vertical(data,\n new_variation_s_extension,\n target_class,\n bitset_slot_size,\n itemsets_bitsets,\n class_data_count,\n first_zero_mask,\n last_ones_mask,\n quality_measure=quality_measure)\n else:\n new_variation_s_quality = compute_quality(data,\n new_variation_s_extension,\n target_class)\n\n variations.append(\n (new_variation_s_extension, new_variation_s_quality))\n\n if new_variation_s_quality > target_quality:\n return variations[-1]\n\n for item_i, item in enumerate(itemset):\n new_variation_remove = copy.deepcopy(sequence)\n\n # we can switch this item, remove it or add it as s or i-extension\n\n if (k_length(sequence) > 1):\n new_variation_remove[itemset_i].remove(item)\n\n if len(new_variation_remove[itemset_i]) == 0:\n new_variation_remove.pop(itemset_i)\n\n if VERTICAL_RPZ:\n new_variation_remove_quality, new_variation_remove_bitset = compute_quality_vertical(data,\n new_variation_remove,\n target_class,\n bitset_slot_size,\n itemsets_bitsets,\n class_data_count,\n first_zero_mask,\n last_ones_mask,\n quality_measure=quality_measure)\n else:\n new_variation_remove_quality = compute_quality(data,\n new_variation_remove,\n target_class)\n\n variations.append(\n (new_variation_remove, new_variation_remove_quality))\n if new_variation_remove_quality > target_quality:\n return variations[-1]\n\n # s_extension for last element\n for item_possible in items:\n new_variation_s_extension = copy.deepcopy(sequence)\n new_variation_s_extension.append({item_possible})\n\n if VERTICAL_RPZ:\n new_variation_s_quality, new_variation_s_bitset = compute_quality_vertical(data,\n new_variation_s_extension,\n target_class,\n bitset_slot_size,\n itemsets_bitsets,\n class_data_count,\n first_zero_mask,\n last_ones_mask,\n quality_measure=quality_measure)\n else:\n new_variation_s_quality = compute_quality(data,\n new_variation_s_extension,\n target_class)\n\n variations.append(\n (new_variation_s_extension, new_variation_s_quality))\n if new_variation_s_quality > target_quality:\n return variations[-1]\n\n return None\n\n\ndef generalize_sequence(sequence, data, target_class, quality_measure=conf.QUALITY_MEASURE):\n sequence = copy.deepcopy(sequence)\n # we remove z items randomly\n seq_items_nb = len([i for j_set in sequence for i in j_set])\n z = random.randint(0, seq_items_nb - 1)\n for _ in range(z):\n chosen_itemset_i = random.randint(0, len(sequence) - 1)\n chosen_itemset = sequence[chosen_itemset_i]\n\n chosen_itemset.remove(random.sample(chosen_itemset, 1)[0])\n\n if len(chosen_itemset) == 0:\n sequence.pop(chosen_itemset_i)\n\n # now we compute the Wracc\n if VERTICAL_RPZ:\n quality, _ = compute_quality_vertical(data, sequence, target_class,\n VERTICAL_TOOLS['bitset_slot_size'],\n VERTICAL_TOOLS['itemsets_bitsets'], 
VERTICAL_TOOLS['class_data_count'],\n VERTICAL_TOOLS['first_zero_mask'], VERTICAL_TOOLS['last_ones_mask'], quality_measure=quality_measure)\n else:\n quality = compute_quality(data, sequence, target_class)\n return sequence, quality\n\n\ndef UCB(score, Ni, N):\n # we choose C = 0.5\n return (score + 0.25) * 2 + 0.5 * math.sqrt(2 * math.log(N) / Ni)\n\n\ndef exploit_arm(pattern, quality, items, data, itemsets_memory, target_class, enable_i=True, quality_measure=conf.QUALITY_MEASURE):\n # we optimize until we find local optima\n # print(\"Optimize\")\n while 'climbing hill':\n # we compute all possible variations\n try:\n\n pattern, quality = compute_variations_better_quality(pattern,\n items, data,\n itemsets_memory,\n target_class,\n quality,\n enable_i=enable_i,\n quality_measure=quality_measure)\n\n except TypeError:\n # print(\"Already a local optima\")\n break\n return pattern, quality\n\n\ndef play_arm(sequence, data, target_class, quality_measure=conf.QUALITY_MEASURE):\n '''\n Select object, generalise\n :param sequence: immutable sequence to generalise\n :param data:\n :param data_target_class: elements of the data with target class\n :return:\n '''\n sequence = sequence_immutable_to_mutable(sequence)\n\n pattern, quality = generalize_sequence(sequence,\n data,\n target_class,\n quality_measure=quality_measure)\n\n return pattern, quality\n\n\ndef seq_scout(data, target_class, time_budget=conf.TIME_BUDGET, top_k=conf.TOP_K, enable_i=True, vertical=True,\n iterations_limit=conf.ITERATIONS_NUMBER, theta=conf.THETA, quality_measure=conf.QUALITY_MEASURE):\n items = extract_items(data)\n begin = datetime.datetime.utcnow()\n time_budget = datetime.timedelta(seconds=time_budget)\n\n data_target_class = filter_target_class(data, target_class)\n sorted_patterns = PrioritySet(k=top_k, theta=theta)\n UCB_scores = PrioritySetUCB()\n itemsets_memory = get_itemset_memory(data)\n\n # removing class\n bitset_slot_size = len(max(data, key=lambda x: len(x))) - 1\n\n global VERTICAL_RPZ\n VERTICAL_RPZ = vertical\n\n global VERTICAL_TOOLS\n VERTICAL_TOOLS = {\n \"bitset_slot_size\": bitset_slot_size,\n \"first_zero_mask\": compute_first_zero_mask(len(data), bitset_slot_size),\n \"last_ones_mask\": compute_last_ones_mask(len(data), bitset_slot_size),\n \"class_data_count\": count_target_class_data(data, target_class),\n \"itemsets_bitsets\": {}\n }\n\n N = 1\n\n # init: we add objects with the best ucb so that they are all played one time in the main procedure.\n # By putting a null N, we ensure the mean of the quality will be correct\n for sequence in data_target_class:\n sequence_i = sequence_mutable_to_immutable(sequence[1:])\n UCB_score = UCB(float(\"inf\"), 1, N)\n UCB_scores.add(sequence_i, (UCB_score, 0, 0))\n\n # play with time budget\n while datetime.datetime.utcnow() - begin < time_budget and N < iterations_limit:\n # we take the best UCB\n _, Ni, mean_quality, sequence = UCB_scores.pop()\n\n pattern, quality = play_arm(sequence, data, target_class, quality_measure=quality_measure)\n pattern = sequence_mutable_to_immutable(pattern)\n sorted_patterns.add(pattern, quality)\n\n # we update scores\n updated_quality = (Ni * mean_quality + quality) / (Ni + 1)\n UCB_score = UCB(updated_quality, Ni + 1, N)\n UCB_scores.add(sequence, (UCB_score, Ni + 1, updated_quality))\n\n N += 1\n\n print(\"seqscout optimized iterations: {}\".format(N))\n\n best_patterns = sorted_patterns.get_top_k_non_redundant(data, top_k)\n\n for pattern in best_patterns:\n pattern_mutable = 
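# Aside: the UCB function above is the classic upper-confidence-bound rule
# with exploration constant C = 0.5. The mean quality (WRAcc-style, bounded by
# +/-0.25) is rescaled to 0..1 via (score + 0.25) * 2, then a bonus that
# shrinks with the arm's play count Ni is added. Numerically:
import math

def ucb(mean_quality, n_i, n):
    return (mean_quality + 0.25) * 2 + 0.5 * math.sqrt(2 * math.log(n) / n_i)

print(ucb(0.05, 1, 100))   # ~2.12: an arm played once keeps a large bonus
print(ucb(0.05, 50, 100))  # ~0.81: the bonus fades as the arm is replayed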
sequence_immutable_to_mutable(pattern[1])\n optimized_pattern, optimized_quality = exploit_arm(pattern_mutable, pattern[0], items, data, itemsets_memory,\n target_class, enable_i=enable_i, quality_measure=quality_measure)\n optimized_pattern = sequence_mutable_to_immutable(optimized_pattern)\n sorted_patterns.add(optimized_pattern, optimized_quality)\n\n\n\n return sorted_patterns.get_top_k_non_redundant(data, top_k)\n\n\ndef seq_scout_api(dataset=conf.DATA, time_budget=conf.TIME_BUDGET, top_k=conf.TOP_K):\n '''\n Launch seq_scout.\n This function exists for the user's convenience, so that she does not need to specify the iterations number,\n which is here only for experiments.\n '''\n\n if dataset == 'splice':\n data = read_data(pathlib.Path(__file__).parent.parent / 'data/splice.data')\n target_class = 'EI'\n enable_i = False\n elif dataset == 'alsbu':\n data = read_data_kosarak(pathlib.Path(__file__).parent.parent / 'data/aslbu.data')\n target_class = '195'\n enable_i = False\n elif dataset == 'blocks':\n data = read_data_kosarak(pathlib.Path(__file__).parent.parent / 'data/blocks.data')\n target_class = '7'\n enable_i = False\n elif dataset == 'context':\n data = read_data_kosarak(pathlib.Path(__file__).parent.parent / 'data/context.data')\n target_class = '4'\n enable_i = False\n elif dataset == 'sc2':\n data = read_data_sc2(pathlib.Path(__file__).parent.parent / 'data/sequences-TZ-45.txt')[:5000]\n target_class = '1'\n enable_i = True\n elif dataset == 'skating':\n data = read_data_kosarak(pathlib.Path(__file__).parent.parent / 'data/skating.data')\n target_class = '1'\n enable_i = False\n elif dataset == 'jmlr':\n data = read_jmlr('svm', pathlib.Path(__file__).parent.parent / 'data/jmlr/jmlr')\n target_class = '+'\n enable_i = False\n else:\n data = read_data(pathlib.Path(__file__).parent.parent / 'data/promoters.data')\n target_class = '+'\n enable_i = False\n\n class_present = False\n for sequence in data:\n if target_class == sequence[0]:\n class_present = True\n break\n\n if not class_present:\n raise ValueError('The target class does not appear in data')\n\n items = extract_items(data)\n items, items_to_encoding, encoding_to_items = encode_items(items)\n data = encode_data(data, items_to_encoding)\n\n results = seq_scout(data, target_class, top_k=top_k, vertical=False, time_budget=time_budget, iterations_limit=10000000000000, enable_i=enable_i)\n\n print_results_decode(results, encoding_to_items)\n return results\n\n\ndef launch():\n DATA = read_data_sc2('../data/sequences-TZ-45.txt')[:5000]\n #DATA = reduce_k_length(10, DATA)\n\n # DATA = read_data_kosarak('../data/blocks.data')\n # DATA = read_data_kosarak('../data/skating.data')\n # DATA = read_data_kosarak('../data/context.data')\n # DATA = read_data(pathlib.Path(__file__).parent.parent / 'data/promoters.data')\n # DATA = read_jmlr('machin', pathlib.Path(__file__).parent.parent / 'data/jmlr/jmlr')\n\n\n #ITEMS = extract_items(DATA)\n #ITEMS, items_to_encoding, encoding_to_items = encode_items(ITEMS)\n #DATA = encode_data(DATA, items_to_encoding)\n\n results = seq_scout(DATA, '1', time_budget=60, top_k=5, enable_i=False, vertical=False, iterations_limit=10000)\n\n #results = seq_scout_api(DATA, '+', 10, 5)\n print_results(results)\n #print_results_decode(results, encoding_to_items)\n\n\nif __name__ == '__main__':\n launch()\n", "sub_path": "seqscout/seq_scout.py", "file_name": "seq_scout.py", "file_ext": "py", "file_size_in_byte": 17761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "69", "api": [{"api_name": "seqscout.conf.QUALITY_MEASURE", "line_number": 50, "usage_type": "attribute"}, {"api_name": "seqscout.conf", "line_number": 50, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 70, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality_vertical", "line_number": 76, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality", "line_number": 86, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 96, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality_vertical", "line_number": 100, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality", "line_number": 110, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 121, "usage_type": "call"}, {"api_name": "seqscout.utils.k_length", "line_number": 125, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality_vertical", "line_number": 132, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality", "line_number": 142, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 153, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality_vertical", "line_number": 157, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality", "line_number": 167, "usage_type": "call"}, {"api_name": "seqscout.conf.QUALITY_MEASURE", "line_number": 179, "usage_type": "attribute"}, {"api_name": "seqscout.conf", "line_number": 179, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 180, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 183, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 185, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 188, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality_vertical", "line_number": 195, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_quality", "line_number": 200, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 206, "usage_type": "call"}, {"api_name": "math.log", "line_number": 206, "usage_type": "call"}, {"api_name": "seqscout.conf.QUALITY_MEASURE", "line_number": 209, "usage_type": "attribute"}, {"api_name": "seqscout.conf", "line_number": 209, "usage_type": "name"}, {"api_name": "seqscout.conf.QUALITY_MEASURE", "line_number": 230, "usage_type": "attribute"}, {"api_name": "seqscout.conf", "line_number": 230, "usage_type": "name"}, {"api_name": "seqscout.utils.sequence_immutable_to_mutable", "line_number": 238, "usage_type": "call"}, {"api_name": "seqscout.conf.TIME_BUDGET", "line_number": 248, "usage_type": "attribute"}, {"api_name": "seqscout.conf", "line_number": 248, "usage_type": "name"}, {"api_name": "seqscout.conf.TOP_K", "line_number": 248, "usage_type": "attribute"}, {"api_name": "seqscout.conf.ITERATIONS_NUMBER", "line_number": 249, "usage_type": "attribute"}, {"api_name": "seqscout.conf", "line_number": 249, "usage_type": "name"}, {"api_name": "seqscout.conf.THETA", "line_number": 249, "usage_type": "attribute"}, {"api_name": "seqscout.conf.QUALITY_MEASURE", "line_number": 249, "usage_type": "attribute"}, {"api_name": "seqscout.utils.extract_items", "line_number": 250, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 251, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 251, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 252, "usage_type": "call"}, {"api_name": "seqscout.priorityset.PrioritySet", "line_number": 255, "usage_type": "call"}, 
{"api_name": "seqscout.priorityset.PrioritySetUCB", "line_number": 256, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_first_zero_mask", "line_number": 268, "usage_type": "call"}, {"api_name": "seqscout.utils.compute_last_ones_mask", "line_number": 269, "usage_type": "call"}, {"api_name": "seqscout.utils.count_target_class_data", "line_number": 270, "usage_type": "call"}, {"api_name": "seqscout.utils.sequence_mutable_to_immutable", "line_number": 279, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 284, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 284, "usage_type": "attribute"}, {"api_name": "seqscout.utils.sequence_mutable_to_immutable", "line_number": 289, "usage_type": "call"}, {"api_name": "seqscout.utils.sequence_immutable_to_mutable", "line_number": 304, "usage_type": "call"}, {"api_name": "seqscout.utils.sequence_mutable_to_immutable", "line_number": 307, "usage_type": "call"}, {"api_name": "seqscout.conf.DATA", "line_number": 315, "usage_type": "attribute"}, {"api_name": "seqscout.conf", "line_number": 315, "usage_type": "name"}, {"api_name": "seqscout.conf.TIME_BUDGET", "line_number": 315, "usage_type": "attribute"}, {"api_name": "seqscout.conf.TOP_K", "line_number": 315, "usage_type": "attribute"}, {"api_name": "seqscout.utils.read_data", "line_number": 323, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 323, "usage_type": "call"}, {"api_name": "seqscout.utils.read_data_kosarak", "line_number": 327, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 327, "usage_type": "call"}, {"api_name": "seqscout.utils.read_data_kosarak", "line_number": 331, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 331, "usage_type": "call"}, {"api_name": "seqscout.utils.read_data_kosarak", "line_number": 335, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 335, "usage_type": "call"}, {"api_name": "seqscout.utils.read_data_sc2", "line_number": 339, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 339, "usage_type": "call"}, {"api_name": "seqscout.utils.read_data_kosarak", "line_number": 343, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 343, "usage_type": "call"}, {"api_name": "seqscout.utils.read_jmlr", "line_number": 347, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 347, "usage_type": "call"}, {"api_name": "seqscout.utils.read_data", "line_number": 351, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 351, "usage_type": "call"}, {"api_name": "seqscout.utils.extract_items", "line_number": 364, "usage_type": "call"}, {"api_name": "seqscout.utils.encode_items", "line_number": 365, "usage_type": "call"}, {"api_name": "seqscout.utils.encode_data", "line_number": 366, "usage_type": "call"}, {"api_name": "seqscout.utils.print_results_decode", "line_number": 370, "usage_type": "call"}, {"api_name": "seqscout.utils.read_data_sc2", "line_number": 375, "usage_type": "call"}, {"api_name": "seqscout.utils.print_results", "line_number": 392, "usage_type": "call"}]} +{"seq_id": "290991827", "text": "# ****************************************************\n# Author: @Adjprof\n# ****************************************************\n\n# The voter data attributes generation to support a model that allows\n# for interactions between state, ethnicity, income, age, sex, education, marriage status and whether a person has children\n# \n# the list of attributes: {state, zipcode, ethnicity, income, age, 
sex, education, marriage, children}\n#\n# state -> derive from US Census/PII and Voter database\n# zipcode -> derive from US Census/PII and Voter database\n# ethnicity -> synthetic data derive from US census/PII market\n# income -> synthetic data derive from PII market\n# age -> derive Voter database\n# sex -> derive Voter database (of course, the real gender is subject to data mining from PII and Social Media)\n# education -> synthetic data derive from PII market\n# marriage -> derive from US Census and Voter database (This field might be outdated; should be pair with data from PII market)\n# childen -> synthetic data derive from PII market\n# party -> registered party from voter database\n\nimport pandas as pd\nimport csv\nimport numpy as np\nfrom datetime import date\nfrom random import randrange\n\n# ****************************************************\n# data section\n# ****************************************************\n\nvf_attributes = [\"state_file_id\", \"dob\", \"sex\", \"party\", \"county__registered_address\", \"zip__registered_address\", \"state__registered_address\", \"federal_district\"]\nsynthetic_data_attributes = ['state', 'zipcode', 'ethnicity', 'income', 'age', 'sex', 'education', 'marriage', 'children', 'party']\n\n# var arrays for synthetic data generation\na_ethnicity = ['NA','White','Black','Mexican','Native Indian','Asian']\nv_ethnicity = [0,1,2,3,4,5]\np_ethnicity = [0.1,0.5,0.1,0.05,0.02,0.23]\n \na_income = ['NA', '0-50000', '50001-100000', '100001-150000', '150001-200000', '200001-250000', '250000-']\nv_income = [0,1,2,3,4,5,6]\np_income = [0.1,0.5,0.1,0.05,0.02,0.13,0.1]\n \na_education = ['NA','elementary','middle','high','associate','bachelor','master','phd']\nv_education = [0,1,2,3,4,5,6,7]\np_education = [0.05,0.05,0.05,0.25,0.075,0.37,0.105,0.05]\n\na_marriage = ['NA','married', 'not married', 'divorced']\nv_marriage = [0,1,2,3]\np_marriage = [0.1,0.5,0.1,0.3]\n \na_children = ['NA', 'one', 'two', 'three', 'four', 'five']\nv_children = [0,1,2,3,4,5]\np_children = [0.1,0.4,0.3,0.15,0.04,0.01]\n \n# ****************************************************\n# code section\n# ****************************************************\n\n#\n# Input: VF file from voter registration database; For example, IA-Federal_District-1-VF.csv\n# Output: Extracted voter data file based on vf_attributes \n#\ndef load_VF_data(data_file_name):\n vf_indices = []\n vf_output = []\n with open(data_file_name) as csv_file:\n data_file = csv.reader(csv_file)\n header_ = next(data_file)\n print(header_)\n for index, vf_a in enumerate(vf_attributes):\n vf_indices.append([i for i, elem in enumerate(header_) if vf_a in elem])\n print(vf_indices)\n\n for i, row_ in enumerate(data_file):\n vf_row = []\n for index, vf_v in enumerate(vf_indices):\n if vf_v[0]==(i for i, elem in enumerate(header_) if 'dob' in elem):\n vf_row.append(row_[vf_v[0]])\n else:\n vf_row.append(row_[vf_v[0]])\n vf_output.append(vf_row)\n return vf_output\n \n#\n# Input: birthday string in MM/DD/YYYY format in according to voter file\n# Output: Age based on current time\n#\ndef calc_age(bDay):\n data = bDay.split('/')\n born = date(int(data[2]), int(data[0]), int(data[1]))\n today = date.today()\n if ((today.month, today.day) < (born.month, born.day)):\n extra_year = 1\n else:\n extra_year = 0\n age = today.year - born.year - extra_year\n return age\n \n#\n# This is an utility function to verify p_* array\n# Input: array contain numerical value\n# Output: N/A\n# Print the total value in percentage from input array\n#\ndef 
check_percentage(p_array):\n _total = 0\n for v in range(len(p_array)):\n _total = _total + p_array[v]\n if abs(_total - 1.0) < 1e-9: # tolerate floating-point rounding\n print(\"100% met\")\n else:\n _delta = 1 - _total\n print(\"Delta: %f\" % _delta)\n\n# generating the ethnicity, NA|White|Black|Mexican|Native Indian|Asian to 0|1|2|3|4|5\ndef generateEthnicity(p_size):\n return np.random.choice(v_ethnicity, p_size, p=p_ethnicity)\n\n# generating the income, NA|0-50000|50001-100000|100001-150000|150001-200000|200001-250000|250000- to 0|1|2|3|4|5|6\ndef generateIncome(p_size):\n return np.random.choice(v_income, p_size, p=p_income)\n\n# generating the education, NA|elementary|middle|high|associate|bachelor|master|phd to 0|1|2|3|4|5|6|7\ndef generateEducation(p_size):\n return np.random.choice(v_education, p_size, p=p_education)\n\n# generating the marriage, NA|married|not married|divorced to 0|1|2|3\ndef generateMarriage(p_size):\n return np.random.choice(v_marriage, p_size, p=p_marriage)\n\n# generating the children, NA|one|two|three|four|five to 0|1|2|3|4|5\ndef generateChildren(p_size):\n return np.random.choice(v_children, p_size, p=p_children)\n\n# convert function on |M|F to 0|1|2\ndef convertSex(sex):\n iSex = 0\n if sex == 'M':\n iSex = 1\n if sex == 'F':\n iSex = 2\n return iSex\n\n# convert function on |D|R to 0|1|2\ndef convertParty(party):\n iParty = 0\n if party == 'D':\n iParty = 1\n if party == 'R':\n iParty = 2 \n return iParty\n\n#\n# This function generates the synthetic data by combining the voter database and the synthetic data attributes\n# Input: vf_output, the return value of the load_VF_data function\n# Output: synthetic_data according to synthetic_data_attributes\n#\ndef generateSyntheticData(vf_output):\n synthetic_data = []\n\n vf_output_Ethnicity = generateEthnicity(len(vf_output))\n vf_output_Income = generateIncome(len(vf_output))\n vf_output_Education = generateEducation(len(vf_output))\n vf_output_Marriage = generateMarriage(len(vf_output))\n vf_output_Children = generateChildren(len(vf_output))\n\n for i, row_ in enumerate(vf_output):\n sd_row = []\n for index in range(0, 10):\n if index == 0: # state\n sd_row.append(row_[6])\n if index == 1: # zipcode\n sd_row.append(row_[5][:5])\n if index == 2: # ethnicity\n #sd_row.append(randrange(5))\n sd_row.append(vf_output_Ethnicity[i])\n if index == 3: # income\n #sd_row.append(randrange(6))\n sd_row.append(vf_output_Income[i])\n if index == 4: # age\n sd_row.append(calc_age(row_[1]))\n if index == 5: # sex\n sd_row.append(convertSex(row_[2]))\n if index == 6: # education\n #sd_row.append(randrange(5))\n sd_row.append(vf_output_Education[i])\n if index == 7: # marriage\n #sd_row.append(randrange(3))\n sd_row.append(vf_output_Marriage[i])\n if index == 8: # children\n #sd_row.append(randrange(5))\n sd_row.append(vf_output_Children[i])\n if index == 9: # party\n sd_row.append(convertParty(row_[3]))\n synthetic_data.append(sd_row)\n return synthetic_data\n\n#\n# Input: output data file name; for example, IA_synthetic_full_data_Output.csv\n# Output: The synthetic data file written to the file system\n#\ndef output_synthetic_data(data_file_name, synthetic_data):\n with open(data_file_name, mode='w') as csv_file:\n csvWriter = csv.writer(csv_file)\n csvWriter.writerow(synthetic_data_attributes)\n for i, vf_r in enumerate(synthetic_data):\n csvWriter.writerow(vf_r)\n \n\n", "sub_path": "Data/DataGeneration.py", "file_name": "DataGeneration.py", "file_ext": "py", "file_size_in_byte": 7825, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2",
"pt": "69", "api": [{"api_name": "csv.reader", "line_number": 67, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 90, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 91, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 91, "usage_type": "name"}, {"api_name": "numpy.random.choice", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 125, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 129, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 133, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 204, "usage_type": "call"}]} +{"seq_id": "129540373", "text": "import os\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# Set the working directory to be the current one\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n# Load a color image\nimg = cv2.imread('simple.jpg')\n\n# Convert to a gray image\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# Shi-Tomasi Corner Detection\ncorners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10)\ncorners = np.int0(corners)\n\n# Display result\nfor i in corners:\n x,y = i.ravel()\n cv2.circle(img, (x,y), 3, 255, -1)\n\nplt.imshow(img)\nplt.title('Good Features to Track')\nplt.xticks([]), plt.yticks([])\nplt.show()", "sub_path": "16_feature_detection_and_extraction/02_good_feature_to_track.py", "file_name": "02_good_feature_to_track.py", "file_ext": "py", "file_size_in_byte": 607, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.chdir", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.goodFeaturesToTrack", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}]} 
+{"seq_id": "565284136", "text": "from django.urls import path, re_path\nfrom quiz import views\n\napp_name = 'quiz'\n\n# Sub app level mapping for endpoints\nurlpatterns = [\n path('quizzes/', views.QuizDetailView.as_view(), name='quizzes'), # Used to retrieve the quiz when user clicks Play\n path('my-quizzes/', views.MyQuizListView.as_view(), name='myquizzes'), # Used to load list of completed quizzes when user clicks History\n path('save-answer/', views.SaveUserAnswer.as_view()), # Used to save a submitted answer when user clicks Next\n re_path(r'quizzes/(?P[\\w\\-]+)/$', views.QuizInfoView.as_view()), # Used to get data for a selected quiz from History view\n re_path(r\"quizzes/(?P[\\w\\-]+)/submit/$\", views.SubmitQuizView.as_view()), # Used to submit the quiz when the final question is answered\n]\n", "sub_path": "quiz-api/app/quiz/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "quiz.views.QuizDetailView.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "quiz.views.QuizDetailView", "line_number": 8, "usage_type": "attribute"}, {"api_name": "quiz.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "quiz.views.MyQuizListView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "quiz.views.MyQuizListView", "line_number": 9, "usage_type": "attribute"}, {"api_name": "quiz.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "quiz.views.SaveUserAnswer.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "quiz.views.SaveUserAnswer", "line_number": 10, "usage_type": "attribute"}, {"api_name": "quiz.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 11, "usage_type": "call"}, {"api_name": "quiz.views.QuizInfoView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "quiz.views.QuizInfoView", "line_number": 11, "usage_type": "attribute"}, {"api_name": "quiz.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.re_path", "line_number": 12, "usage_type": "call"}, {"api_name": "quiz.views.SubmitQuizView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "quiz.views.SubmitQuizView", "line_number": 12, "usage_type": "attribute"}, {"api_name": "quiz.views", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "619986175", "text": "import datetime\nimport os\nimport shutil\n\nimport backtrader as bt\nimport pandas as pd\n\nfrom pathlib import Path\nfrom django.core.management.base import BaseCommand\n\nSTORAGE_ROOT = './storage/'\n\n\nclass Command(BaseCommand):\n help = 'Closes the specified poll for voting'\n\n def add_arguments(self, parser):\n # parser.add_argument('spider', nargs='+', type=int)\n parser.add_argument('symbols', default='')\n\n def handle(self, *args, **options):\n symbols = options['symbols'].split(',')\n\n fromdate = datetime.datetime(2017, 1, 1)\n todate = datetime.datetime(2019, 5, 24)\n\n os.mkdir(Path('{}{}'.format(STORAGE_ROOT, 'SET/StockPrice/')).resolve())\n\n for symbol in symbols:\n print(f'>> {symbol}')\n\n yahoo_symbol = f'{symbol}.BK'\n\n data = bt.feeds.YahooFinanceData(dataname=yahoo_symbol, fromdate=fromdate, todate=todate)\n 
data.start()\n\n filename = f'{yahoo_symbol}.csv'\n csv_file_abspath = Path('{}{}{}'.format(STORAGE_ROOT, 'SET/', filename)).resolve()\n with open(csv_file_abspath, 'w') as fd:\n data.f.seek(0)\n shutil.copyfileobj(data.f, fd)\n\n symbol_df = pd.read_csv(csv_file_abspath).set_index(keys=['Date'])\n\n\n\n", "sub_path": "app/management/commands/run_spider.py", "file_name": "run_spider.py", "file_ext": "py", "file_size_in_byte": 1282, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 27, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 27, "usage_type": "call"}, {"api_name": "backtrader.feeds.YahooFinanceData", "line_number": 34, "usage_type": "call"}, {"api_name": "backtrader.feeds", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 38, "usage_type": "call"}, {"api_name": "shutil.copyfileobj", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "278841229", "text": "from net.yolo import YoloV2, YoloV3\nimport ast\n\n\ndef _update_configs(configs, configs_path):\n for k, v in configs.items():\n if k.endswith(\"_dir\") or k.endswith(\"_path\"):\n if not os.path.isabs(v):\n configs[k] = os.path.join(os.path.dirname(os.path.abspath(configs_path)), v)\n if k == \"anchors\" or k == \"class_names\":\n configs[k] = ast.literal_eval(v)\n return configs\n\n\ndef _main(cfg, mode):\n version = cfg[\"COMMON\"][\"version\"]\n if version == \"v2\":\n yolo = YoloV2()\n elif version == \"v3\":\n yolo = YoloV3()\n else:\n raise ValueError(\"Unsupported version: {}\".format(version))\n\n if mode == \"train\":\n params = {**cfg[\"TRAIN\"], **cfg[\"COMMON\"]}\n yolo.train(params)\n elif mode == \"test\":\n params = {**cfg[\"TEST\"], **cfg[\"COMMON\"]}\n yolo.test(params)\n elif mode == \"anchor\":\n params = {**cfg[\"ANCHOR\"], **cfg[\"COMMON\"]}\n anchors, class_names = yolo.generate_anchors(params)\n print(\"Anchors: \")\n print(\"\\t{}\".format(anchors))\n print(\"Class names: \")\n print(\"\\t{}\".format(class_names))\n else:\n raise ValueError(\"Unsupported mode: {}\".format(mode))\n\n\nif __name__ == \"__main__\":\n import argparse\n import configparser\n import os\n\n args = argparse.ArgumentParser()\n args.add_argument(\"--config\", dest=\"config\", help=\"Path to configuration file\",\n default=os.path.join(os.path.dirname(__file__), \"config\", \"yolo_2.ini\"))\n args.add_argument(\"--mode\", dest=\"mode\", help=\"Mode: (train|test|anchor)\",\n default=\"anchor\")\n c = args.parse_args()\n # # DEBUG START\n # c.config = os.path.join(os.path.dirname(__file__), \"config\", \"yolo_2.ini\")\n # c.mode = \"test\"\n # # DEBUG END\n cfg = configparser.ConfigParser()\n cfg.read(c.config)\n cfg = {s: _update_configs(dict(cfg.items(s)), c.config) for s in cfg.sections()}\n\n _main(cfg, c.mode.lower())\n", "sub_path": "launcher.py", "file_name": "launcher.py", "file_ext": "py", "file_size_in_byte": 1984, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "ast.literal_eval", "line_number": 11, "usage_type": "call"}, {"api_name": "net.yolo.YoloV2", "line_number": 18, 
"usage_type": "call"}, {"api_name": "net.yolo.YoloV3", "line_number": 20, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 48, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "462293785", "text": "\"\"\"This file is a Authoria spider created on top of the RecursiveList Template.\n\n scrapy crawl authoria -a mining_job_id=9999 -a iteration=1 -a url=\"https://harriscorp.authoria.net/joblist.html\" -a extract=1 -a robots_obey=0\n\nsample urls:\n https://avayacorp.authoria.net/joblist.html\n https://nmhext.authoria.net/joblist.html\n https://harriscorp.authoria.net/joblist.html\n\"\"\"\nfrom copy import deepcopy\nfrom urlparse import urljoin\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, Replace, Postfix, HtmlFormatter\n\n\nclass Authoria(ATSSpider):\n\n name = 'authoria'\n ref_re = r'-view=view-(\\d+)'\n load_functions = False\n load_exp_level = False\n exp_level_xpath = ''\n function_string_xpath = ''\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(\n url=url.replace('.html', '.html?erpc=alljobs')\n )\n\n def parse(self, response):\n sel = Selector(response)\n jobs = sel.xpath('//tr[td/span[contains(@class, \"listrow\")]]')\n for job in jobs:\n job_url = job.xpath('./td/span/a/@href').extract()\n if job_url:\n yield Request(\n urljoin(response.url, job_url[0]),\n callback=self.parse_job_callback()\n )\n\n next_url = sel.xpath(\n '//a[contains(@id, \"Next\") and contains(@id, \"Page\")]/@href'\n ).extract()\n if next_url:\n next_url = urljoin(response.url, next_url[0])\n yield Request(next_url, callback=self.parse)\n\n def parse_job(self, response):\n sel = Selector(response)\n loader = BrightcorpItemLoader(selector=sel)\n loader.add_value('url', response.url)\n loader.add_xpath(\n 'description',\n '//tr[preceding-sibling::tr[td/span/text()=\"Description:\"]][1]/td/span[@class=\"sectionbody\"]',\n HtmlFormatter()\n )\n loader.add_xpath(\n 'jobtype',\n '//td[preceding-sibling::td[contains(span/text(), \"Job Type\")]]/span/text()'\n )\n loader.add_xpath(\n 'educationrequirements',\n '//td[preceding-sibling::td[contains(span/text(), \"Education\")]]/span/text()'\n )\n loader.add_xpath('title', '//span[@class=\"pageheading\"]/text()')\n loader.add_xpath('jobcategory', self.function_string_xpath, Replace('^(-)'))\n\n if self.load_functions:\n self.load_functions_string(loader, sel.xpath(self.function_string_xpath).extract())\n\n if self.load_exp_level:\n self.load_exp_level_string(loader, sel.xpath(self.exp_level_xpath).extract())\n\n locs = sel.xpath(\n '//td[preceding-sibling::td[contains(span/text(), \"Location\")]]/span/text()'\n ).extract()\n\n if locs and len(locs[0].split(',')) > 1:\n locs = locs[0].split(',')\n for loc in locs:\n loc_loader = deepcopy(loader)\n loc_loader.add_value('location', loc)\n loc_loader.add_value(\n 'referencenumber', response.url,\n Prefix('%s-%s-' % (self.name, loc.replace(' ', ''))),\n re=self.ref_re\n )\n yield loc_loader.load_item()\n else:\n loader.add_value('location', locs)\n loader.add_value(\n 'referencenumber', response.url,\n Prefix('%s-' % 
self.name), re=self.ref_re\n )\n yield loader.load_item()\n\n def load_functions_string(self, loader, func):\n pass\n\n def load_exp_level_string(self, loader, exp_level):\n pass\n", "sub_path": "brightcorp/brightcorp/spiders/authoria.py", "file_name": "authoria.py", "file_ext": "py", "file_size_in_byte": 3824, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "brightcorp.base.atsspiders.ATSSpider", "line_number": 19, "usage_type": "name"}, {"api_name": "scrapy.http.Request", "line_number": 30, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 35, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 40, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 41, "usage_type": "call"}, {"api_name": "urlparse.urljoin", "line_number": 49, "usage_type": "call"}, {"api_name": "scrapy.http.Request", "line_number": 50, "usage_type": "call"}, {"api_name": "scrapy.selector.Selector", "line_number": 53, "usage_type": "call"}, {"api_name": "brightcorp.items.BrightcorpItemLoader", "line_number": 54, "usage_type": "call"}, {"api_name": "brightcorp.processors.HtmlFormatter", "line_number": 59, "usage_type": "call"}, {"api_name": "brightcorp.processors.Replace", "line_number": 70, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 85, "usage_type": "call"}, {"api_name": "brightcorp.processors.Prefix", "line_number": 89, "usage_type": "call"}, {"api_name": "brightcorp.processors.Prefix", "line_number": 97, "usage_type": "call"}]} +{"seq_id": "336318116", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom .integrator import SearchApi\n# Create your views here.\n\ndef index(request):\n searchApi = SearchApi()\n output_list = searchApi.make_request()\n result_dict = {}\n results = []\n for result in output_list:\n if result not in result_dict:\n result_dict[result] =1\n results.append(result)\n context = {'results':results}\n template = loader.get_template('Results/index.html')\n #return HttpResponse(template.render(context,request))\n return render(request,'Results/index.html',context)", "sub_path": "neu_hack/WhatsHappening/Results/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 642, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "integrator.SearchApi", "line_number": 8, "usage_type": "call"}, {"api_name": "django.template.loader.get_template", "line_number": 17, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "285787666", "text": "import maya.cmds as cmds\r\n\r\n#_______________________________________________________________________________________\r\ndef CenterLocator(*args):\r\n\tsels = cmds.ls(sl=True)\r\n\t\r\n\tfor sel in sels:\r\n\t\tcmds.select(clear=True)\r\n\t\tpos = cmds.xform(sel, q=True, rp=True, ws=True)\r\n\t\tlocator = cmds.spaceLocator(p = pos, n = 'CenterLocator#')\r\n\t\tcmds.xform(centerPivots=True)\r\n#_______________________________________________________________________________________\r\ndef AverageLocator(*args):\r\n\tsels = cmds.ls(sl=True)\t\r\n\ttempCluster = cmds.cluster()\r\n\tpos = cmds.xform(tempCluster, q=True, ws=True,rp=True)\r\n\tlocator = cmds.spaceLocator(p = pos, n = 
'AverageLocator#')\r\n\tcmds.xform(centerPivots=True)\r\n\tcmds.delete(tempCluster)\r\n#_______________________________________________________________________________________\r\ndef deleteHistory(*args):\r\n\tcmds.DeleteHistory()\r\n#_______________________________________________________________________________________\r\ndef hideAndLockVisibility(*args):\r\n\tsels = cmds.ls(sl=True)\r\n\tfor sel in sels:\r\n\t\tcmds.setAttr((sel+ \".v\"), lock = True, keyable=False)\r\n#_______________________________________________________________________________________\r\ndef centerPivot(*args):\r\n\t cmds.xform(centerPivots=True)\r\n#_______________________________________________________________________________________\r\ndef freezeTransformation(*args):\r\n\tcmds.FreezeTransformations()\r\n#_______________________________________________________________________________________\r\ndef Renamer(*args):\r\n\tsels = cmds.ls(sl=True)\r\n\tfor sel in sels:\r\n\t\tnewname = cmds.textFieldGrp(textFieldGrpVar, q=1, text=1)\r\n\t\tcmds.rename(sel,(newname +'_#'))\t\t\r\n\t\t\r\n#_______________________________________________________________________________________\r\ndef CreateFkCtrl(*args):\r\n\tcmds.select(hi = True)\r\n\tsels = cmds.ls(sl=True)\r\n\t\r\n\tlast_ctrl = ''\r\n\t\r\n\tfor sel in sels:\r\n\t\tcmds.select(clear=True)\r\n\t\tpos = cmds.xform(sel, q=True, translation=True, ws=True)\r\n\t\trot = cmds.xform(sel, q=True, rotation=True, ws=True)\r\n\t\tctrl = cmds.circle(n=(sel + '_Ctrl'), radius=1,nr=(1,0,0), ch=False) \r\n\t\tcmds.setAttr(\".v\",lock=True, keyable=False)\r\n\t\tgrp_ctrl = cmds.group(n=(sel + '_Ctrl_Grp'))\r\n\t\t\r\n\t\tcmds.xform(grp_ctrl, translation = pos, ws=True)\r\n\t\tcmds.xform(grp_ctrl, rotation = rot, ws=True)\r\n\t\tif last_ctrl != '':\r\n\t\t\tcmds.parent(grp_ctrl, last_ctrl)\r\n\t\tcmds.parentConstraint(ctrl, sel)\r\n\t\tcmds.scaleConstraint(ctrl, sel)\r\n\t\tlast_ctrl = ctrl\r\n\t\r\n\tif cmds.objExists('Skeleton'):\r\n\t\tcmds.parent(sels[0], 'Skeleton')\r\n\telse:\r\n\t\tcmds.group(sels[0], n='Skeleton')\r\n\t\t\r\n\tif cmds.objExists('Controls'):\r\n\t\tcmds.parent((sels[0] + '_Ctrl_Grp'), 'Controls')\r\n\telse:\r\n\t\tcmds.group((sels[0] + '_Ctrl_Grp'), n='Controls')\r\n\r\n#________________________________________________________________________________________\r\ndef CreateBrokenFkCtrl(*args):\r\n\tcmds.select(hi = True)\r\n\tsels = cmds.ls(sl=True)\r\n\tlast_ctrl = ''\r\n\t\r\n\t\r\n\tfor sel in sels:\r\n\t\tcmds.select(clear=True)\r\n\t\tpos = cmds.xform(sel, q=True, translation=True, ws=True)\r\n\t\trot = cmds.xform(sel, q=True, rotation=True, ws=True)\r\n\t\tctrl = cmds.circle(n=(sel + '_Ctrl'), radius=1,nr=(1,0,0), ch=False)\r\n\t\tcmds.setAttr(\".v\",lock=True, keyable=False)\r\n\t\tcmds.addAttr(at = \"float\",ln = 'Follow_Translate', keyable=True, min = 0, max = 1, dv = 1) \r\n\t\tcmds.addAttr(at = \"float\",ln = 'Follow_Rotate', keyable=True, min = 0, max = 1, dv = 1) \r\n\t\t\r\n\t\tgrp_ctrl = cmds.group(n=(sel + '_Ctrl_Grp'))\r\n\t\t\r\n\t\tcmds.xform(grp_ctrl, translation = pos, ws=True)\r\n\t\tcmds.xform(grp_ctrl, rotation = rot, ws=True)\r\n\t\r\n\t\tif last_ctrl == '':\r\n\t\t\tfk_main_ctrl_grp = cmds.group(grp_ctrl,n=(sel + '_FK_Ctrl_Grp'))\t\t\r\n\t\t\tmaster_locator = cmds.spaceLocator(n=(sel + '_FK_Master_Ctrl'), position = pos);\r\n\t\t\tcmds.CenterPivot(master_locator)\r\n\t\t\tcmds.parentConstraint(master_locator, grp_ctrl, mo=True ,sr=[\"x\", \"y\", \"z\"], n='Master_PC_Translate')\r\n\t\t\tcmds.parentConstraint(master_locator, grp_ctrl, 
mo=True ,st=[\"x\", \"y\", \"z\"], n='Master_PC_Rotate')\r\n\t\t\t\r\n\t\t\tcmds.connectAttr('%s.%s' % (sel + '_Ctrl','Follow_Translate'),'%s.%sW0' % ('Master_PC_Translate',sel + '_FK_Master_Ctrl'))\r\n\t\t\tcmds.connectAttr('%s.%s' % (sel + '_Ctrl','Follow_Rotate'),'%s.%sW0' % ('Master_PC_Rotate',sel + '_FK_Master_Ctrl'))\r\n\t\t\t\r\n\t\r\n\t\tif last_ctrl != '':\r\n\t\t\tcmds.parent(grp_ctrl, fk_main_ctrl_grp)\r\n\t\t\tcmds.parentConstraint(last_ctrl, grp_ctrl, mo=True,sr=[\"x\", \"y\", \"z\"],n=(sel + '_PC_Translate'))\r\n\t\t\tcmds.parentConstraint(last_ctrl, grp_ctrl, mo=True,st=[\"x\", \"y\", \"z\"],n=(sel + '_PC_Rotate'))\r\n\t\t\t\r\n\t\t\tcmds.connectAttr('%s.%s' % (sel + '_Ctrl','Follow_Translate'),'%s.%sW0' % ((sel + '_PC_Translate'),last_sel + '_Ctrl'))\r\n\t\t\tcmds.connectAttr('%s.%s' % (sel + '_Ctrl','Follow_Rotate'),'%s.%sW0' % ((sel + '_PC_Rotate'),last_sel + '_Ctrl'))\r\n\t\tcmds.parentConstraint(ctrl, sel)\r\n\t\tcmds.scaleConstraint(ctrl, sel)\t\r\n\t\t\r\n\t\tlast_ctrl = ctrl\r\n\t\tlast_sel =sel\r\n\t\r\n\tcmds.scaleConstraint(master_locator,fk_main_ctrl_grp, mo=True)\t\r\n\tmain_grp = cmds.group(fk_main_ctrl_grp, master_locator, n=(sels[0] + '_Main_Ctrl_Grp'))\r\n\t\r\n\t\r\n\tif cmds.objExists('Skeleton'):\r\n\t\tcmds.parent(sels[0], 'Skeleton')\r\n\telse:\r\n\t\tcmds.group(sels[0], n='Skeleton')\r\n\t\t\r\n\tif cmds.objExists('Controls'):\r\n\t\tcmds.parent(main_grp, 'Controls')\r\n\telse:\r\n\t\tcmds.group(main_grp, n='Controls')\r\n\r\n\r\n# Make a new window\r\n#\r\nif(cmds.window('Toolbox', exists=True)):\r\n\tcmds.deleteUI('Toolbox')\r\nwindow = cmds.window('Toolbox',title=\"Jerome's Toolbox\",widthHeight=(200, 200))\r\n\r\ncmds.columnLayout( adjustableColumn=True )\r\n\r\ncmds.button( label='Create Locator', command= CenterLocator)\r\ncmds.button( label='Create Average Locator', command= AverageLocator)\r\ncmds.separator()\r\ncmds.frameLayout( label=\"Edit Ctrl\",cll=1, cl=1)\r\ncmds.gridLayout( nc=2, nr=2,cwh=(150, 30))\r\ncmds.button( label ='Delete_History', command= deleteHistory)\r\ncmds.button( label ='Lock And Hide Visibility All Ctrl', command= hideAndLockVisibility)\r\ncmds.button( label ='Center Pivot', command= centerPivot)\r\ncmds.button( label ='Freeze Transformation', command= freezeTransformation)\r\ncmds.setParent( '..' )\r\ncmds.setParent( '..' )\r\ncmds.separator()\r\ncmds.frameLayout( label=\"Rename\",cll=1, cl=1)\r\ncmds.gridLayout( nc=1, nr=2,cwh=(300, 30))\r\ntextFieldGrpVar = cmds.textFieldGrp( label='New Name:', text='Editable',adj=1)\r\ncmds.button( label='Rename', command= Renamer)\r\ncmds.setParent( '..' )\r\ncmds.setParent( '..' )\r\ncmds.separator()\r\ncmds.button( label='Create Normal FK Ctrl', command=CreateFkCtrl)\r\ncmds.button( label='Create Broken FK Ctrl', command=CreateBrokenFkCtrl)\r\ncmds.separator()\r\ncmds.button( label='Close', command=('cmds.deleteUI(\\\"' + window + '\\\", window=True)') )\r\ncmds.setParent( '..' 
)\r\ncmds.showWindow( window )\r\n\r\n\t\t\r\n\r\n", "sub_path": "Scripts/JeromeToolbox.py", "file_name": "JeromeToolbox.py", "file_ext": "py", "file_size_in_byte": 6652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "maya.cmds.ls", "line_number": 5, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 5, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 8, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 8, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 9, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 9, "usage_type": "name"}, {"api_name": "maya.cmds.spaceLocator", "line_number": 10, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 10, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 11, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 11, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 14, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 14, "usage_type": "name"}, {"api_name": "maya.cmds.cluster", "line_number": 15, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 15, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 16, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 16, "usage_type": "name"}, {"api_name": "maya.cmds.spaceLocator", "line_number": 17, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 17, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 18, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 18, "usage_type": "name"}, {"api_name": "maya.cmds.delete", "line_number": 19, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 19, "usage_type": "name"}, {"api_name": "maya.cmds.DeleteHistory", "line_number": 22, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 22, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 25, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 25, "usage_type": "name"}, {"api_name": "maya.cmds.setAttr", "line_number": 27, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 27, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 30, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 30, "usage_type": "name"}, {"api_name": "maya.cmds.FreezeTransformations", "line_number": 33, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 33, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 36, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 36, "usage_type": "name"}, {"api_name": "maya.cmds.textFieldGrp", "line_number": 38, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 38, "usage_type": "name"}, {"api_name": "maya.cmds.rename", "line_number": 39, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 39, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 43, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 43, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 44, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 44, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 49, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 49, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 50, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 
50, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 51, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 51, "usage_type": "name"}, {"api_name": "maya.cmds.circle", "line_number": 52, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 52, "usage_type": "name"}, {"api_name": "maya.cmds.setAttr", "line_number": 53, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 53, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 54, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 54, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 56, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 56, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 57, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 57, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 59, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 59, "usage_type": "name"}, {"api_name": "maya.cmds.parentConstraint", "line_number": 60, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 60, "usage_type": "name"}, {"api_name": "maya.cmds.scaleConstraint", "line_number": 61, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 61, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 64, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 64, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 65, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 65, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 67, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 67, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 69, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 69, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 70, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 70, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 72, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 72, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 76, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 76, "usage_type": "name"}, {"api_name": "maya.cmds.ls", "line_number": 77, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 77, "usage_type": "name"}, {"api_name": "maya.cmds.select", "line_number": 82, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 82, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 83, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 83, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 84, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 84, "usage_type": "name"}, {"api_name": "maya.cmds.circle", "line_number": 85, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 85, "usage_type": "name"}, {"api_name": "maya.cmds.setAttr", "line_number": 86, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 86, "usage_type": "name"}, {"api_name": "maya.cmds.addAttr", "line_number": 87, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 87, "usage_type": "name"}, {"api_name": "maya.cmds.addAttr", "line_number": 88, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 88, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 90, "usage_type": "call"}, 
{"api_name": "maya.cmds", "line_number": 90, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 92, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 92, "usage_type": "name"}, {"api_name": "maya.cmds.xform", "line_number": 93, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 93, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 96, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 96, "usage_type": "name"}, {"api_name": "maya.cmds.spaceLocator", "line_number": 97, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 97, "usage_type": "name"}, {"api_name": "maya.cmds.CenterPivot", "line_number": 98, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 98, "usage_type": "name"}, {"api_name": "maya.cmds.parentConstraint", "line_number": 99, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 99, "usage_type": "name"}, {"api_name": "maya.cmds.parentConstraint", "line_number": 100, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 100, "usage_type": "name"}, {"api_name": "maya.cmds.connectAttr", "line_number": 102, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 102, "usage_type": "name"}, {"api_name": "maya.cmds.connectAttr", "line_number": 103, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 103, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 107, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 107, "usage_type": "name"}, {"api_name": "maya.cmds.parentConstraint", "line_number": 108, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 108, "usage_type": "name"}, {"api_name": "maya.cmds.parentConstraint", "line_number": 109, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 109, "usage_type": "name"}, {"api_name": "maya.cmds.connectAttr", "line_number": 111, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 111, "usage_type": "name"}, {"api_name": "maya.cmds.connectAttr", "line_number": 112, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 112, "usage_type": "name"}, {"api_name": "maya.cmds.parentConstraint", "line_number": 113, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 113, "usage_type": "name"}, {"api_name": "maya.cmds.scaleConstraint", "line_number": 114, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 114, "usage_type": "name"}, {"api_name": "maya.cmds.scaleConstraint", "line_number": 119, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 119, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 120, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 120, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 123, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 123, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 124, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 124, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 126, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 126, "usage_type": "name"}, {"api_name": "maya.cmds.objExists", "line_number": 128, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 128, "usage_type": "name"}, {"api_name": "maya.cmds.parent", "line_number": 129, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 129, "usage_type": "name"}, {"api_name": "maya.cmds.group", "line_number": 131, 
"usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 131, "usage_type": "name"}, {"api_name": "maya.cmds.window", "line_number": 136, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 136, "usage_type": "name"}, {"api_name": "maya.cmds.deleteUI", "line_number": 137, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 137, "usage_type": "name"}, {"api_name": "maya.cmds.window", "line_number": 138, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 138, "usage_type": "name"}, {"api_name": "maya.cmds.columnLayout", "line_number": 140, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 140, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 142, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 142, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 143, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 143, "usage_type": "name"}, {"api_name": "maya.cmds.separator", "line_number": 144, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 144, "usage_type": "name"}, {"api_name": "maya.cmds.frameLayout", "line_number": 145, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 145, "usage_type": "name"}, {"api_name": "maya.cmds.gridLayout", "line_number": 146, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 146, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 147, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 147, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 148, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 148, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 149, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 149, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 150, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 150, "usage_type": "name"}, {"api_name": "maya.cmds.setParent", "line_number": 151, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 151, "usage_type": "name"}, {"api_name": "maya.cmds.setParent", "line_number": 152, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 152, "usage_type": "name"}, {"api_name": "maya.cmds.separator", "line_number": 153, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 153, "usage_type": "name"}, {"api_name": "maya.cmds.frameLayout", "line_number": 154, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 154, "usage_type": "name"}, {"api_name": "maya.cmds.gridLayout", "line_number": 155, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 155, "usage_type": "name"}, {"api_name": "maya.cmds.textFieldGrp", "line_number": 156, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 156, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 157, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 157, "usage_type": "name"}, {"api_name": "maya.cmds.setParent", "line_number": 158, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 158, "usage_type": "name"}, {"api_name": "maya.cmds.setParent", "line_number": 159, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 159, "usage_type": "name"}, {"api_name": "maya.cmds.separator", "line_number": 160, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 160, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 161, "usage_type": 
"call"}, {"api_name": "maya.cmds", "line_number": 161, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 162, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 162, "usage_type": "name"}, {"api_name": "maya.cmds.separator", "line_number": 163, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 163, "usage_type": "name"}, {"api_name": "maya.cmds.button", "line_number": 164, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 164, "usage_type": "name"}, {"api_name": "maya.cmds.setParent", "line_number": 165, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 165, "usage_type": "name"}, {"api_name": "maya.cmds.showWindow", "line_number": 166, "usage_type": "call"}, {"api_name": "maya.cmds", "line_number": 166, "usage_type": "name"}]} +{"seq_id": "368511172", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Pradeep Jairamani , github.com/pradeepjairamani\n\nimport socket\nimport socks\nimport time\nimport json\nimport threading\nimport string\nimport random\nimport sys\nimport struct\nimport re\nimport os\nfrom OpenSSL import crypto\nimport ssl\nfrom core.alert import *\nfrom core.targets import target_type\nfrom core.targets import target_to_host\nfrom core.load_modules import load_file_path\nfrom lib.socks_resolver.engine import getaddrinfo\nfrom core._time import now\nfrom core.log import __log_into_file\nimport requests\n\n\ndef extra_requirements_dict():\n return {\n \"xmlrpc_pingback_vuln_ports\": [80, 443]\n }\n\n\ndef conn(targ, port, timeout_sec, socks_proxy):\n try:\n if socks_proxy is not None:\n socks_version = socks.SOCKS5 if socks_proxy.startswith(\n 'socks5://') else socks.SOCKS4\n socks_proxy = socks_proxy.rsplit('://')[1]\n if '@' in socks_proxy:\n socks_username = socks_proxy.rsplit(':')[0]\n socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]\n socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),\n int(socks_proxy.rsplit(':')[-1]), username=socks_username,\n password=socks_password)\n socket.socket = socks.socksocket\n socket.getaddrinfo = getaddrinfo\n else:\n socks.set_default_proxy(socks_version, str(socks_proxy.rsplit(':')[0]),\n int(socks_proxy.rsplit(':')[1]))\n socket.socket = socks.socksocket\n socket.getaddrinfo = getaddrinfo()\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sys.stdout.flush()\n s.settimeout(timeout_sec)\n s.connect((targ, port))\n return s\n except Exception as e:\n return None\n\n\ndef xmlrpc_pingback(target, port, timeout_sec, log_in_file, language, time_sleep,\n thread_tmp_filename, socks_proxy, scan_id, scan_cmd):\n try:\n s = conn(target, port, timeout_sec, socks_proxy)\n if not s:\n return False\n else:\n headers = {}\n if target_type(target) != \"HTTP\" and port == 443:\n target = 'https://' + target\n if target_type(target) != \"HTTP\" and port == 80:\n target = 'http://' + target\n headers['Content-Type'] = 'text/xml'\n postdata = '''pingback.ping\n http://Cannotbehere:22/\n ''' + target + '''\n '''\n\n req = requests.post(target+'/xmlrpc.php',\n data=postdata, headers=headers)\n if re.search('16', req.text):\n return True\n else:\n return False\n except Exception as e:\n return False\n\n\ndef __xmlrpc_pingback(target, port, timeout_sec, log_in_file, language, time_sleep,\n thread_tmp_filename, socks_proxy, scan_id, scan_cmd):\n if xmlrpc_pingback(target, port, timeout_sec, log_in_file, language, time_sleep,\n thread_tmp_filename, socks_proxy, scan_id, scan_cmd):\n info(messages(language, 
\"target_vulnerable\").format(target, port,\n 'Wordpress XMLRPC pingback Vulnerability'))\n __log_into_file(thread_tmp_filename, 'w', '0', language)\n data = json.dumps({'HOST': target, 'USERNAME': '', 'PASSWORD': '', 'PORT': port, 'TYPE': 'Wordpress_xmlrpc_pingback_vuln',\n 'DESCRIPTION': messages(language, \"vulnerable\").format('Wordpress XMLRPC pingback Vulnerability'), 'TIME': now(),\n 'CATEGORY': \"vuln\",\n 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})\n __log_into_file(log_in_file, 'a', data, language)\n return True\n else:\n return False\n\n\ndef start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,\n verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd): # Main function\n if target_type(target) != 'SINGLE_IPv4' or target_type(target) != 'DOMAIN' or target_type(target) != 'HTTP':\n # requirements check\n new_extra_requirements = extra_requirements_dict()\n if methods_args is not None:\n for extra_requirement in extra_requirements_dict():\n if extra_requirement in methods_args:\n new_extra_requirements[\n extra_requirement] = methods_args[extra_requirement]\n extra_requirements = new_extra_requirements\n if ports is None:\n ports = extra_requirements[\"xmlrpc_pingback_vuln_ports\"]\n if target_type(target) == 'HTTP':\n target = target_to_host(target)\n threads = []\n total_req = len(ports)\n thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(\n random.choice(string.ascii_letters + string.digits) for _ in range(20))\n __log_into_file(thread_tmp_filename, 'w', '1', language)\n trying = 0\n keyboard_interrupt_flag = False\n for port in ports:\n port = int(port)\n t = threading.Thread(target=__xmlrpc_pingback,\n args=(target, int(port), timeout_sec, log_in_file, language, time_sleep,\n thread_tmp_filename, socks_proxy, scan_id, scan_cmd))\n threads.append(t)\n t.start()\n trying += 1\n if verbose_level > 3:\n info(\n messages(language, \"trying_message\").format(trying, total_req, num, total, target, port, 'xmlrpc_pingback_vuln'))\n while 1:\n try:\n if threading.activeCount() >= thread_number:\n time.sleep(0.01)\n else:\n break\n except KeyboardInterrupt:\n keyboard_interrupt_flag = True\n break\n if keyboard_interrupt_flag:\n break\n # wait for threads\n kill_switch = 0\n kill_time = int(\n timeout_sec / 0.1) if int(timeout_sec / 0.1) != 0 else 1\n while 1:\n time.sleep(0.1)\n kill_switch += 1\n try:\n if threading.activeCount() == 1 or kill_switch == kill_time:\n break\n except KeyboardInterrupt:\n break\n thread_write = int(open(thread_tmp_filename).read().rsplit()[0])\n if thread_write == 1 and verbose_level != 0:\n info(messages(language, \"no_vulnerability_found\").format(\n 'xmlrpc_pingback'))\n data = json.dumps({'HOST': target, 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'xmlrpc_pingback_vuln',\n 'DESCRIPTION': messages(language, \"no_vulnerability_found\").format('xmlrpc_pingback'), 'TIME': now(),\n 'CATEGORY': \"scan\", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})\n __log_into_file(log_in_file, 'a', data, language)\n os.remove(thread_tmp_filename)\n\n else:\n warn(messages(language, \"input_target_error\").format(\n 'xmlrpc_pingback_vuln', target))\n", "sub_path": "lib/vuln/wp_xmlrpc_pingback/engine.py", "file_name": "engine.py", "file_ext": "py", "file_size_in_byte": 7625, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "socks.SOCKS5", "line_number": 37, "usage_type": "attribute"}, {"api_name": 
"socks.SOCKS4", "line_number": 38, "usage_type": "attribute"}, {"api_name": "socks.set_default_proxy", "line_number": 43, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 46, "usage_type": "attribute"}, {"api_name": "socks.socksocket", "line_number": 46, "usage_type": "attribute"}, {"api_name": "socket.getaddrinfo", "line_number": 47, "usage_type": "attribute"}, {"api_name": "lib.socks_resolver.engine.getaddrinfo", "line_number": 47, "usage_type": "name"}, {"api_name": "socks.set_default_proxy", "line_number": 49, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 51, "usage_type": "attribute"}, {"api_name": "socks.socksocket", "line_number": 51, "usage_type": "attribute"}, {"api_name": "socket.getaddrinfo", "line_number": 52, "usage_type": "attribute"}, {"api_name": "lib.socks_resolver.engine.getaddrinfo", "line_number": 52, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 53, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 53, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 53, "usage_type": "attribute"}, {"api_name": "sys.stdout.flush", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 54, "usage_type": "attribute"}, {"api_name": "core.targets.target_type", "line_number": 70, "usage_type": "call"}, {"api_name": "core.targets.target_type", "line_number": 72, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 80, "usage_type": "call"}, {"api_name": "re.search", "line_number": 82, "usage_type": "call"}, {"api_name": "core.log.__log_into_file", "line_number": 96, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 97, "usage_type": "call"}, {"api_name": "core._time.now", "line_number": 98, "usage_type": "call"}, {"api_name": "core.log.__log_into_file", "line_number": 101, "usage_type": "call"}, {"api_name": "core.targets.target_type", "line_number": 109, "usage_type": "call"}, {"api_name": "core.targets.target_type", "line_number": 120, "usage_type": "call"}, {"api_name": "core.targets.target_to_host", "line_number": 121, "usage_type": "call"}, {"api_name": "core.load_modules.load_file_path", "line_number": 124, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 125, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 125, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 125, "usage_type": "attribute"}, {"api_name": "core.log.__log_into_file", "line_number": 126, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 131, "usage_type": "call"}, {"api_name": "threading.activeCount", "line_number": 142, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 143, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 156, "usage_type": "call"}, {"api_name": "threading.activeCount", "line_number": 159, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 167, "usage_type": "call"}, {"api_name": "core._time.now", "line_number": 168, "usage_type": "call"}, {"api_name": "core.log.__log_into_file", "line_number": 170, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 171, "usage_type": "call"}]} +{"seq_id": "437529329", "text": "# sqlite.py\n# Copyright (C) 2005, 2006, 2007, 2008, 2009 Michael Bayer mike_mp@zzzcomputing.com\n#\n# This module is part of SQLAlchemy and is released under\n# the MIT License: http://www.opensource.org/licenses/mit-license.php\n\"\"\"Support for the SQLite database.\n\nFor 
information on connecting using a specific driver, see the documentation\nsection regarding that driver.\n\nDate and Time Types\n-------------------\n\nSQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide \nout-of-the-box functionality for translating values between Python `datetime` objects\nand a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime`\nand related types provide date formatting and parsing functionality when SQLite is used.\nThe implementation classes are :class:`_SLDateTime`, :class:`_SLDate` and :class:`_SLTime`.\nThese types represent dates and times as ISO formatted strings, which also nicely\nsupport ordering. There's no reliance on typical \"libc\" internals for these functions\nso historical dates are fully supported.\n\n\n\"\"\"\n\nimport datetime, re, time\n\nfrom sqlalchemy import schema as sa_schema\nfrom sqlalchemy import sql, exc, pool, DefaultClause\nfrom sqlalchemy.engine import default\nfrom sqlalchemy.engine import reflection\nfrom sqlalchemy import types as sqltypes\nfrom sqlalchemy import util\nfrom sqlalchemy.sql import compiler, functions as sql_functions\nfrom sqlalchemy.util import NoneType\n\nfrom sqlalchemy.types import BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL,\\\n FLOAT, INTEGER, NUMERIC, SMALLINT, TEXT, TIME,\\\n TIMESTAMP, VARCHAR\n \n\nclass _NumericMixin(object):\n def bind_processor(self, dialect):\n type_ = self.asdecimal and str or float\n def process(value):\n if value is not None:\n return type_(value)\n else:\n return value\n return process\n\nclass _SLNumeric(_NumericMixin, sqltypes.Numeric):\n pass\n\nclass _SLFloat(_NumericMixin, sqltypes.Float):\n pass\n\n# since SQLite has no date types, we're assuming that SQLite via ODBC\n# or JDBC would similarly have no built-in date support, so the \"string\" based logic\n# would apply to all implementing dialects.\nclass _DateTimeMixin(object):\n def _bind_processor(self, format, elements):\n def process(value):\n if not isinstance(value, (NoneType, datetime.date, datetime.datetime, datetime.time)):\n raise TypeError(\"SQLite Date, Time, and DateTime types only accept Python datetime objects as input.\")\n elif value is not None:\n return format % tuple([getattr(value, attr, 0) for attr in elements])\n else:\n return None\n return process\n\n def _result_processor(self, fn, regexp):\n def process(value):\n if value is not None:\n return fn(*[int(x or 0) for x in regexp.match(value).groups()])\n else:\n return None\n return process\n\nclass _SLDateTime(_DateTimeMixin, sqltypes.DateTime):\n __legacy_microseconds__ = False\n\n def bind_processor(self, dialect):\n if self.__legacy_microseconds__:\n return self._bind_processor(\n \"%4.4d-%2.2d-%2.2d %2.2d:%2.2d:%2.2d.%s\", \n (\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\", \"microsecond\")\n )\n else:\n return self._bind_processor(\n \"%4.4d-%2.2d-%2.2d %2.2d:%2.2d:%2.2d.%06d\", \n (\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\", \"microsecond\")\n )\n\n _reg = re.compile(r\"(\\d+)-(\\d+)-(\\d+)(?: (\\d+):(\\d+):(\\d+)(?:\\.(\\d+))?)?\")\n def result_processor(self, dialect):\n return self._result_processor(datetime.datetime, self._reg)\n\nclass _SLDate(_DateTimeMixin, sqltypes.Date):\n def bind_processor(self, dialect):\n return self._bind_processor(\n \"%4.4d-%2.2d-%2.2d\", \n (\"year\", \"month\", \"day\")\n )\n\n _reg = re.compile(r\"(\\d+)-(\\d+)-(\\d+)\")\n def result_processor(self, dialect):\n return self._result_processor(datetime.date, 
self._reg)\n\nclass _SLTime(_DateTimeMixin, sqltypes.Time):\n __legacy_microseconds__ = False\n\n def bind_processor(self, dialect):\n if self.__legacy_microseconds__:\n return self._bind_processor(\n \"%2.2d:%2.2d:%2.2d.%s\", \n (\"hour\", \"minute\", \"second\", \"microsecond\")\n )\n else:\n return self._bind_processor(\n \"%2.2d:%2.2d:%2.2d.%06d\", \n (\"hour\", \"minute\", \"second\", \"microsecond\")\n )\n\n _reg = re.compile(r\"(\\d+):(\\d+):(\\d+)(?:\\.(\\d+))?\")\n def result_processor(self, dialect):\n return self._result_processor(datetime.time, self._reg)\n\n\nclass _SLBoolean(sqltypes.Boolean):\n def bind_processor(self, dialect):\n def process(value):\n if value is None:\n return None\n return value and 1 or 0\n return process\n\n def result_processor(self, dialect):\n def process(value):\n if value is None:\n return None\n return value == 1\n return process\n\ncolspecs = {\n sqltypes.Boolean: _SLBoolean,\n sqltypes.Date: _SLDate,\n sqltypes.DateTime: _SLDateTime,\n sqltypes.Float: _SLFloat,\n sqltypes.Numeric: _SLNumeric,\n sqltypes.Time: _SLTime,\n}\n\nischema_names = {\n 'BLOB': sqltypes.BLOB,\n 'BOOL': sqltypes.BOOLEAN,\n 'BOOLEAN': sqltypes.BOOLEAN,\n 'CHAR': sqltypes.CHAR,\n 'DATE': sqltypes.DATE,\n 'DATETIME': sqltypes.DATETIME,\n 'DECIMAL': sqltypes.DECIMAL,\n 'FLOAT': sqltypes.FLOAT,\n 'INT': sqltypes.INTEGER,\n 'INTEGER': sqltypes.INTEGER,\n 'NUMERIC': sqltypes.NUMERIC,\n 'REAL': sqltypes.Numeric,\n 'SMALLINT': sqltypes.SMALLINT,\n 'TEXT': sqltypes.TEXT,\n 'TIME': sqltypes.TIME,\n 'TIMESTAMP': sqltypes.TIMESTAMP,\n 'VARCHAR': sqltypes.VARCHAR,\n}\n\n\n\nclass SQLiteCompiler(compiler.SQLCompiler):\n extract_map = compiler.SQLCompiler.extract_map.copy()\n extract_map.update({\n 'month': '%m',\n 'day': '%d',\n 'year': '%Y',\n 'second': '%S',\n 'hour': '%H',\n 'doy': '%j',\n 'minute': '%M',\n 'epoch': '%s',\n 'dow': '%w',\n 'week': '%W'\n })\n\n def visit_now_func(self, fn, **kw):\n return \"CURRENT_TIMESTAMP\"\n \n def visit_char_length_func(self, fn, **kw):\n return \"length%s\" % self.function_argspec(fn)\n \n def visit_cast(self, cast, **kwargs):\n if self.dialect.supports_cast:\n return super(SQLiteCompiler, self).visit_cast(cast)\n else:\n return self.process(cast.clause)\n\n def visit_extract(self, extract):\n try:\n return \"CAST(STRFTIME('%s', %s) AS INTEGER)\" % (\n self.extract_map[extract.field], self.process(extract.expr))\n except KeyError:\n raise exc.ArgumentError(\n \"%s is not a valid extract argument.\" % extract.field)\n\n def limit_clause(self, select):\n text = \"\"\n if select._limit is not None:\n text += \" \\n LIMIT \" + str(select._limit)\n if select._offset is not None:\n if select._limit is None:\n text += \" \\n LIMIT -1\"\n text += \" OFFSET \" + str(select._offset)\n else:\n text += \" OFFSET 0\"\n return text\n\n def for_update_clause(self, select):\n # sqlite has no \"FOR UPDATE\" AFAICT\n return ''\n\n\nclass SQLiteDDLCompiler(compiler.DDLCompiler):\n\n def get_column_specification(self, column, **kwargs):\n colspec = self.preparer.format_column(column) + \" \" + self.dialect.type_compiler.process(column.type)\n default = self.get_column_default_string(column)\n if default is not None:\n colspec += \" DEFAULT \" + default\n\n if not column.nullable:\n colspec += \" NOT NULL\"\n return colspec\n \nclass SQLiteTypeCompiler(compiler.GenericTypeCompiler):\n def visit_binary(self, type_):\n return self.visit_BLOB(type_)\n \nclass SQLiteIdentifierPreparer(compiler.IdentifierPreparer):\n reserved_words = set([\n 'add', 'after', 'all', 
'alter', 'analyze', 'and', 'as', 'asc',\n 'attach', 'autoincrement', 'before', 'begin', 'between', 'by',\n 'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit',\n 'conflict', 'constraint', 'create', 'cross', 'current_date',\n 'current_time', 'current_timestamp', 'database', 'default',\n 'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct',\n 'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive',\n 'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob',\n 'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index',\n 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect', 'into', 'is',\n 'isnull', 'join', 'key', 'left', 'like', 'limit', 'match', 'natural',\n 'not', 'notnull', 'null', 'of', 'offset', 'on', 'or', 'order', 'outer',\n 'plan', 'pragma', 'primary', 'query', 'raise', 'references',\n 'reindex', 'rename', 'replace', 'restrict', 'right', 'rollback',\n 'row', 'select', 'set', 'table', 'temp', 'temporary', 'then', 'to',\n 'transaction', 'trigger', 'true', 'union', 'unique', 'update', 'using',\n 'vacuum', 'values', 'view', 'virtual', 'when', 'where',\n ])\n\nclass SQLiteDialect(default.DefaultDialect):\n name = 'sqlite'\n supports_alter = False\n supports_unicode_statements = True\n supports_unicode_binds = True\n supports_default_values = True\n supports_empty_insert = False\n supports_cast = True\n\n default_paramstyle = 'qmark'\n statement_compiler = SQLiteCompiler\n ddl_compiler = SQLiteDDLCompiler\n type_compiler = SQLiteTypeCompiler\n preparer = SQLiteIdentifierPreparer\n ischema_names = ischema_names\n colspecs = colspecs\n isolation_level = None\n\n def __init__(self, isolation_level=None, **kwargs):\n default.DefaultDialect.__init__(self, **kwargs)\n if isolation_level and isolation_level not in ('SERIALIZABLE',\n 'READ UNCOMMITTED'):\n raise exc.ArgumentError(\"Invalid value for isolation_level. 
\"\n \"Valid isolation levels for sqlite are 'SERIALIZABLE' and \"\n \"'READ UNCOMMITTED'.\")\n self.isolation_level = isolation_level\n\n def visit_pool(self, pool):\n if self.isolation_level is not None:\n class SetIsolationLevel(object):\n def __init__(self, isolation_level):\n if isolation_level == 'READ UNCOMMITTED':\n self.isolation_level = 1\n else:\n self.isolation_level = 0\n\n def connect(self, conn, rec):\n cursor = conn.cursor()\n cursor.execute(\"PRAGMA read_uncommitted = %d\" % self.isolation_level)\n cursor.close()\n pool.add_listener(SetIsolationLevel(self.isolation_level))\n\n def table_names(self, connection, schema):\n if schema is not None:\n qschema = self.identifier_preparer.quote_identifier(schema)\n master = '%s.sqlite_master' % qschema\n s = (\"SELECT name FROM %s \"\n \"WHERE type='table' ORDER BY name\") % (master,)\n rs = connection.execute(s)\n else:\n try:\n s = (\"SELECT name FROM \"\n \" (SELECT * FROM sqlite_master UNION ALL \"\n \" SELECT * FROM sqlite_temp_master) \"\n \"WHERE type='table' ORDER BY name\")\n rs = connection.execute(s)\n except exc.DBAPIError:\n raise\n s = (\"SELECT name FROM sqlite_master \"\n \"WHERE type='table' ORDER BY name\")\n rs = connection.execute(s)\n\n return [row[0] for row in rs]\n\n def has_table(self, connection, table_name, schema=None):\n quote = self.identifier_preparer.quote_identifier\n if schema is not None:\n pragma = \"PRAGMA %s.\" % quote(schema)\n else:\n pragma = \"PRAGMA \"\n qtable = quote(table_name)\n cursor = _pragma_cursor(connection.execute(\"%stable_info(%s)\" % (pragma, qtable)))\n row = cursor.fetchone()\n\n # consume remaining rows, to work around\n # http://www.sqlite.org/cvstrac/tktview?tn=1884\n while cursor.fetchone() is not None:\n pass\n\n return (row is not None)\n\n @reflection.cache\n def get_table_names(self, connection, schema=None, **kw):\n return self.table_names(connection, schema)\n\n @reflection.cache\n def get_view_names(self, connection, schema=None, **kw):\n if schema is not None:\n qschema = self.identifier_preparer.quote_identifier(schema)\n master = '%s.sqlite_master' % qschema\n s = (\"SELECT name FROM %s \"\n \"WHERE type='view' ORDER BY name\") % (master,)\n rs = connection.execute(s)\n else:\n try:\n s = (\"SELECT name FROM \"\n \" (SELECT * FROM sqlite_master UNION ALL \"\n \" SELECT * FROM sqlite_temp_master) \"\n \"WHERE type='view' ORDER BY name\")\n rs = connection.execute(s)\n except exc.DBAPIError:\n raise\n s = (\"SELECT name FROM sqlite_master \"\n \"WHERE type='view' ORDER BY name\")\n rs = connection.execute(s)\n\n return [row[0] for row in rs]\n\n @reflection.cache\n def get_view_definition(self, connection, view_name, schema=None, **kw):\n quote = self.identifier_preparer.quote_identifier\n if schema is not None:\n qschema = self.identifier_preparer.quote_identifier(schema)\n master = '%s.sqlite_master' % qschema\n s = (\"SELECT sql FROM %s WHERE name = '%s'\"\n \"AND type='view'\") % (master, view_name)\n rs = connection.execute(s)\n else:\n try:\n s = (\"SELECT sql FROM \"\n \" (SELECT * FROM sqlite_master UNION ALL \"\n \" SELECT * FROM sqlite_temp_master) \"\n \"WHERE name = '%s' \"\n \"AND type='view'\") % view_name\n rs = connection.execute(s)\n except exc.DBAPIError:\n raise\n s = (\"SELECT sql FROM sqlite_master WHERE name = '%s' \"\n \"AND type='view'\") % view_name\n rs = connection.execute(s)\n\n result = rs.fetchall()\n if result:\n return result[0].sql\n\n @reflection.cache\n def get_columns(self, connection, table_name, schema=None, **kw):\n quote = 
self.identifier_preparer.quote_identifier\n if schema is not None:\n pragma = \"PRAGMA %s.\" % quote(schema)\n else:\n pragma = \"PRAGMA \"\n qtable = quote(table_name)\n c = _pragma_cursor(connection.execute(\"%stable_info(%s)\" % (pragma, qtable)))\n found_table = False\n columns = []\n while True:\n row = c.fetchone()\n if row is None:\n break\n (name, type_, nullable, default, has_default, primary_key) = (row[1], row[2].upper(), not row[3], row[4], row[4] is not None, row[5])\n name = re.sub(r'^\\\"|\\\"$', '', name)\n if default:\n default = re.sub(r\"^\\'|\\'$\", '', default)\n match = re.match(r'(\\w+)(\\(.*?\\))?', type_)\n if match:\n coltype = match.group(1)\n args = match.group(2)\n else:\n coltype = \"VARCHAR\"\n args = ''\n try:\n coltype = self.ischema_names[coltype]\n except KeyError:\n util.warn(\"Did not recognize type '%s' of column '%s'\" %\n (coltype, name))\n coltype = sqltypes.NullType\n if args is not None:\n args = re.findall(r'(\\d+)', args)\n coltype = coltype(*[int(a) for a in args])\n\n columns.append({\n 'name' : name,\n 'type' : coltype,\n 'nullable' : nullable,\n 'default' : default,\n 'primary_key': primary_key\n })\n return columns\n\n @reflection.cache\n def get_primary_keys(self, connection, table_name, schema=None, **kw):\n cols = self.get_columns(connection, table_name, schema, **kw)\n pkeys = []\n for col in cols:\n if col['primary_key']:\n pkeys.append(col['name'])\n return pkeys\n\n @reflection.cache\n def get_foreign_keys(self, connection, table_name, schema=None, **kw):\n quote = self.identifier_preparer.quote_identifier\n if schema is not None:\n pragma = \"PRAGMA %s.\" % quote(schema)\n else:\n pragma = \"PRAGMA \"\n qtable = quote(table_name)\n c = _pragma_cursor(connection.execute(\"%sforeign_key_list(%s)\" % (pragma, qtable)))\n fkeys = []\n fks = {}\n while True:\n row = c.fetchone()\n if row is None:\n break\n (constraint_name, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])\n rtbl = re.sub(r'^\\\"|\\\"$', '', rtbl)\n lcol = re.sub(r'^\\\"|\\\"$', '', lcol)\n rcol = re.sub(r'^\\\"|\\\"$', '', rcol)\n try:\n fk = fks[constraint_name]\n except KeyError:\n fk = {\n 'name' : constraint_name,\n 'constrained_columns' : [],\n 'referred_schema' : None,\n 'referred_table' : rtbl,\n 'referred_columns' : []\n }\n fkeys.append(fk)\n fks[constraint_name] = fk\n\n # look up the table based on the given table's engine, not 'self',\n # since it could be a ProxyEngine\n if lcol not in fk['constrained_columns']:\n fk['constrained_columns'].append(lcol)\n if rcol not in fk['referred_columns']:\n fk['referred_columns'].append(rcol)\n return fkeys\n\n @reflection.cache\n def get_indexes(self, connection, table_name, schema=None, **kw):\n quote = self.identifier_preparer.quote_identifier\n if schema is not None:\n pragma = \"PRAGMA %s.\" % quote(schema)\n else:\n pragma = \"PRAGMA \"\n include_auto_indexes = kw.pop('include_auto_indexes', False)\n qtable = quote(table_name)\n c = _pragma_cursor(connection.execute(\"%sindex_list(%s)\" % (pragma, qtable)))\n indexes = []\n while True:\n row = c.fetchone()\n if row is None:\n break\n # ignore implicit primary key index.\n # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html\n elif not include_auto_indexes and row[1].startswith('sqlite_autoindex'):\n continue\n\n indexes.append(dict(name=row[1], column_names=[], unique=row[2]))\n # loop thru unique indexes to get the column names.\n for idx in indexes:\n c = connection.execute(\"%sindex_info(%s)\" % (pragma, quote(idx['name'])))\n cols = 
idx['column_names']\n while True:\n row = c.fetchone()\n if row is None:\n break\n cols.append(row[2])\n return indexes\n\n\ndef _pragma_cursor(cursor):\n \"\"\"work around SQLite issue whereby cursor.description is blank when PRAGMA returns no rows.\"\"\"\n \n if cursor.closed:\n cursor._fetchone_impl = lambda: None\n return cursor\n", "sub_path": "libs/sqlalchemy/dialects/sqlite/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 20055, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sqlalchemy.types.Numeric", "line_number": 52, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 52, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Float", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 55, "usage_type": "name"}, {"api_name": "sqlalchemy.util.NoneType", "line_number": 64, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 64, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "attribute"}, {"api_name": "datetime.time", "line_number": 64, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.DateTime", "line_number": 80, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 80, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 97, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.Date", "line_number": 99, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 99, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.Time", "line_number": 110, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 110, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.time", "line_number": 127, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types.Boolean", "line_number": 130, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 130, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Boolean", "line_number": 146, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 146, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Date", "line_number": 147, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 147, "usage_type": "name"}, {"api_name": "sqlalchemy.types.DateTime", "line_number": 148, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 148, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Float", "line_number": 149, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 149, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Numeric", "line_number": 150, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 150, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Time", "line_number": 151, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 151, "usage_type": "name"}, {"api_name": "sqlalchemy.types.BLOB", "line_number": 155, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 155, "usage_type": "name"}, {"api_name": "sqlalchemy.types.BOOLEAN", "line_number": 156, "usage_type": "attribute"}, 
{"api_name": "sqlalchemy.types", "line_number": 156, "usage_type": "name"}, {"api_name": "sqlalchemy.types.BOOLEAN", "line_number": 157, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 157, "usage_type": "name"}, {"api_name": "sqlalchemy.types.CHAR", "line_number": 158, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 158, "usage_type": "name"}, {"api_name": "sqlalchemy.types.DATE", "line_number": 159, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 159, "usage_type": "name"}, {"api_name": "sqlalchemy.types.DATETIME", "line_number": 160, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 160, "usage_type": "name"}, {"api_name": "sqlalchemy.types.DECIMAL", "line_number": 161, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 161, "usage_type": "name"}, {"api_name": "sqlalchemy.types.FLOAT", "line_number": 162, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 162, "usage_type": "name"}, {"api_name": "sqlalchemy.types.INTEGER", "line_number": 163, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 163, "usage_type": "name"}, {"api_name": "sqlalchemy.types.INTEGER", "line_number": 164, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 164, "usage_type": "name"}, {"api_name": "sqlalchemy.types.NUMERIC", "line_number": 165, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 165, "usage_type": "name"}, {"api_name": "sqlalchemy.types.Numeric", "line_number": 166, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 166, "usage_type": "name"}, {"api_name": "sqlalchemy.types.SMALLINT", "line_number": 167, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 167, "usage_type": "name"}, {"api_name": "sqlalchemy.types.TEXT", "line_number": 168, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 168, "usage_type": "name"}, {"api_name": "sqlalchemy.types.TIME", "line_number": 169, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 169, "usage_type": "name"}, {"api_name": "sqlalchemy.types.TIMESTAMP", "line_number": 170, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 170, "usage_type": "name"}, {"api_name": "sqlalchemy.types.VARCHAR", "line_number": 171, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 171, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.compiler.SQLCompiler", "line_number": 176, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.compiler", "line_number": 176, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.compiler.SQLCompiler.extract_map.copy", "line_number": 177, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.compiler.SQLCompiler", "line_number": 177, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.compiler", "line_number": 177, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.ArgumentError", "line_number": 208, "usage_type": "call"}, {"api_name": "sqlalchemy.exc", "line_number": 208, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.compiler.DDLCompiler", "line_number": 228, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.compiler", "line_number": 228, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.default", "line_number": 232, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.default", "line_number": 233, "usage_type": "name"}, {"api_name": 
"sqlalchemy.engine.default", "line_number": 234, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.compiler.GenericTypeCompiler", "line_number": 240, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.compiler", "line_number": 240, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.compiler.IdentifierPreparer", "line_number": 244, "usage_type": "attribute"}, {"api_name": "sqlalchemy.sql.compiler", "line_number": 244, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.default.DefaultDialect", "line_number": 265, "usage_type": "attribute"}, {"api_name": "sqlalchemy.engine.default", "line_number": 265, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.default.DefaultDialect.__init__", "line_number": 284, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.default.DefaultDialect", "line_number": 284, "usage_type": "attribute"}, {"api_name": "sqlalchemy.engine.default", "line_number": 284, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.ArgumentError", "line_number": 287, "usage_type": "call"}, {"api_name": "sqlalchemy.exc", "line_number": 287, "usage_type": "name"}, {"api_name": "sqlalchemy.pool.add_listener", "line_number": 305, "usage_type": "call"}, {"api_name": "sqlalchemy.pool", "line_number": 305, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.DBAPIError", "line_number": 321, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 321, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.reflection.cache", "line_number": 346, "usage_type": "attribute"}, {"api_name": "sqlalchemy.engine.reflection", "line_number": 346, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.DBAPIError", "line_number": 365, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 365, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.reflection.cache", "line_number": 350, "usage_type": "attribute"}, {"api_name": "sqlalchemy.engine.reflection", "line_number": 350, "usage_type": "name"}, {"api_name": "sqlalchemy.exc.DBAPIError", "line_number": 390, "usage_type": "attribute"}, {"api_name": "sqlalchemy.exc", "line_number": 390, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.reflection.cache", "line_number": 373, "usage_type": "attribute"}, {"api_name": "sqlalchemy.engine.reflection", "line_number": 373, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.default", "line_number": 415, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 416, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.default", "line_number": 417, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.default", "line_number": 418, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 418, "usage_type": "call"}, {"api_name": "re.match", "line_number": 419, "usage_type": "call"}, {"api_name": "sqlalchemy.util.warn", "line_number": 429, "usage_type": "call"}, {"api_name": "sqlalchemy.util", "line_number": 429, "usage_type": "name"}, {"api_name": "sqlalchemy.types.NullType", "line_number": 431, "usage_type": "attribute"}, {"api_name": "sqlalchemy.types", "line_number": 431, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 433, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.default", "line_number": 440, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.reflection.cache", "line_number": 400, "usage_type": "attribute"}, {"api_name": "sqlalchemy.engine.reflection", "line_number": 400, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.reflection.cache", "line_number": 445, "usage_type": "attribute"}, {"api_name": 
"sqlalchemy.engine.reflection", "line_number": 445, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 470, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 471, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 472, "usage_type": "call"}, {"api_name": "sqlalchemy.engine.reflection.cache", "line_number": 454, "usage_type": "attribute"}, {"api_name": "sqlalchemy.engine.reflection", "line_number": 454, "usage_type": "name"}, {"api_name": "sqlalchemy.engine.reflection.cache", "line_number": 494, "usage_type": "attribute"}, {"api_name": "sqlalchemy.engine.reflection", "line_number": 494, "usage_type": "name"}]} +{"seq_id": "19893653", "text": "from sklearn.metrics import precision_recall_curve\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.metrics import precision_recall_fscore_support\nimport matplotlib.pyplot as plt\nimport pickle\n\npreds = pickle.load(open(\"dlnd_1024_cnn_output.pickle\",\"rb\"))\npredictions = [j for i in preds for j in i[1]]\ngold = [j for i in preds for j in i[2]]\nclass0_scores = [j[0] for i in preds for j in i[0]]\nclass1_scores = [j[1] for i in preds for j in i[0]]\ndel preds\ncf = confusion_matrix(gold,predictions)\nprint(\"Given Non-novel Predicted Non-novel: \"+str(cf[0][0]))\nprint(\"Given Non-novel Predicted Novel: \"+str(cf[0][1]))\nprint(\"Given Novel Predicted Non-novel: \"+str(cf[1][0]))\nprint(\"Given Novel Predicted Novel: \"+str(cf[1][1]))\ninvert_gold = [1-i for i in gold]\nprecision,recall,thresholds = precision_recall_curve(invert_gold,class0_scores)\n#average_precision = average_precision_score(invert_gold, class0_scores)\nplt.xlabel('Recall')\nplt.ylabel('Precision')\nplt.ylim([0.0, 1.05])\nplt.xlim([0.0, 1.0])\n#plt.title('2-class Precision-Recall curve: Average precision={0:0.2f}'.format(average_precision))\nplt.title('Non-novel Precision-Recall curve')\nplt.plot(recall,precision,'b-')\nmng = plt.get_current_fig_manager()\nmng.window.showMaximized()\nplt.show()\n# cf = confusion_matrix(gold,predictions,labels=[0,1])\n# acc= accuracy_score(gold,predictions)\n# p,r,f,_=precision_recall_fscore_support(gold,predictions,labels=[0,1])\n# print(\"\\nConfusion matrix:\\n\"+str(cf))\n# print(\"\\nAccuracy: \"+str(acc))\n# print(\"\\nClass wise precisions: \"+str(p))\n# print(\"Class wise recalls: \"+str(r))\n# print(\"Class wise fscores: \"+str(f))", "sub_path": "dlnd/analyze_cnn_output.py", "file_name": "analyze_cnn_output.py", "file_ext": "py", "file_size_in_byte": 1691, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pickle.load", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 14, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_recall_curve", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_current_fig_manager", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "490167560", "text": "#!/usr/bin/python3.5\n# -*- coding:utf-8 -*-\n# Author: Wang Haoming\n\nimport pandas as pd\nimport re\nimport multiprocessing\nimport subprocess\nimport time\nimport os, sys\n'''\ndf.iloc[] is Series Objects\n'''\n# each_lineNumber = int(\n# int(os.popen(r\"wc -l /home/wanghm/whm/1FG/FG-fix_ORF-fix_nofusion5-rm_repeat.gff | cut -d' ' -f 1\").read()) / 4)\n# print(each_lineNumber)\n# subprocess.Popen(r\"split -l {0} /home/wanghm/whm/1FG/FG-fix_ORF-fix_nofusion5-rm_repeat.gff.bak -d splitGFF_\".format(\n# each_lineNumber), shell=True)\n\ndef worker(splited_gff_file):\n df = pd.read_table(splited_gff_file, header=None, sep=\"\\t\",\n low_memory=False)\n gene_element = df.iloc[:, 2]\n info = df.iloc[:, 8]\n for index, i in enumerate(gene_element):\n if i == \"mRNA\":\n df.iloc[:, 8][index] = \"ID=transcript:{0};Parent=gene:{1};biotype=protein_coding;transcript_id={2}\".format(\n re.split('=|;', info[index])[1], re.split(';|=', info[index])[3], re.split(';|=', info[index])[1])\n if i == \"CDS\":\n print(info[index])\n df.iloc[:, 8][index] = \"ID=CDS:{0};Parent=transcript:{1}\".format(re.split('=', info[index])[1],\n re.split('=', info[index])[1])\n elif i == \"exon\":\n df.iloc[:, 8][index] = \"Parent=transcript:{0};Name={1};exon_id={2}\".format(re.split('=', info[index])[1],\n re.split('=', info[index])[1],\n re.split('=', info[index])[1])\n df.to_csv('/home/wanghm/whm/1FG/newGffOut/%s.csv'%(splited_gff_file.split(\"/\")[6]), index=False, header=False, sep=\"\\t\")\n print(\"A part of work finished!\")\n\n\nif __name__ == \"__main__\":\n pool = multiprocessing.Pool(processes=4)\n for i in range(4):\n splited_gff_file = os.path.abspath(\"splitGFF_0%d\"%(i))\n pool.apply_async(worker, (splited_gff_file, ))\n pool.close()\n pool.join()\n print(\"finished!\")\n\n", "sub_path": "utils/gff_modification.py", "file_name": "gff_modification.py", "file_ext": "py", "file_size_in_byte": 2110, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.read_table", "line_number": 21, "usage_type": "call"}, {"api_name": "re.split", "line_number": 28, "usage_type": "call"}, {"api_name": "re.split", "line_number": 31, "usage_type": "call"}, {"api_name": "re.split", "line_number": 32, "usage_type": "call"}, {"api_name": "re.split", "line_number": 34, "usage_type": "call"}, {"api_name": "re.split", "line_number": 35, "usage_type": "call"}, {"api_name": "re.split", "line_number": 36, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "392427579", "text": "from tools.file_input_output import read_from_file\nfrom tools.utils import get_resources_used\nfrom 
trials import cluster\nfrom trials.cluster import Cluster\n\n\ndef read_in_clusters(input):\n '''\n Read tab-separated read records from the given input file.\n '''\n reads = read_from_file(input_file=input, file_type=\"txt\")\n\n return [e.split(\"\\t\") for e in reads]\n\n\ndef make_clusters(reads):\n no_clusters = get_number_of_clusters(reads)\n clusters = []\n for i in range(no_clusters):\n clusters.append(Cluster())\n\n for read in reads:\n # print(clusters[int(read[1])])\n clusters[int(read[1])-1].add_read(bc_seq=read[0], cluser_size=int(read[2]), umi_no=int(read[3]), gene_name=read[4])\n\n return clusters\n\n\ndef get_barcode_list(clusters):\n barcodes_no = len(clusters)\n barcodes = [None] * barcodes_no\n for cluster_no in range(barcodes_no):\n barcodes[cluster_no] = clusters[cluster_no].cellular_barcode\n\n return barcodes\n\n\ndef get_number_of_clusters(reads):\n cluster_no = 0\n for read in reads:\n cluster_no = max(int(read[1]), cluster_no)\n\n return cluster_no\n\n\ndef get_number_of_genes(gene_list):\n return len(gene_list)\n\n\ndef get_gene_list(clusters):\n gene_list = []\n for cluster in clusters:\n for gene in cluster.gene_counts.items():\n if gene[0] not in gene_list:\n gene_list.append(gene[0])\n return gene_list\n\n\ndef collapse_cluster_umis(clusters):\n for cluster in clusters:\n cluster.collapse_umis()\n return clusters\n\n\ndef calculate_dge_matrix(clusters, gene_list):\n count_matrix = []\n cluster_no = len(clusters)\n gene_no = get_number_of_genes(gene_list)\n # generate empty count matrix of dimension gene_no x cluster_no\n for row in range(gene_no):\n count_matrix.append([0]*cluster_no)\n # populate count matrix; row and col are already zero-based indexes\n for cluster in range(cluster_no):\n for gene in clusters[cluster].gene_counts.items():\n row = gene_list.index(gene[0])\n col = cluster\n count_matrix[row][col] = gene[1][0]\n\n return count_matrix\n\n\ndef save_dge_matrix(dge_matrix, gene_list, barcode_list, output_directory, output_file_name):\n handler = open(output_directory + \"/\" + output_file_name + \".txt\", \"w\")\n\n for barcode in barcode_list:\n handler.write(\"\\t\")\n handler.write(barcode)\n\n handler.write(\"\\n\")\n for gene in range(len(gene_list)):\n handler.write(gene_list[gene])\n for count in dge_matrix[gene]:\n handler.write(\"\\t\")\n handler.write(str(count))\n handler.write(\"\\n\")\n handler.close()\n\n\n@get_resources_used\ndef produce_dge_matrix(reads_path, out_dir, output_file_name):\n reads = read_in_clusters(reads_path)\n clusters = make_clusters(reads)\n clusters_umi_collapsed = collapse_cluster_umis(clusters)\n gene_list = get_gene_list(clusters_umi_collapsed)\n barcode_list = get_barcode_list(clusters_umi_collapsed)\n dge_matrix = calculate_dge_matrix(clusters_umi_collapsed, gene_list)\n save_dge_matrix(dge_matrix, gene_list, barcode_list, out_dir, output_file_name)\n\n\nreads_path = \"/Users/manuel/Desktop/BC_UMI_gen.txt\"\nout_path = \"/Users/manuel/Desktop\"\nout_file_name = \"expression\"\nproduce_dge_matrix(reads_path, out_dir=out_path, output_file_name=out_file_name)\n\n\n# cellular_barcode - bc_cluster_no - cluster_size - umi_cluster_no - gene_name_", "sub_path": "old_files/calculate_DGE_matrix.py", "file_name": "calculate_DGE_matrix.py", "file_ext": "py", "file_size_in_byte": 3326, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "tools.file_input_output.read_from_file", "line_number": 11, "usage_type": "call"}, {"api_name": "trials.cluster.Cluster", "line_number": 20, "usage_type": "call"}, {"api_name": "trials.cluster", "line_number": 52, "usage_type": 
"name"}, {"api_name": "trials.cluster.gene_counts.items", "line_number": 53, "usage_type": "call"}, {"api_name": "trials.cluster.gene_counts", "line_number": 53, "usage_type": "attribute"}, {"api_name": "trials.cluster", "line_number": 53, "usage_type": "name"}, {"api_name": "trials.cluster", "line_number": 60, "usage_type": "name"}, {"api_name": "trials.cluster.collapse_umis", "line_number": 61, "usage_type": "call"}, {"api_name": "trials.cluster", "line_number": 61, "usage_type": "name"}, {"api_name": "trials.cluster", "line_number": 73, "usage_type": "name"}, {"api_name": "trials.cluster", "line_number": 74, "usage_type": "name"}, {"api_name": "trials.cluster", "line_number": 76, "usage_type": "name"}, {"api_name": "tools.utils.get_resources_used", "line_number": 98, "usage_type": "name"}]} +{"seq_id": "573025159", "text": "import os, sys\nimport numpy as np\nimport tensorflow as tf\nfrom Config import *\nfrom Model import *\nfrom DataGenerator import *\nimport cv2, json, glob\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nconfig = Config()\ncmap = plt.get_cmap('viridis')\n\nclass NumpyEncoder(json.JSONEncoder):\n\t\"\"\" Special json encoder for numpy types \"\"\"\n\tdef default(self, obj):\n\t\tif isinstance(obj, (np.int_, np.intc, np.intp, np.int8,\n\t\t\tnp.int16, np.int32, np.int64, np.uint8,\n\t\t\tnp.uint16, np.uint32, np.uint64)):\n\t\t\treturn int(obj)\n\t\telif isinstance(obj, (np.float_, np.float16, np.float32, \n\t\t\tnp.float64)):\n\t\t\treturn float(obj)\n\t\telif isinstance(obj,(np.ndarray,)):\n\t\t\treturn obj.tolist()\n\t\treturn json.JSONEncoder.default(self, obj)\n\ndef savePNG(mat1, mat2, filename):\n\tif mat2.shape[0] < mat1.shape[0]:\n\t\tmat2 = cv2.resize(mat2, (0, 0), fx = 8, fy = 8, interpolation = cv2.INTER_NEAREST)\n\tif mat2.max() > 0:\n\t\tmat2 = mat2 / mat2.max()\n\tm1 = Image.fromarray(mat1, mode = 'RGB')\n\tm1.putalpha(255)\n\tm2 = Image.fromarray(np.array(cmap(mat2) * 255.0, np.uint8)).convert(mode = 'RGB')\n\tm2.putalpha(255)\n\tm2 = np.array(m2)\n\tm2[..., 3] = np.array(mat2 * 255.0, np.uint8)\n\tm2 = Image.fromarray(m2)\n\tImage.alpha_composite(m1, m2).save(filename)\n\treturn\n\nif __name__ == '__main__':\n\targv = {k: v for k, v in zip(sys.argv[1::2], sys.argv[2::2])}\n\tcity_name = argv['--city']\n\timg_bias = np.array(config.PATH[city_name]['bias'])\n\tbackbone = argv['--net']\n\tmode = argv['--mode']\n\tvis = argv['--vis'] != '0'\n\tassert(mode in ['val', 'test'])\n\tprint(city_name, backbone, mode, vis)\n\n\t# Define graph\n\tgraph = Model(\n\t\tbackbone = backbone,\n\t\tmax_num_vertices = config.MAX_NUM_VERTICES,\n\t\tlstm_out_channel = config.LSTM_OUT_CHANNEL, \n\t\tv_out_res = config.V_OUT_RES,\n\t)\n\taa = tf.placeholder(tf.float32)\n\tbb = tf.placeholder(tf.float32)\n\tvv = tf.placeholder(tf.float32)\n\tii = tf.placeholder(tf.float32)\n\too = tf.placeholder(tf.float32)\n\tee = tf.placeholder(tf.float32)\n\tll = tf.placeholder(tf.int32)\n\tff = tf.placeholder(tf.float32)\n\tdd = tf.placeholder(tf.int32)\n\n\ttrain_res = graph.train(aa, bb, vv, ii, oo, ee, ll, dd)\n\tpred_mask_res = graph.predict_mask(aa)\n\tpred_path_res = graph.predict_path(ff, ii)\n\n\t# for v in tf.global_variables():\n\t# \tprint(v.name)\n\t# quit()\n\n\toptimizer = tf.train.AdamOptimizer(learning_rate = config.LEARNING_RATE)\n\ttrain = optimizer.minimize(train_res[0] + train_res[1] + train_res[2] + train_res[3])\n\n\tsaver = tf.train.Saver(max_to_keep = 1)\n\tmodel_path = './Model_%s_%s/' % (backbone, city_name)\n\tfiles = glob.glob(model_path + 
'*.ckpt.meta')\n\tfiles = [(int(file.replace(model_path, '').replace('.ckpt.meta', '')), file) for file in files]\n\tfiles.sort()\n\t_, model_to_load = files[-1]\n\n\ttest_path = './Test_Result_%s_%s' % (backbone, city_name)\n\tif vis:\n\t\tif not os.path.exists(test_path):\n\t\t\tos.popen('mkdir %s' % test_path.replace('./', ''))\n\n\tresult = []\n\ttotal_time = 0\n\ttest_file_path = config.PATH[city_name]['img-%s' % mode]\n\ttest_info = json.load(open(config.PATH[city_name]['ann-%s' % mode]))\n\n\t# Launch graph\n\twith tf.Session() as sess:\n\t\twith open('Eval_%s_%s_%s.out' % (city_name, backbone, mode), 'w') as f:\n\t\t\t# Restore weights\n\t\t\tsaver.restore(sess, model_to_load[:-5])\n\t\t\tfor img_seq, img_info in enumerate(test_info['images']):\n\n\t\t\t\timg_file = test_file_path + '/' + img_info['file_name']\n\t\t\t\timg_id = img_info['id']\n\t\t\t\timg = np.array(Image.open(img_file).resize(config.AREA_SIZE))[..., 0: 3]\n\t\t\t\timg_bias = img.mean(axis = (0, 1))\n\t\t\t\ttime_res = [img_seq, img_id]\n\n\t\t\t\tt = time.time()\n\t\t\t\tfeature, pred_boundary, pred_vertices = sess.run(pred_mask_res, feed_dict = {aa: img - img_bias})\n\n\t\t\t\tif vis:\n\t\t\t\t\tsavePNG(img, np.zeros(config.AREA_SIZE), test_path + '/%d-0.png' % img_id)\n\t\t\t\t\tsavePNG(img, pred_boundary[0, ..., 0] * 255, test_path + '/%d-1.png' % img_id)\n\t\t\t\t\tsavePNG(img, pred_vertices[0, ..., 0] * 255, test_path + '/%d-2.png' % img_id)\n\n\t\t\t\tmap_b, map_v, vertices, edges = getVESimple(pred_boundary[0], pred_vertices[0])\n\t\t\t\tresult.append({\n\t\t\t\t\t'image_id': img_id,\n\t\t\t\t\t'vertices': vertices,\n\t\t\t\t\t'edges': edges\n\t\t\t\t})\n\t\t\t\ttime_res.append(time.time() - t)\n\n\t\t\t\tprint('%d, %d, %.3lf' % tuple(time_res))\n\t\t\t\tf.write('%d, %d, %.3lf\\n' % tuple(time_res))\n\t\t\t\tf.flush()\n\n\t\t\t\tif img_seq % 100 == 0:\n\t\t\t\t\twith open('predictions_simple_%s_%s_%s.json' % (city_name, backbone, mode), 'w') as fp:\n\t\t\t\t\t\tfp.write(json.dumps(result, cls = NumpyEncoder))\n\t\t\t\t\t\tfp.close()\n\n\t\t\twith open('predictions_simple_%s_%s_%s.json' % (city_name, backbone, mode), 'w') as fp:\n\t\t\t\tfp.write(json.dumps(result, cls = NumpyEncoder))\n\t\t\t\tfp.close()\n\n\n", "sub_path": "road_polygon_new/EvaluateSimple.py", "file_name": "EvaluateSimple.py", "file_ext": "py", "file_size_in_byte": 4555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "matplotlib.pyplot.get_cmap", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "json.JSONEncoder", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.int_", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.intc", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.intp", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.int8", "line_number": 17, "usage_type": "attribute"}, {"api_name": "numpy.int16", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.int32", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.int64", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.uint16", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.uint32", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.uint64", "line_number": 19, "usage_type": "attribute"}, {"api_name": 
"numpy.float_", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.JSONEncoder.default", "line_number": 26, "usage_type": "call"}, {"api_name": "json.JSONEncoder", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 30, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 33, "usage_type": "name"}, {"api_name": "PIL.Image.fromarray", "line_number": 35, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 35, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 35, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 38, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "PIL.Image.alpha_composite", "line_number": 40, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 40, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 60, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 61, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 61, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 62, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 62, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 63, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 63, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 64, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 65, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 65, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 66, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 66, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 67, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 67, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.train.Saver", "line_number": 81, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 81, "usage_type": 
"attribute"}, {"api_name": "glob.glob", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.popen", "line_number": 91, "usage_type": "call"}, {"api_name": "json.load", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 107, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 107, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 107, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 115, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 133, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "640892433", "text": "#Yuran Yan 78666452 Sibo Wang 50617929\r\n#This is the I32CFSP file\r\nimport socket\r\nfrom collections import namedtuple\r\nGame_Connection = namedtuple('Game_connection',['socket','game_input','game_output'])\r\ndef connect(address:tuple) -> Game_Connection:\r\n '''connect to the indicated address'''\r\n game_socket = socket.socket()\r\n game_socket.connect(address)\r\n infile = game_socket.makefile('r')\r\n outfile = game_socket.makefile('w')\r\n print(\"You are connected\")\r\n return Game_Connection(socket = game_socket,game_input = infile,game_output = outfile)\r\ndef close(connection:Game_Connection) -> None:\r\n '''close the connection'''\r\n connection.game_input.close()\r\n connection.game_output.close()\r\n connection.socket.close()\r\ndef start_game(connection:Game_Connection):\r\n '''initiate the game with the server'''\r\n user_message = \"I32CFSP_HELLO \" + _ask_user_name()\r\n _message_send(connection,user_message)\r\n server_message = _message_recieve(connection)\r\n _message_send(connection,\"AI_GAME\")\r\n server_message = _message_recieve(connection)\r\ndef make_move(connection:Game_Connection,user_message:str) -> list:\r\n '''drop or pop a column'''\r\n _message_send(connection,user_message)\r\n server_message = _action_recieve(connection)\r\n return server_message\r\ndef _ask_user_name() -> str:\r\n '''ask for the user's name, keep asking if invalid'''\r\n user_name = input('Please enter your name(no space or tab allowed):').strip()\r\n while ' ' in user_name or '\\t' in user_name:\r\n user_name = input(\"user name is not valid, please try another one:\").strip() \r\n return user_name\r\ndef _message_send(connection:Game_Connection,user_message:str) -> None:\r\n '''write the message to the game output'''\r\n connection.game_output.write(user_message + '\\r\\n')\r\n connection.game_output.flush()\r\ndef _message_recieve(connection:Game_Connection) -> str:\r\n '''read the message recieved'''\r\n server_message = connection.game_input.readline()[:-1]\r\n print(server_message)\r\n return server_message\r\ndef _action_recieve(connection:Game_Connection) -> list:\r\n '''recieve the message from the server and check if there's a winner'''\r\n message = []\r\n for i in range(3):\r\n server_message = connection.game_input.readline()[:-1]\r\n message.append(server_message)\r\n if server_message == \"WINNER_RED\" or server_message == \"WINNER_YELLOW\":\r\n break\r\n return message\r\n", "sub_path": "Python projects (ICS 32)/2/intertools.py", "file_name": "intertools.py", "file_ext": "py", "file_size_in_byte": 2467, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "69", "api": [{"api_name": "collections.namedtuple", "line_number": 5, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "443460694", "text": "\"\"\"This module provides a command to open the Rainmeter Skin folder.\"\"\"\n\n\nimport os.path\n\nimport sublime\nimport sublime_plugin\n\nfrom .path.skin_path_provider import get_cached_skin_path\n\n\nclass RainmeterOpenSkinsFolderCommand(sublime_plugin.WindowCommand): #pylint: disable=R0903; sublime text API, methods are overriden\n \"\"\"\n WindowCommands are instantiated once per window.\n\n The Window object may be retrieved via self.window.\n \"\"\"\n\n def run(self):\n \"\"\"Called when the command is run.\"\"\"\n skinspath = get_cached_skin_path()\n if not skinspath or not os.path.exists(skinspath):\n sublime.error_message(\n \"Error while trying to open Rainmeter\" +\n \" skins folder: Directory not found. Please check the\" +\n \" value of your \\\"skins_path\\\" setting.\")\n return\n self.window.run_command(\"open_dir\", {\"dir\": skinspath})\n", "sub_path": "openskinsfolder.py", "file_name": "openskinsfolder.py", "file_ext": "py", "file_size_in_byte": 921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sublime_plugin.WindowCommand", "line_number": 12, "usage_type": "attribute"}, {"api_name": "path.skin_path_provider.get_cached_skin_path", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.path.exists", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 22, "usage_type": "name"}, {"api_name": "sublime.error_message", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "152198137", "text": "from odoo import models, fields, api\nfrom datetime import datetime, date\n\n\nclass res_partner(models.Model):\n _inherit = 'res.partner'\n\n @api.one\n @api.depends('birthday')\n def compute_age(self):\n for partner in self:\n if partner.birthday:\n today = fields.date.today()\n born = datetime.strptime(partner.birthday, '%Y-%m-%d')\n self.age = today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n else:\n self.age = 0\n\n name = fields.Char('Name', translate=True, required=True)\n identification_no = fields.Char('National ID/Iqama')\n age = fields.Integer('Age', compute='compute_age')\n birthday = fields.Date('Birth Day')\n gender = fields.Selection([('male', 'Male'), ('female', 'Female')], 'Gender')\n", "sub_path": "itc_production_module/odootec_hr_custom/models/res_partner.py", "file_name": "res_partner.py", "file_ext": "py", "file_size_in_byte": 832, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "odoo.models.Model", "line_number": 5, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 5, "usage_type": "name"}, {"api_name": "odoo.fields.date.today", "line_number": 13, "usage_type": "call"}, {"api_name": "odoo.fields.date", "line_number": 13, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 13, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "name"}, {"api_name": "odoo.api.one", "line_number": 8, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 8, "usage_type": "name"}, {"api_name": 
"odoo.api.depends", "line_number": 9, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 9, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 19, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 19, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 20, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 20, "usage_type": "name"}, {"api_name": "odoo.fields.Integer", "line_number": 21, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 21, "usage_type": "name"}, {"api_name": "odoo.fields.Date", "line_number": 22, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 22, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 23, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "305130461", "text": "'''\nStatistical Computing for Scientists and Engineers\nHomework 1\nFall 2018\nUniversity of Notre Dame\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats\nimport errno\nimport os.path\n\ndef readCSV(fileDir='.'):\n '''\n Reads in .csv data file\n @args: fileDir = path to file\n '''\n file_path = os.path.join(fileDir, 'camera.csv')\n \n if(os.path.exists(file_path)):\n return np.genfromtxt(file_path, delimiter=',',skip_header=2)\n else:\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), file_path)\n\ndef mleGuassian2D(x):\n '''\n Does MLE for a two deminsional guassian\n @args: x = np.array([2xN]) training data\n @returns: mu, sigma = mean [2x1] and std [2x2] of trained guassian\n '''\n N = x.shape[1]\n mu = np.zeros((2,1))\n sig2 = np.eye(2)\n\n ##############################################################\n # INSERT CODE BELOW\n ##############################################################\n \n ##############################################################\n # INSERT CODE ABOVE\n ##############################################################\n return mu, np.sqrt(sig2)\n\ndef getGuassianDist(mu0, covar0, X, Y):\n '''\n Used to plot Gaussian contour\n @args: mu0 = np.array([Dx1]) vector of means, \n covar0 = np.array([DxD]) convariance array,\n X, Y = [NxM] mesh array with points to eval Gaussian at\n @returns: Z = [NxM] array of the contour field\n '''\n Z = np.zeros(X.shape)\n #Get guassian distribution\n g_reg = 1.0/(2.0*np.pi) *1/(np.linalg.det(covar0)**(0.5))\n covar_i = np.linalg.inv(covar0)\n for (i,j), val in np.ndenumerate(X):\n x = np.expand_dims(np.array([X[i,j], Y[i,j]]), axis=1)\n Z[i,j] = g_reg*np.exp(-0.5*((x-mu0).T.dot(covar_i)).dot((x-mu0)))\n\n return Z\n\n\nif __name__== \"__main__\":\n \n # Start by reading in the camera data\n data = readCSV()\n # Get data\n x0 = np.stack([data[:,2], data[:,1]], axis=0)\n # MLE\n mu, sigma = mleGuassian2D(x0)\n\n print('Plotting Figure')\n # Plot Normalized Histogram\n plt.scatter(x0[0], x0[1], color='k', marker='x',label=\"Training Data\")\n # Plot MLE Guassian\n x = np.linspace(min(x0[0])-100,max(x0[0])+100,150)\n y = np.linspace(min(x0[1])-5,max(x0[1])+5,150)\n X, Y = np.meshgrid(x, y)\n Z = getGuassianDist(mu, np.power(sigma,2), X, Y)\n #Plot guassian\n cmap = plt.cm.brg\n levels = 15\n plt.contour(X, Y, Z, levels, cmap=plt.cm.get_cmap(cmap, levels), zorder=1)\n\n plt.title('Camera Resolution vs. 
Year')\n plt.xlabel('Camera Resolution Width (Pixels)')\n plt.ylabel('Camera Release Year')\n plt.legend()\n \n # plt.savefig('Hm1-P5d.png', bbox_inches='tight')\n plt.show()", "sub_path": "Homework/HW1/Hm1-P5d.py", "file_name": "Hm1-P5d.py", "file_ext": "py", "file_size_in_byte": 2756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 22, "usage_type": "call"}, {"api_name": "errno.ENOENT", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.strerror", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.linalg.det", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.linalg.inv", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.ndenumerate", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 82, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 84, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 87, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 87, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": 
"name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "142868829", "text": "import os\nimport numpy as np\nfrom chow_test import p_value\nimport matplotlib.pyplot as plt\n\n\ndef split_ranking(prem2socre):\n \"\"\"\n @Args:\n the sorted list of pairs (prem, score)\n @Returns:\n scores: the sorted scores\n last_zero_index: the index of the last zero in the scores list\n first_inf_index: the index of the first inf in the scores list\n \"\"\"\n scores = np.array([pair[1] for pair in prem2socre])\n try:\n last_zero_index = np.max(np.where(0.0 == scores))\n except:\n last_zero_index = None\n try:\n first_inf_index = np.min(np.where(float(\"inf\") == scores))\n except:\n first_inf_index = None\n return scores, last_zero_index, first_inf_index\n\n\ndef mse_loss(socres, pred_scores):\n return np.mean((socres - pred_scores) ** 2)\n\n\ndef plot_figure(left_coeff, right_coeff,\n x, scores, min_cut, start_index, end_index):\n left_x = x[start_index: min_cut]\n right_x = x[min_cut: end_index]\n pred_left_scores = left_coeff[0] * left_x + left_coeff[1]\n pred_right_scores = right_coeff[0] * right_x + right_coeff[1]\n fig, ax = plt.subplots()\n ax.scatter(x[start_index: end_index],\n scores[start_index: end_index], marker=\"*\")\n ax.plot(left_x, pred_left_scores, color=\"darkorange\")\n ax.plot(right_x, pred_right_scores, color=\"forestgreen\")\n plt.savefig(os.path.join(\"../figures\", str(min_cut)))\n\n\ndef linear_regression_selection(prem2socre, show_figure=False):\n prems = [pair[0] for pair in prem2socre]\n scores, last_zero_index, first_inf_index = split_ranking(prem2socre)\n # N = len(scores)\n # candicate_scores = scores[last_zero_index + 1: first_inf_index]\n x = np.arange(len(scores), dtype=np.float64)\n\n start_index = last_zero_index + 1 if last_zero_index else 0\n end_index = first_inf_index if first_inf_index else len(prems)\n stop_flag = False\n # min_cuts = []\n while not stop_flag:\n p_min = float(\"inf\")\n min_cut = -1\n # min_coeff_total = np.zeros(2)\n min_left_coeff = np.zeros(2)\n min_right_coeff = np.zeros(2)\n for i in range(start_index + 1, end_index):\n left_scores = scores[start_index: i]\n right_scores = scores[i: end_index]\n left_x = x[start_index: i]\n right_x = x[i: end_index]\n p, _, left_coeff, right_coeff = p_value(\n left_scores, left_x, right_scores, right_x)\n if p < p_min:\n p_min = p\n # the last index of left part\n min_cut = i\n min_left_coeff = left_coeff\n min_right_coeff = right_coeff\n if p_min > 1e-64:\n stop_flag = True\n else:\n if show_figure:\n plot_figure(min_left_coeff, min_right_coeff,\n x, scores, min_cut, start_index, end_index)\n # min_cuts.append(min_cut)\n end_index = min_cut\n\n if min_cut > -1:\n actual_index = np.max(np.where(scores[i - 1] == scores))\n selected_prems = prems[: actual_index + 1]\n else:\n selected_prems = prems[: first_inf_index]\n # actual_cut_indexs = [\n # np.max(np.where(scores[s - 1] == scores)) + 1 for s in min_cuts]\n # selected_prems_list = [prems[: index + 1] for index in actual_cut_indexs]\n # cut2prem = dict(zip(min_cuts, selected_prems_list))\n\n return selected_prems\n\n\ndef compute_ranking_density(thm, proofs, ranking):\n useful_prem_list = proofs[thm]\n max_indexs = []\n for useful_prems in useful_prem_list:\n try:\n max_index = 
max([ranking.index(prem)\n for prem in useful_prems])\n except:\n max_index = -1\n max_indexs.append(max_index)\n temp_densities = [(len(useful_prems) + 1) / (max_index + 2)\n for index in max_indexs]\n density = max(temp_densities)\n print(density)\n return density\n\n\ndef compute_ranking_selectivity(thm, proofs, ranking):\n \"\"\"\n proofs: Proofs class\n ranking_root: the dir to the ranking\n \"\"\"\n\n useful_prem_list = proofs[thm]\n\n temp = []\n for useful_prems in useful_prem_list:\n max_index = max([ranking.index(prem)\n for prem in useful_prems])\n\n temp.append((max_index + 2) / (len(ranking) + 1))\n\n density = min(temp)\n print(density)\n return density\n\n\ndef process_problem(thm, ranking_dir,\n statements, problem_dir,\n E_output_dir, Vampire_output_dir):\n prem2score = scored_premises_from_csv_ranking(thm, ranking_dir)\n ranking = linear_regression_selection(prem2score)\n input_file = os.path.join(problem_dir, thm)\n E_output_file = os.path.join(E_output_dir, thm)\n Vampire_output_file = os.path.join(Vampire_output_dir, thm)\n write_problem(thm, ranking, statements, input_file)\n run_E_prover(input_file, E_output_file, cpu_time=60)\n run_Vampire_prover(input_file, Vampire_output_file, cpu_time=60)\n\n\n# def set_parameters():\n# params = argparse.ArgumentParser()\n# params.add_argument(\"--ranking_dir\",\n# type=str,\n# default=\"../ranking/weighted_average\",\n# help=\"the root path to save ranking csv file\")\n# params.add_argument(\"--problem_dir\",\n# type=str,\n# default=\"../problem/weighted_average_chow_cut\",\n# help=\"the root path to save problems\")\n# params.add_argument(\"--E_output_dir\",\n# type=str,\n# default=\"../E_output/weighted_average_chow_cut\",\n# help=\"the root paht to save E outputs\")\n# params.add_argument(\"--Vampire_output_dir\",\n# type=str,\n# default=\"../Vampire_output/weighted_average_chow_cut\",\n# help=\"the root paht to save Vampire outputs\")\n# args = params.parse_args(args=[])\n# return args\n\n\n# if __name__ == \"__main__\":\n# statements = Statements(\"../data/statements\")\n# problem_order = Problem_Order(\"../data/ProblemsInMMLOrder\")\n\n# args = set_parameters()\n\n# assert os.path.exists(args.ranking_dir)\n# if not os.path.exists(args.problem_dir):\n# os.makedirs(args.problem_dir)\n# if not os.path.exists(args.E_output_dir):\n# os.makedirs(args.E_output_dir)\n# if not os.path.exists(args.Vampire_output_dir):\n# os.makedirs(args.Vampire_output_dir)\n\n# Parallel(n_jobs=10)(delayed(process_problem)(thm, args.ranking_dir,\n# statements,\n# args.problem_dir,\n# args.E_output_dir,\n# args.Vampire_output_dir)\n# for thm in tqdm(problem_order))\n", "sub_path": "linear_cut/process.py", "file_name": "process.py", "file_ext": "py", "file_size_in_byte": 6958, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.array", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 43, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 62, "usage_type": "call"}, {"api_name": "chow_test.p_value", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 142, "usage_type": "call"}, {"api_name": "os.path", "line_number": 142, "usage_type": "attribute"}]} +{"seq_id": "155458560", "text": "from __future__ import print_function\n\nimport json\nimport logging\n\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\n\n# this adds the component-level `lib` directory to the Python import path\nimport sys, os\n# get this file's directory independent of where it's run from\nhere = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(here, \"../../\"))\nsys.path.append(os.path.join(here, \"../../vendored\"))\n\n# import the shared library, now anything in component/lib/__init__.py can be\n# referenced as `lib.something`\nimport lib\nfrom boto3.dynamodb.conditions import Attr\n\ndef handler(event, context):\n log.debug(\"Received event {}\".format(json.dumps(event)))\n\n # Test for required attributes\n required_keys = ['altname', 'location_id']\n lib.validation.check_keys(required_keys, event, False)\n\n try:\n lib.LocationAltnamesTable.delete_item(\n Key={'location_id': event['location_id'], 'altname': event['altname']}, \n ConditionExpression=Attr('location_id').eq(event['location_id']) & Attr('altname').eq(event['altname'])\n )\n except lib.exceptions.ClientError as ce:\n if \"ConditionalCheckFailedException\" in ce.message:\n raise lib.exceptions.NotFoundException(\"Object '%s'+'%s' not found.\" % (event['location_id'], event['altname']))\n raise lib.exceptions.InternalServerException(ce.message)\n", "sub_path": "lookup/functions/delete/handler.py", "file_name": "handler.py", "file_ext": "py", "file_size_in_byte": 1386, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 12, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 22, "usage_type": "call"}, {"api_name": "lib.validation.check_keys", "line_number": 26, "usage_type": "call"}, {"api_name": "lib.validation", "line_number": 26, "usage_type": "attribute"}, {"api_name": "lib.LocationAltnamesTable.delete_item", "line_number": 29, "usage_type": "call"}, {"api_name": "lib.LocationAltnamesTable", "line_number": 29, "usage_type": "attribute"}, {"api_name": "boto3.dynamodb.conditions.Attr", "line_number": 31, "usage_type": "call"}, {"api_name": "lib.exceptions", "line_number": 33, "usage_type": "attribute"}, {"api_name": "lib.exceptions.NotFoundException", "line_number": 35, "usage_type": "call"}, {"api_name": "lib.exceptions", "line_number": 35, "usage_type": "attribute"}, {"api_name": "lib.exceptions.InternalServerException", "line_number": 36, "usage_type": "call"}, {"api_name": "lib.exceptions", "line_number": 36, "usage_type": "attribute"}]} +{"seq_id": "239906333", "text": "\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.axisartist.angle_helper as angle_helper\nfrom matplotlib.projections import PolarAxes\nfrom matplotlib.transforms import Affine2D\nfrom mpl_toolkits.axisartist import SubplotHost\nfrom mpl_toolkits.axisartist import GridHelperCurveLinear\n\n\ndef curvelinear_test2(fig, rect=111):\n \"\"\"\n Polar projection, but in a rectangular box.\n \"\"\"\n\n # see demo_curvelinear_grid.py for details\n tr = Affine2D().translate(0, 90) + Affine2D().scale(np.pi / 180., 1.) + \\\n PolarAxes.PolarTransform()\n\n extreme_finder = angle_helper.ExtremeFinderCycle(10, 60,\n lon_cycle=360,\n lat_cycle=None,\n lon_minmax=None,\n lat_minmax=(-90, np.inf),\n )\n # Changes theta gridline count\n grid_locator1 = angle_helper.LocatorHMS(12)\n grid_locator2 = angle_helper.LocatorDMS(6)\n tick_formatter1 = angle_helper.FormatterHMS()\n tick_formatter2 = angle_helper.FormatterDMS()\n\n grid_helper = GridHelperCurveLinear(tr,\n extreme_finder=extreme_finder,\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=tick_formatter2\n )\n\n ax1 = SubplotHost(fig, rect, grid_helper=grid_helper)\n\n # make ticklabels of right and top axis visible.\n ax1.axis[\"right\"].major_ticklabels.set_visible(True)\n ax1.axis[\"top\"].major_ticklabels.set_visible(True)\n ax1.axis[\"bottom\"].major_ticklabels.set_visible(True)\n # let right and bottom axis show ticklabels for 1st coordinate (angle)\n ax1.axis[\"right\"].get_helper().nth_coord_ticks = 0\n ax1.axis[\"bottom\"].get_helper().nth_coord_ticks = 0\n\n #\n fig.add_subplot(ax1)\n\n grid_helper = ax1.get_grid_helper()\n\n # You may or may not need these - they set the view window explicitly\n # rather than using the default as determined by matplotlib with extreme\n # finder.\n ax1.set_aspect(1.)\n ax1.set_xlim(-4, 25) # moves the origin left-right in ax1\n ax1.set_ylim(-2.5, 30) # moves the origin up-down\n\n ax1.set_ylabel('$DEC\\,(^{\\circ})$')\n ax1.set_xlabel('$RA\\,(h)$')\n ax1.grid(True)\n # ax1.grid(linestyle='--', which='x') # either keyword applies to both\n # ax1.grid(linestyle=':', which='y') # sets of gridlines\n\n return ax1, tr\n\n\ndef ra_dec_plots(pl_params):\n '''\n Generate RA vs DEC plots.\n '''\n\n fig, gs, ra, dec, bb_ra, bb_dec, data_arr, v_min, v_max, rad_pc, z_lab =\\\n pl_params\n\n # tr.transform_point((x, 0)) is always (0,0)\n 
ax1, tr = curvelinear_test2(fig, gs)\n\n # Define colormap.\n cm = plt.cm.get_cmap('RdYlBu_r')\n\n # Plot literature clusters.\n # Get transformed data.\n ra_dec_tr = tr.transform(zip(ra, dec))\n # Size relative to the clusters actual size in pc.\n if gs == 326:\n # Plot Bica database.\n bb_ra_dec_tr = tr.transform(zip(bb_ra, bb_dec))\n plt.scatter(bb_ra_dec_tr[:, 0], bb_ra_dec_tr[:, 1], marker='.', s=8,\n c='k', lw=0.5, zorder=1)\n SC = ax1.scatter(ra_dec_tr[:, 0], ra_dec_tr[:, 1], marker='o', s=20,\n c='r', lw=0.1, zorder=9)\n else:\n siz = np.asarray(rad_pc) * 4. # if gs != 325 else 20\n SC = ax1.scatter(ra_dec_tr[:, 0], ra_dec_tr[:, 1], marker='o', s=siz,\n c=data_arr, cmap=cm, vmin=v_min, vmax=v_max, lw=0.1,\n zorder=9)\n # Colorbar\n cbar = plt.colorbar(SC, shrink=1., pad=0.05)\n cbar.ax.tick_params(labelsize=8)\n # cbar.set_clim(0., 0.4)\n cbar.set_label(z_lab, fontsize=12)\n\n # Plot clouds center. Central coords stored in degrees.\n c_SMC = [13.1875, -72.82861111]\n c_LMC = [80.2375, -69.47805556]\n # Get transformed data.\n clouds_cent = tr.transform([c_SMC, c_LMC])\n plt.scatter(clouds_cent[:, 0], clouds_cent[:, 1], marker='v', s=65,\n c='b', edgecolor='w', lw=0.8, zorder=10)\n", "sub_path": "functions/ra_dec_map.py", "file_name": "ra_dec_map.py", "file_ext": "py", "file_size_in_byte": 4370, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "matplotlib.transforms.Affine2D", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 17, "usage_type": "attribute"}, {"api_name": "matplotlib.projections.PolarAxes.PolarTransform", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.projections.PolarAxes", "line_number": 18, "usage_type": "name"}, {"api_name": "mpl_toolkits.axisartist.angle_helper.ExtremeFinderCycle", "line_number": 20, "usage_type": "call"}, {"api_name": "mpl_toolkits.axisartist.angle_helper", "line_number": 20, "usage_type": "name"}, {"api_name": "numpy.inf", "line_number": 24, "usage_type": "attribute"}, {"api_name": "mpl_toolkits.axisartist.angle_helper.LocatorHMS", "line_number": 27, "usage_type": "call"}, {"api_name": "mpl_toolkits.axisartist.angle_helper", "line_number": 27, "usage_type": "name"}, {"api_name": "mpl_toolkits.axisartist.angle_helper.LocatorDMS", "line_number": 28, "usage_type": "call"}, {"api_name": "mpl_toolkits.axisartist.angle_helper", "line_number": 28, "usage_type": "name"}, {"api_name": "mpl_toolkits.axisartist.angle_helper.FormatterHMS", "line_number": 29, "usage_type": "call"}, {"api_name": "mpl_toolkits.axisartist.angle_helper", "line_number": 29, "usage_type": "name"}, {"api_name": "mpl_toolkits.axisartist.angle_helper.FormatterDMS", "line_number": 30, "usage_type": "call"}, {"api_name": "mpl_toolkits.axisartist.angle_helper", "line_number": 30, "usage_type": "name"}, {"api_name": "mpl_toolkits.axisartist.GridHelperCurveLinear", "line_number": 32, "usage_type": "call"}, {"api_name": "mpl_toolkits.axisartist.SubplotHost", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 83, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 97, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "529381884", "text": "import pytest\n\nfrom pretalx.orga.templatetags.review_score import (\n _review_score_number, _review_score_override,\n)\n\n\n@pytest.fixture\ndef event_with_score_context(event):\n event.settings.set('review_score_name_0', 'meh.')\n event.settings.set('review_score_name_1', 'okay')\n event.settings.set('review_score_name_2', 'good')\n event.settings.set('review_score_name_3', 'great')\n event.settings.set('review_min_score', 0)\n event.settings.set('review_max_score', 3)\n\n class request:\n pass\n\n r = request()\n r.event = event\n return {'request': r}\n\n\n@pytest.mark.parametrize('score,expected', (\n (3, '3/3 (»great«)'),\n (2, '2/3 (»good«)'),\n (1, '1/3 (»okay«)'),\n (0, '0/3 (»meh.«)'),\n (1.5, '1.5/3'),\n (None, 'ø'),\n))\n@pytest.mark.django_db()\ndef test_templatetag_review_score(score, expected, event_with_score_context):\n assert _review_score_number(event_with_score_context, score) == expected\n\n\n@pytest.mark.parametrize('positive,negative,expected', (\n (1, 0, ''),\n (0, 1, ''),\n (2, 0, ' 2'),\n (0, 2, ' 2'),\n (1, 1, ' 1 1'),\n))\n@pytest.mark.django_db()\ndef test_templatetag_review_score_overrid(positive, negative, expected):\n assert _review_score_override(positive, negative) == expected\n", "sub_path": "src/tests/unit/orga/test_templatetags.py", "file_name": "test_templatetags.py", "file_ext": "py", "file_size_in_byte": 1630, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pytest.fixture", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pretalx.orga.templatetags.review_score._review_score_number", "line_number": 35, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 25, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pytest.mark.django_db", "line_number": 33, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pretalx.orga.templatetags.review_score._review_score_override", "line_number": 47, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 38, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pytest.mark.django_db", "line_number": 45, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 45, "usage_type": "attribute"}]} +{"seq_id": "527914394", "text": "# imports for tornado\nimport tornado\nfrom tornado import web, httpserver, ioloop\n\n# imports for info output\nimport os\n\n\nclass DummyHandler(tornado.web.RequestHandler):\n #def get(self):\n #filename = self.get_argument(\"obj\", strip=False)\n pass\n\n\ndef CreateInfoHandler(metadata):\n name = str(metadata.name ).replace(\"\\n\", \"
\")\n version = str(metadata.version ).replace(\"\\n\", \"
\")\n description = str(metadata.description).replace(\"\\n\", \"
\")\n copyright = str(metadata.copyright ).replace(\"\\n\", \"
\")\n license = str(metadata.license ).replace(\"\\n\", \"
\")\n class InfoHandler(tornado.web.RequestHandler):\n # Emits a string which describes the purpose of the analytics\n def get(self):\n info = \"\"\"\n
<html>\n<h1>{name:s} - {version:s}</h1>\n<br/>\n<br/>\n<p>{description:s}</p>\n<br/>\n<br/>\n<p>{license:s}</p>\n<br/>\n<br/>\n<p>{copyright:s}</p>\n</html>
\n \"\"\".strip().format(\n name = name,\n version = version,\n description = description,\n license = license,\n copyright = copyright\n )\n self.write(info)\n return InfoHandler\n\n\nclass Router(tornado.web.Application):\n def __init__(self, metadata, handlers):\n for key in [\"description\", \"license\"]:\n fpath = metadata.__getattribute__(key)\n if os.path.isfile(fpath):\n with open(fpath) as file:\n metadata.__setattr__(key, file.read())\n\n handlers = [\n (r'/', CreateInfoHandler(metadata)),\n (r'/analyze/', handlers.get(\"analyze\") or DummyHandler),\n (r'/feed/', handlers.get(\"feed\") or DummyHandler),\n (r'/check/', handlers.get(\"check\") or DummyHandler),\n (r'/results/', handlers.get(\"results\") or DummyHandler),\n (r'/status/', handlers.get(\"status\") or DummyHandler),\n ]\n\n settings = dict(\n template_path=os.path.join(os.path.dirname(__file__), 'templates'),\n static_path=os.path.join(os.path.dirname(__file__), 'static'),\n )\n tornado.web.Application.__init__(self, handlers, **settings)\n self.engine = None\n\n def ListenAndServe(self, httpbinding):\n server = tornado.httpserver.HTTPServer(self)\n server.listen(httpbinding)\n tornado.ioloop.IOLoop.instance().start()\n", "sub_path": "python3/services/router.py", "file_name": "router.py", "file_ext": "py", "file_size_in_byte": 2416, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "tornado.web", "line_number": 9, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 21, "usage_type": "attribute"}, {"api_name": "tornado.web", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 62, "usage_type": "call"}, {"api_name": "tornado.web.Application.__init__", "line_number": 64, "usage_type": "call"}, {"api_name": "tornado.web", "line_number": 64, "usage_type": "attribute"}, {"api_name": "tornado.httpserver.HTTPServer", "line_number": 68, "usage_type": "call"}, {"api_name": "tornado.httpserver", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tornado.ioloop.IOLoop.instance", "line_number": 70, "usage_type": "call"}, {"api_name": "tornado.ioloop", "line_number": 70, "usage_type": "attribute"}]} +{"seq_id": "110262414", "text": "from __future__ import absolute_import, unicode_literals\n\nfrom contextlib import closing\n\nimport kombu\n\n\nclass BasicFunctionality(object):\n\n def test_connect(self, connection):\n connection.connect()\n connection.close()\n\n def test_publish_consume(self, connection):\n test_queue = kombu.Queue('test', routing_key='test')\n\n def callback(body, message):\n assert body == {'hello': 'world'}\n assert message.content_type == 'application/x-python-serialize'\n message.delivery_info['routing_key'] == 'test'\n message.delivery_info['exchange'] == ''\n message.ack()\n assert message.payload == body\n\n with connection as conn:\n with conn.channel() as channel:\n producer = kombu.Producer(channel)\n producer.publish(\n {'hello': 'world'},\n retry=True,\n 
exchange=test_queue.exchange,\n routing_key=test_queue.routing_key,\n declare=[test_queue],\n serializer='pickle'\n )\n\n consumer = kombu.Consumer(\n conn, [test_queue], accept=['pickle']\n )\n consumer.register_callback(callback)\n with consumer:\n conn.drain_events(timeout=1)\n\n def test_simple_queue_publish_consume(self, connection):\n with connection as conn:\n with closing(conn.SimpleQueue('simple_queue_test')) as queue:\n queue.put({'Hello': 'World'}, headers={'k1': 'v1'})\n message = queue.get(timeout=1)\n assert message.payload == {'Hello': 'World'}\n assert message.content_type == 'application/json'\n assert message.content_encoding == 'utf-8'\n assert message.headers == {'k1': 'v1'}\n message.ack()\n\n def test_simple_buffer_publish_consume(self, connection):\n with connection as conn:\n with closing(conn.SimpleBuffer('simple_buffer_test')) as buf:\n buf.put({'Hello': 'World'}, headers={'k1': 'v1'})\n message = buf.get(timeout=1)\n assert message.payload == {'Hello': 'World'}\n assert message.content_type == 'application/json'\n assert message.content_encoding == 'utf-8'\n assert message.headers == {'k1': 'v1'}\n message.ack()\n", "sub_path": "t/integration/common.py", "file_name": "common.py", "file_ext": "py", "file_size_in_byte": 2487, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "kombu.Queue", "line_number": 15, "usage_type": "call"}, {"api_name": "kombu.Producer", "line_number": 27, "usage_type": "call"}, {"api_name": "kombu.Consumer", "line_number": 37, "usage_type": "call"}, {"api_name": "contextlib.closing", "line_number": 46, "usage_type": "call"}, {"api_name": "contextlib.closing", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "536014025", "text": "import sys\nimport pymysql\n# Adds a stock and table name to database\n\nconn = pymysql.connect('localhost', 'leemg', 'MarLee21!', 'CAP_stock2020')\n\nstock_name = sys.argv[1]\ntable_name = sys.argv[2]\n\ncursor = conn.cursor()\n\nquery = \"SELECT * FROM \" + table_name\ntry:\n\tmsg = cursor.execute(query)\n\tprint(\"Stock already added\")\nexcept pymysql.err.ProgrammingError as e:\n\tcode, msg = e.args\n\tif code == 1146:\n\t\tinsert = \"INSERT INTO List_Of_Small_Stocks(name,tablename) \" \\\n\t\t\t\t\"VALUES(%s,%s)\"\n\t\targs = (stock_name, table_name)\n\t\tcursor.execute(insert, args)\n\t\ttable = \"CREATE TABLE \"+table_name+\" (username VARCHAR(50),followers INT,following INT,date_tweeted DATE,retweet_author VARCHAR(50),retweet_followers INT,retweet_following INT,retweets INT,favorites INT,status VARCHAR(800))\"\n\t\tcursor.execute(table)\n\t\tconn.commit()\n\t\tprint(\"Stock added\")\n\telse:\n\t\tprint(e)\n\t\tconn.rollback()\nfinally:\n\tconn.close()\n\tcursor.close()", "sub_path": "Twitter/AddSmallStock.py", "file_name": "AddSmallStock.py", "file_ext": "py", "file_size_in_byte": 916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pymysql.connect", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pymysql.err", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "556511251", "text": "from functools import reduce\nimport random\nimport pandas as pd\nimport names\nfrom faker import Faker\n\nFAKE = Faker()\n\ndef gen_airports(n: int):\n r = range(1, n + 1)\n airportID = r\n name = list(map(lambda a: 
FAKE.last_name(), r))\n abbreviation = list(map(lambda a: \"\".join(FAKE.random_letters()[:4]), r))\n cityID = list(map(lambda a: random.randint(1, n), r))\n coloms_name = (\"airportID\", \"fullName\", \"abbreviation\", \"cityID\")\n t_name = \"airport\"\n\n data = \"\"\n for i in r:\n data += f\"insert into {t_name} ({', '.join(coloms_name)}) values ({airportID[i-1]}, '{name[i-1]}', '{abbreviation[i-1]}', {cityID[i-1]});\\n\"\n\n with open(\"gen_airports.sql\", \"w\") as f:\n f.write(data)\n\n\ngen_airports(4000)", "sub_path": "2 data insert/generateAirports.py", "file_name": "generateAirports.py", "file_ext": "py", "file_size_in_byte": 734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "faker.Faker", "line_number": 7, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "464454738", "text": "\"\"\"reddot URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom . import views\n\n\nurlpatterns = [\n url(r'^inbox/$', views.Inbox.as_view(), name='inbox'),\n url(r'^messages/$', views.Messages.as_view(), name='messages'),\n url(r'^mentions/$', views.Mentions.as_view(), name='mentions'),\n url(r'^compose/$', views.SendPM.as_view(), name='compose'),\n url(r'^sent/$', views.Sent.as_view(), name='sent'),\n url(r'^post_replies/$', views.PostReplies.as_view(), name=\"post_replies\"),\n url(r'compose/to=(?P[\\w]+)$', views.SenPMTo.as_view(), name='pm'),\n]", "sub_path": "redditlike/message/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "533473926", "text": "import copy\nimport functools\nimport logging\nimport os\nimport shutil\nimport textwrap\nfrom importlib import import_module\n\nimport torch\nimport torch.fx as fx\n\nfrom torch._dynamo.debug_utils import (\n AccuracyError,\n backend_accuracy_fails,\n BUCK_CMD_PREFIX,\n BuckTargetWriter,\n extra_imports,\n generate_config_string,\n helper_for_dump_minify,\n minifier_dir,\n NNModuleToString,\n run_fwd_maybe_bwd,\n TEST_REPLACEABLE_COMMENT,\n)\n\nfrom .. 
import config\nfrom ..backends.registry import lookup_backend, register_debug_backend\nfrom ..utils import clone_inputs\n\nlog = logging.getLogger(__name__)\n\n\ninductor_config = import_module(\"torch._inductor.config\")\nuse_buck = inductor_config.is_fbcode()\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n# MAIN ENTRY POINT\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n\n\ndef wrap_backend_debug(unconfigured_compiler_fn, compiler_name: str):\n \"\"\"\n A minifier decorator that wraps the TorchDynamo produced Fx graph modules.\n As opposed to wrap_compiler_debug, this wrapper intercepts at the\n TorchDynamo produced Fx Graph Module. This makes it backend-agnostic to some\n level, e.g., it is useful for minifying issues related to Aot Autograd\n tracing. If an error is found, we minify and save the minified repro in\n repro.tar.gz.\n \"\"\"\n\n @functools.wraps(unconfigured_compiler_fn)\n def debug_wrapper(gm, example_inputs, **kwargs):\n compiler_fn = functools.partial(unconfigured_compiler_fn, **kwargs)\n assert config.repro_after in (\"dynamo\", \"aot\", None)\n\n if config.repro_after == \"dynamo\":\n\n def add_paths(exc):\n exc.minifier_path = os.path.join(minifier_dir(), \"minifier_launcher.py\")\n if use_buck:\n exc.buck_command = \" \".join(\n BUCK_CMD_PREFIX\n + [BuckTargetWriter(exc.minifier_path).cmd_line_path]\n )\n\n if config.repro_level == 3:\n dump_to_minify_after_dynamo(gm, example_inputs, compiler_name)\n\n # Check for either accuracy (level 4) or other type of failures.\n if config.repro_level == 4:\n # Check Accuracy\n compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)\n if backend_accuracy_fails(gm, example_inputs, compiler_fn):\n log.warning(\n \"Accuracy failed for the TorchDynamo produced graph. Creating script to minify the error.\"\n )\n dump_to_minify_after_dynamo(\n fx.GraphModule(gm, copy.deepcopy(gm.graph)),\n example_inputs,\n compiler_name,\n )\n exc = AccuracyError(\"Bad accuracy detected.\")\n add_paths(exc)\n raise exc\n else:\n try:\n compiled_gm = compiler_fn(copy.deepcopy(gm), example_inputs)\n run_fwd_maybe_bwd(compiled_gm, example_inputs)\n except Exception as exc:\n log.warning(\n \"Compiled Fx GraphModule failed. 
Creating script to minify the error.\"\n )\n if config.repro_level == 1:\n dump_state_fn = functools.partial(\n dump_backend_state, compiler_name=compiler_name\n )\n dump_state_fn(\n fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs\n )\n elif config.repro_level == 2:\n dump_to_minify_after_dynamo(\n fx.GraphModule(gm, copy.deepcopy(gm.graph)),\n example_inputs,\n compiler_name,\n )\n add_paths(exc)\n raise\n else:\n compiled_gm = compiler_fn(gm, example_inputs)\n\n return compiled_gm\n\n debug_wrapper._torchdynamo_orig_callable = unconfigured_compiler_fn # type: ignore[attr-defined]\n\n return debug_wrapper\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n# REPRO DUMPERS\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n\n\ndef generate_dynamo_fx_repro_string(\n model_str, args, compiler_name, check_accuracy=False\n):\n \"\"\"\n Generate a repro string for backend-agnostic minified version.\n \"\"\"\n\n run_code = textwrap.dedent(\n f\"\"\"\nwith torch.cuda.amp.autocast(enabled={torch.is_autocast_enabled()}):\n ref = run_fwd_maybe_bwd(mod, args)\n res = run_fwd_maybe_bwd(opt_mod, args)\n \"\"\"\n )\n\n if config.repro_level == 4 or check_accuracy:\n run_code = textwrap.dedent(\n f\"\"\"\nmod.eval()\nopt_mod.eval()\n\nclass AccuracyError(Exception):\n pass\n\nwith torch.cuda.amp.autocast(enabled={torch.is_autocast_enabled()}):\n assert same_two_models(mod, mod, args), \"Eager itself failed\"\n if not same_two_models(mod, opt_mod, args):\n raise AccuracyError(\"Dynamo failed\")\n \"\"\"\n )\n\n return textwrap.dedent(\n f\"\"\"\nfrom math import inf\nimport torch\nfrom torch import tensor, device\nimport torch.fx as fx\nimport torch._dynamo\nfrom torch._dynamo.testing import rand_strided\nfrom torch._dynamo.debug_utils import run_fwd_maybe_bwd\nfrom torch._dynamo.debug_utils import same_two_models\n\n{generate_config_string()}\n\n{TEST_REPLACEABLE_COMMENT}\n{extra_imports}\n\nargs = {[(tuple(a.shape), tuple(a.stride()), a.dtype, a.device.type, a.requires_grad) for a in args]}\nargs = [rand_strided(sh, st, dt, dev).requires_grad_(rg) for (sh, st, dt, dev, rg) in args]\n\n{model_str}\n\nmod = Repro()\nopt_mod = torch._dynamo.optimize(\"{compiler_name}\")(mod)\n\n{run_code}\n \"\"\"\n )\n\n\ndef dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy=False):\n \"\"\"\n Saves the repro to a repro.py file\n \"\"\"\n curdir = os.getcwd()\n subdir = os.path.join(os.getcwd(), \"checkpoints\")\n if not os.path.exists(subdir):\n os.makedirs(subdir, exist_ok=True)\n file_name = os.path.join(subdir, f\"minified_{len(gm.graph.nodes)}_nodes.py\")\n log.warning(\n \"Writing checkpoint with %s nodes to %s\", len(gm.graph.nodes), file_name\n )\n\n model_str = NNModuleToString.convert(gm)\n with open(file_name, \"w\") as fd:\n fd.write(\n generate_dynamo_fx_repro_string(\n model_str, args, compiler_name, check_accuracy\n )\n )\n latest_repro = os.path.join(curdir, \"repro.py\")\n log.warning(\"Copying %s to %s for convenience\", file_name, latest_repro)\n\n if use_buck:\n BuckTargetWriter(latest_repro).write()\n\n shutil.copyfile(file_name, latest_repro)\n\n\ndef dump_backend_state(gm, args, compiler_name, check_accuracy=False):\n \"\"\"\n Dumps the dynamo graph to repro the issue.\n 1) It tries to convert Fx GraphModule to a string. 
If we can, it writes to a\n repro.py file.\n 2) If we can't convert Fx GraphModule to a string, we use to_folder to save\n the module and save a tar file.\n \"\"\"\n assert NNModuleToString.can_convert_to_string(gm)\n return dump_backend_repro_as_file(gm, args, compiler_name, check_accuracy)\n # return dump_backend_repro_as_tarfile(gm, args, compiler_name)\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n# MINIFIER DUMPER\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n\n\ndef dump_to_minify_after_dynamo(gm, args, compiler_name):\n model_str = NNModuleToString.convert(gm)\n\n minifier_backend = \"dynamo_minifier_backend\"\n if config.repro_level == 4:\n minifier_backend = \"dynamo_accuracy_minifier_backend\"\n\n custom_compiler_error = (\n textwrap.dedent(\n \"\"\"\\\n raise RuntimeError(\n 'Compiler name is None - this likely means that a custom compiler '\n 'was called by torchdynamo. Please remove this error, import your '\n 'custom compiler function, and replace the compiler_name=\"None\" '\n 'line below to compiler_name='\n )\n \"\"\"\n )\n if compiler_name is None\n else \"\"\n )\n\n contents = textwrap.dedent(\n f\"\"\"\nimport os\nfrom math import inf\nimport torch\nfrom torch import tensor, device\nimport torch.fx as fx\nimport functools\nimport torch._dynamo\nfrom torch._dynamo.debug_utils import run_fwd_maybe_bwd\nfrom torch._dynamo.backends.registry import lookup_backend\nfrom torch._dynamo.testing import rand_strided\n\n{generate_config_string()}\n\n{TEST_REPLACEABLE_COMMENT}\n{extra_imports}\n\nargs = {[(tuple(a.shape), tuple(a.stride()), a.dtype, a.device.type, a.requires_grad) for a in args]}\nargs = [rand_strided(sh, st, dt, dev).requires_grad_(rg) for (sh, st, dt, dev, rg) in args]\n\n{model_str}\nmod = Repro()\n\n# Setup debug minifier compiler\ncompiler_fn = lookup_backend(\"{minifier_backend}\")\n{custom_compiler_error}\ndynamo_minifier_backend = functools.partial(\n compiler_fn,\n compiler_name=\"{compiler_name}\",\n)\nopt_mod = torch._dynamo.optimize(dynamo_minifier_backend)(mod)\n\nwith torch.cuda.amp.autocast(enabled={torch.is_autocast_enabled()}):\n opt_mod(*args)\n \"\"\"\n )\n helper_for_dump_minify(contents)\n\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n# MINIFIER BACKENDS\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #\n\n\n@register_debug_backend\ndef dynamo_minifier_backend(gm, example_inputs, compiler_name):\n from functorch.compile import minifier\n\n compiler_fn = lookup_backend(compiler_name)\n\n try:\n compiled_gm = compiler_fn(gm, example_inputs)\n run_fwd_maybe_bwd(compiled_gm, example_inputs)\n raise ValueError(\"No issue was detected\")\n except Exception as exc:\n orig_failure = str(exc)\n log.warning(\n \"Compiled Fx GraphModule failed. 
Creating script to minify the error.\"\n )\n dump_state_fn = functools.partial(\n dump_backend_state, compiler_name=compiler_name\n )\n dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)\n fails_fn = functools.partial(\n backend_fails,\n compiler_fn=compiler_fn,\n orig_failure=orig_failure,\n )\n minifier(\n gm,\n example_inputs,\n module_fails=fails_fn,\n dump_state=dump_state_fn,\n )\n return gm\n\n\n@register_debug_backend\ndef dynamo_accuracy_minifier_backend(gm, example_inputs, compiler_name):\n from functorch.compile import minifier\n\n compiler_fn = lookup_backend(compiler_name)\n\n # Set the eval mode to remove randomness.\n gm.eval()\n\n # Check Accuracy\n if backend_accuracy_fails(\n gm, example_inputs, compiler_fn, only_fwd=config.repro_forward_only\n ):\n log.warning(\"Accuracy failed for the TorchDynamo produced graph\")\n dump_state_fn = functools.partial(\n dump_backend_state, compiler_name=compiler_name, check_accuracy=True\n )\n fails_fn = functools.partial(\n backend_accuracy_fails,\n compiler_fn=compiler_fn,\n only_fwd=config.repro_forward_only,\n )\n dump_state_fn(fx.GraphModule(gm, copy.deepcopy(gm.graph)), example_inputs)\n minifier(\n gm,\n example_inputs,\n module_fails=fails_fn,\n dump_state=dump_state_fn,\n )\n else:\n log.error(\"Input graph does not fail accuracy testing\")\n return gm\n\n\ndef backend_fails(gm, example_inputs, compiler_fn, orig_failure):\n \"\"\"\n Minifier uses this function to identify if the minified graph module fails\n with the same error.\n\n One caveat is that minifier can potentially go into a wrong direction when\n the resulting graph module fails for a different reason. To avoid this, we\n save the string for the original exception and check similarity between new\n and old exception. They can be somewhat different in some cases, when the\n exception string depends on the failing node information. 
So, we have a\n loose similarity metric to guide the minifier path.\n \"\"\"\n from difflib import SequenceMatcher\n\n try:\n compiled_gm = compiler_fn(gm, example_inputs)\n run_fwd_maybe_bwd(compiled_gm, clone_inputs(example_inputs))\n return False\n except Exception as e:\n new_failure = str(e)\n if SequenceMatcher(None, orig_failure, new_failure).ratio() > 0.5:\n return True\n return False\n", "sub_path": "torch/_dynamo/repro/after_dynamo.py", "file_name": "after_dynamo.py", "file_ext": "py", "file_size_in_byte": 12772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 30, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 33, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "torch._dynamo.debug_utils.minifier_dir", "line_number": 59, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.BUCK_CMD_PREFIX", "line_number": 62, "usage_type": "name"}, {"api_name": "torch._dynamo.debug_utils.BuckTargetWriter", "line_number": 63, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 72, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.backend_accuracy_fails", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.fx.GraphModule", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.fx", "line_number": 78, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 78, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.AccuracyError", "line_number": 82, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 87, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.run_fwd_maybe_bwd", "line_number": 88, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.fx.GraphModule", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.fx", "line_number": 98, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 98, "usage_type": "call"}, {"api_name": "torch.fx.GraphModule", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.fx", "line_number": 102, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 102, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 51, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.is_autocast_enabled", "line_number": 132, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.is_autocast_enabled", "line_number": 147, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 154, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.generate_config_string", "line_number": 165, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.TEST_REPLACEABLE_COMMENT", "line_number": 167, "usage_type": "name"}, {"api_name": "torch._dynamo.debug_utils.extra_imports", "line_number": 168, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 187, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 188, 
"usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "torch._dynamo.debug_utils.NNModuleToString.convert", "line_number": 196, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.NNModuleToString", "line_number": 196, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 203, "usage_type": "call"}, {"api_name": "os.path", "line_number": 203, "usage_type": "attribute"}, {"api_name": "torch._dynamo.debug_utils.BuckTargetWriter", "line_number": 207, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 209, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.NNModuleToString.can_convert_to_string", "line_number": 220, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.NNModuleToString", "line_number": 220, "usage_type": "name"}, {"api_name": "torch._dynamo.debug_utils.NNModuleToString.convert", "line_number": 231, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.NNModuleToString", "line_number": 231, "usage_type": "name"}, {"api_name": "textwrap.dedent", "line_number": 238, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 252, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.generate_config_string", "line_number": 265, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.TEST_REPLACEABLE_COMMENT", "line_number": 267, "usage_type": "name"}, {"api_name": "torch._dynamo.debug_utils.extra_imports", "line_number": 268, "usage_type": "name"}, {"api_name": "torch.is_autocast_enabled", "line_number": 285, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.helper_for_dump_minify", "line_number": 289, "usage_type": "call"}, {"api_name": "backends.registry.lookup_backend", "line_number": 301, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.run_fwd_maybe_bwd", "line_number": 305, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 312, "usage_type": "call"}, {"api_name": "torch.fx.GraphModule", "line_number": 315, "usage_type": "call"}, {"api_name": "torch.fx", "line_number": 315, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 315, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 316, "usage_type": "call"}, {"api_name": "functorch.compile.minifier", "line_number": 321, "usage_type": "call"}, {"api_name": "backends.registry.register_debug_backend", "line_number": 297, "usage_type": "name"}, {"api_name": "backends.registry.lookup_backend", "line_number": 334, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.backend_accuracy_fails", "line_number": 340, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 344, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 347, "usage_type": "call"}, {"api_name": "torch._dynamo.debug_utils.backend_accuracy_fails", "line_number": 348, "usage_type": "argument"}, {"api_name": "torch.fx.GraphModule", "line_number": 352, "usage_type": "call"}, {"api_name": "torch.fx", "line_number": 352, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 352, "usage_type": "call"}, {"api_name": "functorch.compile.minifier", "line_number": 353, "usage_type": "call"}, 
{"api_name": "backends.registry.register_debug_backend", "line_number": 330, "usage_type": "name"}, {"api_name": "torch._dynamo.debug_utils.run_fwd_maybe_bwd", "line_number": 380, "usage_type": "call"}, {"api_name": "utils.clone_inputs", "line_number": 380, "usage_type": "call"}, {"api_name": "difflib.SequenceMatcher", "line_number": 384, "usage_type": "call"}]} +{"seq_id": "251700519", "text": "import flask\nfrom flask import request, jsonify\n\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\napp.config['JSON_AS_ASCII'] = False\n\npopular10lists = [{'name' : '헤드셋'},\n {'name' : '면도기'},\n {'name' : '스피커'},\n {'name' : '에어팟 프로'},\n {'name' : '제습기'},\n {'name' : '마스크 스트랩'},\n {'name' : '닌텐도 스위치'},\n {'name' : '크록스'},\n {'name' : '원피스'},\n {'name' : '마스크'}]\n\n@app.route('/', methods=['GET'])\ndef home():\n return \"
<html><h1>The First Flask</h1><p>hello Flask</p></html>
\"\n\n@app.route('/getJSONlists', methods=['GET'])\ndef api_all():\n return jsonify(popular10lists)\n\napp.run()", "sub_path": "api/firstFlask.py", "file_name": "firstFlask.py", "file_ext": "py", "file_size_in_byte": 670, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "36041039", "text": "from flask import Blueprint, render_template\nimport os\nfrom flask import *\nimport secrets\n\n\ndef make_token():\n return secrets.token_urlsafe(4)\n\n\ndrug_sim = Blueprint('drug_sim', __name__,\n template_folder=os.path.join('templates'))\n\n\n@drug_sim.route('/cheminfo/drug_sim')\ndef render_cheminfo_pca_input():\n return render_template('cheminfo.drug_sim.html')\n\n\n@drug_sim.route('/cheminfo/drug_sim/input_process', methods=['POST'])\ndef get_sdf_for_drug_sim():\n file = request.files['file']\n if not file.filename.endswith('.csv'):\n return render_template('cheminfo.drug_sim.html', error=\"not a csv\")\n else:\n session['pca_token'] = make_token()\n if not os.path.exists(os.path.join(current_app.root_path, 'Cheminformatics', 'drug_sim', session.get(\"pca_token\"))):\n os.makedirs(os.path.join(current_app.root_path,\n 'Cheminformatics', 'drug_sim', session.get(\"pca_token\")))\n\n file.save(os.path.join(current_app.root_path,\n 'Cheminformatics', 'drug_sim', session.get(\"pca_token\"), file.filename))\n\n from Cheminformatics.drug_sim.run_nb import run_with_pm\n\n result = run_with_pm(os.path.join(current_app.root_path, 'Cheminformatics', 'drug_sim', session.get(\n \"pca_token\"), file.filename), session.get(\"pca_token\"))\n if result is True:\n return render_template(f\"{session.get('pca_token')}_output.html\")\n\n\n@drug_sim.route('/drug_sim_download/')\ndef pca_downloads(file):\n return send_file(os.path.join(current_app.root_path, \"Cheminformatics\", \"drug_sim\", file))\n", "sub_path": "Cheminformatics/drug_sim/blueprint.py", "file_name": "blueprint.py", "file_ext": "py", "file_size_in_byte": 1641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "secrets.token_urlsafe", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.Blueprint", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "Cheminformatics.drug_sim.run_nb.run_with_pm", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": 
"flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}]} +{"seq_id": "547618041", "text": "import json, boto3, os\n\n\n# TABLE_NAME env variable defined in SAM template, set to ViewCounts here for the pytest\n\n\ndef lambda_handler(event, context):\n TABLE_NAME = 'ViewCounts'\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(TABLE_NAME)\n\n response = table.update_item(\n Key={\n 'Page': 'cloudresumechallenge.andrewjpak.com'\n },\n UpdateExpression='SET ViewCount = if_not_exists(ViewCount, :val0) + :val1',\n ExpressionAttributeValues={\n ':val0': 0,\n ':val1': 1\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n\n retrieved_viewcount = json.dumps(int(response[\"Attributes\"][\"ViewCount\"]))\n\n return {\n 'statusCode': 200,\n 'headers': {\n 'Access-Control-Allow-Headers': 'Content-Type',\n 'Access-Control-Allow-Origin': \"*\",\n 'Access-Control-Allow-Methods': 'GET'\n },\n 'body': retrieved_viewcount\n }\n", "sub_path": "Test/getcount_test.py", "file_name": "getcount_test.py", "file_ext": "py", "file_size_in_byte": 953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "boto3.resource", "line_number": 9, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "345258975", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 28 12:35:50 2019\n\n@author: harshwardhan\n\"\"\"\n\nimport urllib.request\n\nwiki = \"https://simple.wikipedia.org/wiki/List_of_state_and_union_territory_capitals_in_India\"\npage = urllib.request.urlopen(wiki)\n\n\nfrom bs4 import BeautifulSoup\nsoup = BeautifulSoup(page) \n\n\nright_table=soup.find('table', class_='wikitable sortable plainrowheaders')\n\nA=[]\nB=[]\nC=[]\nD=[]\nE=[]\nF=[]\nG=[]\n\n\nfor row in right_table.find_all(\"tr\"): \n cells = row.findAll('td')\n states=row.findAll('th') \n if len(cells)==6: \n A.append(cells[0].text)\n B.append(states[0].text)\n C.append(cells[1].text)\n D.append(cells[2].text)\n E.append(cells[3].text)\n F.append(cells[4].text)\n G.append(cells[5].text)\n \n \nimport pandas as pd\ndf=pd.DataFrame(A,columns=['Number'])\ndf['State/UT']=B\ndf['Admin_Capital']=C\ndf['Legislative_Capital']=D\ndf['Judiciary_Capital']=E\ndf['Year_Capital']=F\ndf['Former_Capital']=G\nprint(df)\n\n \n ", "sub_path": "web_scraping/scode.py", "file_name": "scode.py", "file_ext": "py", "file_size_in_byte": 1021, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 12, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 12, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 12, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "149658057", "text": "import json\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport logging\nimport re\nimport string\nfrom itertools import groupby\nfrom telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)\nfrom telegram.ext import (Updater, CommandHandler, MessageHandler, Filters, RegexHandler,\n ConversationHandler)\nimport model, sample, encoder\n\n# Enable 
logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\nTALK, START, REPLYTALK, REPLYRAW = range(4)\n\nmy_name = ''\napikey = ''\nowner=''\npersistent_context=' '\nsample_context=''\n\n\nmodel_name='775M'\nseed=None\nnsamples=1\nlength=128 \ntemperature=1\ntop_k=16 \nbatch_size = 1 # leftovers\nassert nsamples % batch_size == 0\noutput = None\nsess = None\n\ndef compress(istring):\n return re.sub('[%s]' % string.digits, '', ''.join('%s%s' % (char, sum(1 for _ in group)) for char, group in groupby(istring)))\n\ndef start(update, context):\n update.message.reply_text(\n 'Send /cancel to stop talking to me.\\n\\n'\n 'Send /raw to enter raw queries.\\n\\n'\n 'Send /talk to enter context talk mode. \\n\\n'\n '')\n return START\n\n\ndef talk(update, context):\n logger.info(\"TALK mode started\")\n update.message.reply_text('TALK MODE START. enter your text or /cancel',\n reply_markup=ReplyKeyboardRemove())\n\n return REPLYTALK\n\ndef raw(update, context):\n\n logger.info(\"RAW mode started\")\n update.message.reply_text('RAW MODE START. enter your text or /cancel',\n reply_markup=ReplyKeyboardRemove())\n\n return REPLYRAW\n\ndef replytalk(update, context):\n # carry global vars\n global length\n global sess\n global output\n global model_name\n global batch_size\n global nsamples\n global tfcontext\n global output\n global sample_context\n global persistent_context\n global my_name\n\n user = update.message.from_user\n logger.info(\"REPLYTALK received of %s: %s\", user.first_name, update.message.text)\n\n if not sample_context:\n # initialize. try to inject some context to hint the model\n sample_context = 'Conversation of ' + my_name + ', and a person from internet called ' + user.first_name + '.\\n'\n sample_context = sample_context + persistent_context +'\\n\\n'\n sample_context = sample_context + my_name + ' - Hi ' + user.first_name + '\\n'\n\n raw_text = update.message.text\n sample_context = sample_context + user.first_name + ' - ' + raw_text + '\\n' \n \n enc = encoder.get_encoder(model_name)\n context_tokens = encoder.get_encoder(model_name).encode(sample_context)\n logger.info(\"sample_context: \" + sample_context )\n logger.info(\"sample_context_len: \" + str(len(context_tokens)))\n out = sess.run(output, feed_dict={ tfcontext: [context_tokens for _ in range(1)] })[:, len(context_tokens):]\n text = enc.decode(out[0])\n logger.info(\"Model run complete\")\n\n # parse the response somehow\n logger.info(\"model response\" + text)\n logger.info(\"first line response\" + text.split('\\n')[0])\n model_response_text = ''\n\n if len(text.split('\\n')[0]) < 5 or len(compress(text.split('\\n')[0])) < 5:\n model_response_text = text.split('\\n')[1].lstrip() #+ '\\n'\n else:\n model_response_text = text.split('\\n')[0].lstrip() #+ '\\n' \n\n logger.info(\"guessed response\" + model_response_text)\n\n # if model response starts with correspondent name...\n if (model_response_text.startswith(user.first_name)):\n # v002+ just look for the first line beginning with my name\n for line in text.split('\\n'):\n if line.startswith(my_name + ' - '):\n model_response_text = line.split('-')[1]\n logger.info(\"guessed response (2)\" + model_response_text)\n\n if '<|endoftext|>' in model_response_text:\n model_response_text = model_response_text.split('<|endoftext|>')[0]\n\n # sometimes my name is mentioned on line 1 need to clean that\n if model_response_text.startswith(my_name + ' - '):\n model_response_text = 
model_response_text.split(my_name + ' - ')[1]\n\n    logger.info(\"final response \" + model_response_text)\n\n    update.message.reply_text(model_response_text,\n                              reply_markup=ReplyKeyboardRemove())\n\n    sample_context = sample_context + my_name + ' - ' + model_response_text + '\\n'\n\n    # truncate the context\n    linecount = 0\n    count = 0\n    for line in sample_context.splitlines():\n        linecount += 1\n        logger.info(\"ctx length \" + str(linecount) + \" \" + str(len(context_tokens)) + \" tokens\")\n        if linecount > 30 or len(context_tokens) > 800:\n            #sample_context_new = '';\n            sample_context_new = persistent_context + '\\n\\n'\n            for line in sample_context.splitlines():\n                count += 1\n                if count > (linecount - 30):\n                    sample_context_new = sample_context_new + line + '\\n'\n            sample_context = sample_context_new\n\n    return REPLYTALK\n\ndef replyraw(update, context):\n    # carry global vars\n    global length\n    global sess\n    global output\n    global model_name\n    global batch_size\n    global nsamples\n    global tfcontext\n    global output\n\n\n\n    user = update.message.from_user\n    logger.info(\"RAW query received from %s: %s\", user.first_name, update.message.text)\n    raw_text = update.message.text\n    enc = encoder.get_encoder(model_name)\n    context_tokens = encoder.get_encoder(model_name).encode(raw_text)\n    generated = 0\n    for _ in range(nsamples // batch_size):\n        out = sess.run(output, feed_dict={\n            tfcontext: [context_tokens for _ in range(batch_size)]\n        })[:, len(context_tokens):]\n        for i in range(batch_size):\n            generated += 1\n            text = enc.decode(out[i])\n            logger.info(\"Model run complete\")\n            logger.info(\"model response: \" + text)\n            update.message.reply_text(text,\n                              reply_markup=ReplyKeyboardRemove())\n    return REPLYRAW\n\n\ndef cancel(update, context):\n    global sample_context\n    sample_context = ''\n    user = update.message.from_user\n    logger.info(\"User %s canceled the conversation.\", user.first_name)\n    update.message.reply_text('Bye! 
I hope we can talk again some day.',\n reply_markup=ReplyKeyboardRemove())\n logger.info(\"sample_context: \" + sample_context)\n\n return ConversationHandler.END\n\ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n\n\ndef main():\n\n # carry global vars\n global apikey\n global length\n global sess\n global output\n global length\n global sess\n global output\n global model_name\n global batch_size\n global nsamples\n global tfcontext\n global owner\n\n # Create the Updater and pass it your bot's token.\n # Make sure to set use_context=True to use the new context based callbacks\n # Post version 12 this will no longer be necessary\n updater = Updater(apikey, use_context=True)\n\n # Get the dispatcher to register handlers\n dp = updater.dispatcher\n\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start, Filters.user(username=owner))],\n\n states={\n TALK: [MessageHandler(Filters.text, talk)],\n REPLYTALK: [MessageHandler(Filters.text, replytalk)],\n REPLYRAW: [MessageHandler(Filters.text, replyraw)]\n },\n\n fallbacks=[CommandHandler('cancel', cancel),\n CommandHandler('raw', raw),\n CommandHandler('talk', talk)]\n )\n\n dp.add_handler(conv_handler)\n dp.add_error_handler(error)\n\n \n # initialize the model, etc\n hparams = model.default_hparams()\n with open(os.path.join('models', model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n if length is None:\n length = hparams.n_ctx // 2\n elif length > hparams.n_ctx:\n raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n with tf.Session(graph=tf.Graph()) as sess:\n tfcontext = tf.placeholder(tf.int32, [batch_size, None])\n np.random.seed(seed)\n tf.set_random_seed(seed)\n output = sample.sample_sequence(\n hparams=hparams, length=length,\n context=tfcontext,\n batch_size=batch_size,\n temperature=temperature, top_k=top_k\n )\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))\n saver.restore(sess, ckpt)\n logger.info(\"Model initialized\")\n # Start the Bot\n updater.start_polling()\n\n # Run the bot until you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. 
This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n updater.idle()\n\n\nif __name__ == '__main__':\n main()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 9060, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 41, "usage_type": "call"}, {"api_name": "string.digits", "line_number": 41, "usage_type": "attribute"}, {"api_name": "itertools.groupby", "line_number": 41, "usage_type": "call"}, {"api_name": "telegram.ReplyKeyboardRemove", "line_number": 55, "usage_type": "call"}, {"api_name": "telegram.ReplyKeyboardRemove", "line_number": 63, "usage_type": "call"}, {"api_name": "encoder.get_encoder", "line_number": 93, "usage_type": "call"}, {"api_name": "encoder.get_encoder", "line_number": 94, "usage_type": "call"}, {"api_name": "telegram.ReplyKeyboardRemove", "line_number": 131, "usage_type": "call"}, {"api_name": "encoder.get_encoder", "line_number": 168, "usage_type": "call"}, {"api_name": "encoder.get_encoder", "line_number": 169, "usage_type": "call"}, {"api_name": "telegram.ReplyKeyboardRemove", "line_number": 181, "usage_type": "call"}, {"api_name": "telegram.ReplyKeyboardRemove", "line_number": 191, "usage_type": "call"}, {"api_name": "telegram.ext.ConversationHandler.END", "line_number": 194, "usage_type": "attribute"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 194, "usage_type": "name"}, {"api_name": "telegram.ext.Updater", "line_number": 220, "usage_type": "call"}, {"api_name": "telegram.ext.ConversationHandler", "line_number": 225, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 226, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.user", "line_number": 226, "usage_type": "call"}, {"api_name": "telegram.ext.Filters", "line_number": 226, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 229, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.text", "line_number": 229, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 229, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 230, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.text", "line_number": 230, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 230, "usage_type": "name"}, {"api_name": "telegram.ext.MessageHandler", "line_number": 231, "usage_type": "call"}, {"api_name": "telegram.ext.Filters.text", "line_number": 231, "usage_type": "attribute"}, {"api_name": "telegram.ext.Filters", "line_number": 231, "usage_type": "name"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 234, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 235, "usage_type": "call"}, {"api_name": "telegram.ext.CommandHandler", "line_number": 236, "usage_type": "call"}, {"api_name": "model.default_hparams", "line_number": 244, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path", "line_number": 245, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 246, "usage_type": "call"}, {"api_name": "tensorflow.Session", 
"line_number": 251, "usage_type": "call"}, {"api_name": "tensorflow.Graph", "line_number": 251, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 252, "usage_type": "call"}, {"api_name": "tensorflow.int32", "line_number": 252, "usage_type": "attribute"}, {"api_name": "numpy.random.seed", "line_number": 253, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 253, "usage_type": "attribute"}, {"api_name": "tensorflow.set_random_seed", "line_number": 254, "usage_type": "call"}, {"api_name": "sample.sample_sequence", "line_number": 255, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 261, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 261, "usage_type": "attribute"}, {"api_name": "tensorflow.train.latest_checkpoint", "line_number": 262, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 262, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}]} +{"seq_id": "543837287", "text": "from marshmallow import fields,validates, ValidationError\nfrom marshmallow_enum import EnumField\nfrom datetime import date\nfrom connections.extensions import ma\nfrom connections.models.connection import Connection, ConnectionType\nfrom connections.models.person import Person\n\n\nclass BaseModelSchema(ma.ModelSchema):\n def __init__(self, strict=True, **kwargs):\n super().__init__(strict=strict, **kwargs)\n\n\n# date of birth should be less than today's date\n# There is not validation for email. Added validation\nclass PersonSchema(BaseModelSchema):\n email = fields.Email()\n date_of_birth = fields.Date()\n @validates(\"date_of_birth\")\n def validate_date_of_birth(self,value):\n if date.today() < value:\n raise ValidationError(\"Cannot be in the future.\")\n\n class Meta:\n model = Person\n load_instance = True \n\n\n\nclass ConnectionSchema(BaseModelSchema):\n from_person_id = fields.Integer()\n to_person_id = fields.Integer()\n connection_type = EnumField(ConnectionType)\n\n class Meta:\n model = Connection\n load_instance = True \n", "sub_path": "connections/schemas.py", "file_name": "schemas.py", "file_ext": "py", "file_size_in_byte": 1097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "connections.extensions.ma.ModelSchema", "line_number": 9, "usage_type": "attribute"}, {"api_name": "connections.extensions.ma", "line_number": 9, "usage_type": "name"}, {"api_name": "marshmallow.fields.Email", "line_number": 17, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 17, "usage_type": "name"}, {"api_name": "marshmallow.fields.Date", "line_number": 18, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 21, "usage_type": "name"}, {"api_name": "marshmallow.ValidationError", "line_number": 22, "usage_type": "call"}, {"api_name": "marshmallow.validates", "line_number": 19, "usage_type": "call"}, {"api_name": "connections.models.person.Person", "line_number": 25, "usage_type": "name"}, {"api_name": "marshmallow.fields.Integer", "line_number": 31, "usage_type": "call"}, {"api_name": "marshmallow.fields", "line_number": 31, "usage_type": "name"}, {"api_name": "marshmallow.fields.Integer", "line_number": 32, "usage_type": "call"}, 
{"api_name": "marshmallow.fields", "line_number": 32, "usage_type": "name"}, {"api_name": "marshmallow_enum.EnumField", "line_number": 33, "usage_type": "call"}, {"api_name": "connections.models.connection.ConnectionType", "line_number": 33, "usage_type": "argument"}, {"api_name": "connections.models.connection.Connection", "line_number": 36, "usage_type": "name"}]} +{"seq_id": "69090535", "text": "\"\"\"\nA script to apply the sliding window approach on input time series data. Creates fully prepared datasets for analysis.\n\n@author: Milena Bajic (DTU Compute)\n\"\"\"\n\n\nimport sys,os, glob, time\nimport subprocess\nimport argparse\nimport pickle\nimport random\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport psutil\n\nclass Window_dataset():\n\n def __init__(self, input_dir, filestring, win_size = 2, out_dir = '', is_test = False, df_i_min = 0):\n\n # Initial processing time\n t0=time.time()\n\n # Get from input\n self.input_dir = input_dir\n self.out_dir = out_dir\n self.filestring = filestring\n self.win_size = win_size\n self.test = is_test\n\n # Create output dir for this filetype \n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n \n # Load pickle\n self.input_dataframe = self.load_pickle(input_dir, filestring)\n \n # Change dtypes for df to save RAM\n #print('Initial dtypes: ', self.input_dataframe.dtypes)\n self.input_dataframe = self.input_dataframe.astype({'defect_width': np.float16, 'defect_height': np.float16, 'speed':np.float16})\n #sys.exit(0)\n \n # Remove rows with 0 points recorded, n_points[s] = 3.6*fs*defect_width/v[km/h]\n print('Is test: {0}'.format(is_test))\n if self.test:\n self.input_dataframe = self.remove_samples_with_zero_counts(self.input_dataframe).head(100)\n self.n_split_rows_length = 20\n else:\n self.input_dataframe = self.remove_samples_with_zero_counts(self.input_dataframe)\n self.n_split_rows_length = 1000\n\n # Take only needed columns\n self.input_columns = ['time','distance','speed', 'acceleration', 'severity', 'type', 'defect_width', 'defect_height']\n self.deciding_column = 'type'\n self.filestring = self.filestring\n self.input_dataframe = self.input_dataframe[self.input_columns]\n\n # Get scaler\n scaler_filename = '/'.join(self.out_dir.split('/')[0:-1])+'/train_scaler_speed.pt'\n if os.path.exists(scaler_filename):\n scaler = pickle.load(open(scaler_filename, 'rb'))\n elif self.filestring == 'train':\n print('Getting train scaler')\n speed = self.input_dataframe['speed'].to_numpy()\n speed = speed.reshape(-1,1)\n scaler = MinMaxScaler().fit(speed)\n pickle.dump(scaler, open(scaler_filename, 'wb'))\n \n # Scale speed \n speed = self.input_dataframe['speed'].to_numpy()\n speed = speed.reshape(-1,1)\n self.input_dataframe['scaled_speed'] = scaler.transform(speed)\n self.input_dataframe = self.input_dataframe.astype({'scaled_speed':np.float16})\n \n # Window columns to save\n self.window_columns = [col for col in self.input_dataframe.columns if col not in ['distance','type','time']]\n self.window_columns.append('window_class')\n \n # Split a very large input df into smaller ones to process part by part (fit into RAM more easily)\n self.n_input_rows = self.input_dataframe.shape[0]\n self.last_split = int(self.n_input_rows/self.n_split_rows_length)\n self.index_list = [n*self.n_split_rows_length for n in range(1,self.last_split+1)]\n self.split_input_dataframes = np.split(self.input_dataframe, self.index_list)\n self.n_splits = 
len(self.split_input_dataframes)\n        print('Number of split dataframes: {0}'.format(self.n_splits))\n\n        for df_i, df in list(enumerate(self.split_input_dataframes)):\n            \n            if (df_i_min<=df_i<self.n_splits):\n                print('===> Passing df: ',df_i)\n                df.reset_index(inplace=True, drop=True)\n                self.make_sliding_window_df(df_i, df)\n            \n        dt = round(time.time()-t0,1)\n        print('Time to process: {0} s'.format(dt))\n\n    def load_pickle(self, input_dir, string):\n        filename = '{0}/{1}_scaled.pkl'.format(input_dir, string)\n        print('Loading: {0}'.format(filename))\n        with open(filename, \"rb\") as f:\n            df = pickle.load(f)\n        return df\n\n    def remove_samples_with_zero_counts(self, input_dataframe):\n        # Remove samples with too narrow defects so there is no point \"caught\" in type and severity\n        input_dataframe['keep'] = input_dataframe.type.apply(lambda row: np.count_nonzero(row)>0)\n        input_dataframe = input_dataframe[ input_dataframe['keep']==True ]\n        input_dataframe.drop(['keep'],axis=1, inplace = True)\n        input_dataframe.reset_index(drop=True, inplace=True)\n        return input_dataframe\n\n\n    def make_sliding_window_df(self, df_i, input_dataframe_part):\n        # Making sliding window (each window: constant in distance, variable length, slide by 1 point)\n        \n        print('Making sliding window')\n        window_df = pd.DataFrame([], columns = self.window_columns)\n        \n        # Fill Dataframe with windows from initial one\n        for index, row in input_dataframe_part.iterrows():\n            if (index%100==0):\n                print('Processing input df row: {0}/{1}'.format(index,input_dataframe_part.shape[0]))\n                #print('Window_df memory usage: ',window_df.info(verbose=False, memory_usage=\"deep\"))\n                ram_per = psutil.virtual_memory().percent\n                #print('Used RAM[%]: ',ram_per)\n            row_df = self.make_sliding_window_row(row)\n            window_df = window_df.append(row_df)\n\n        window_df.reset_index(inplace=True, drop=True)\n\n        # Save pickle\n        self.save_pickle(window_df, self.out_dir, self.filestring+'_'+ str(df_i))\n        return\n\n    def make_sliding_window_row(self, row):\n        row_df = pd.DataFrame([], columns = self.window_columns)\n\n        end_index = np.where(row.distance > 100 - self.win_size )[0][0]-1 #\n        #print(end_index, row.distance[end_index]) # end index in the whole row (so the last sample is 2m)\n\n        # Loop over the windows\n        for i in range(0, end_index+1):\n            try:\n                # Get min and max index of this window\n                window_start_meters= row.distance[i]\n                window_end_meters= window_start_meters + self.win_size\n                window_end_index = np.where(row.distance>window_end_meters)[0][0]\n                #print(i, window_end_index, window_start_meters, window_end_meters)\n\n                # If the window is fully flat, add with a small prob. 
equal to how probable is each defect\n                window_is_flat = np.all(row[self.deciding_column][i: window_end_index]==0)\n                remove_window = False\n                if window_is_flat:\n                    remove_window = random.randrange(100)<99 # keep with 1% probability\n                    if remove_window:\n                        continue\n\n                # Put this window into row df data\n                for col in self.window_columns:\n                    if col=='window_class': # compute window class column\n                        unique_classes = np.unique(row['type'][i: window_end_index]) #possible are only 1-label windows or windows with 0 (no defect) and 1 defect\n                        if len(unique_classes)==1:\n                            row_df.at[i,col] = unique_classes[0]\n                        elif len(unique_classes)==2:\n                            row_df.at[i,col] = list(filter(lambda c: c!=0, unique_classes ))[0]\n                        else:\n                            raise NotImplementedError(\"More than 1 defect per window not implemented.\")\n                    elif isinstance(row[col],np.ndarray): # fill numpy array columns\n                        row_df.at[i,col] = row[col][i: window_end_index].astype(np.float16)\n                    else:\n                        row_df.at[i,col] = row[col] #float or string, just repeat\n                    #print('....Row_df memory usage: ',row_df.info(verbose=False, memory_usage=\"deep\"))\n            except:\n                pass\n        return row_df\n    \n    def pickle_exists(self, out_dir, filestring):\n        pickle_name = out_dir+'/'+ filestring+'_windows.pkl'\n        if os.path.exists(pickle_name):\n            return True\n        else:\n            return False\n    \n    def save_pickle(self, df, out_dir, df_type):\n        print('Saving {0} as pickle.'.format(df_type))\n        pickle_name = out_dir+'/'+df_type+'_windows.pkl'\n        df = df.astype({'defect_width': np.float16, 'defect_height': np.float16, 'speed':np.float16, 'scaled_speed':np.float16, 'window_class': np.int16})\n        #dtypes = df.dtypes\n        #print(dtypes)\n        df.to_pickle(pickle_name)\n        print('Wrote output file to: ',pickle_name)\n        return\n\n\n\n#===============================#\n# ============================= #\n\nif __name__ == \"__main__\":\n\n    parser = argparse.ArgumentParser(description='Please provide command line arguments.')\n    parser.add_argument('--is_test', action='store_true', \n                        help = 'If test is true, will process 100 rows only (use for testing purposes).') #store_true sets default to False \n    parser.add_argument('--window_size', default = 5, type=int,\n                        help = 'Window size.')\n    parser.add_argument('--input_dir', default = '/dtu-compute/mibaj/Golden-car-simulation-August-2020/train-val-test-normalized',\n                        help = 'Input directory.')\n    parser.add_argument('--output_dir_base', default = '/dtu-compute/mibaj/Golden-car-simulation-August-2020',\n                        help='Directory base where a new directory with output files will be created.')\n    parser.add_argument('--df_i_min', default = 0, type=int, help = 'df i minimum')\n\n\n    # Parse arguments\n    args = parser.parse_args()\n    input_dir = args.input_dir\n    output_dir = args.output_dir_base\n    is_test = args.is_test\n    window_size = args.window_size\n    df_i_min = args.df_i_min\n    \n    # Print configuration\n    print('Window_size: {0}'.format(window_size))\n    print('Is test: {0}'.format(is_test))\n    \n    #for filetype in ['train','valid','test']:\n    for filetype in ['valid']:\n        print('Processing: {0}'.format(filetype))\n        \n        # Make output directory\n        out_dir = '{0}/train-val-test-normalized-split-into-windows-size-{1}'.format(output_dir, window_size)\n        print('Output directory: {0}'.format(out_dir))\n        if not os.path.exists(out_dir):\n            os.makedirs(out_dir)\n        \n        # Process\n        # ======#\n        result = Window_dataset(input_dir, filetype, win_size = window_size, out_dir = out_dir + '/'+str(filetype), is_test = is_test, df_i_min = df_i_min)\n\n", "sub_path": "data_preparation_modules/transform_to_window_dataset_valid.py", "file_name": 
"transform_to_window_dataset_valid.py", "file_ext": "py", "file_size_in_byte": 11025, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 63, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 68, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.split", "line_number": 85, "usage_type": "call"}, {"api_name": "time.time", "line_number": 104, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 116, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 127, "usage_type": "call"}, {"api_name": "psutil.virtual_memory", "line_number": 134, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.all", "line_number": 161, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 178, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 179, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.float16", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.int16", "line_number": 197, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path", "line_number": 242, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 243, "usage_type": "call"}]} +{"seq_id": "349075617", "text": "import unittest\n\nfrom ibm_watson_machine_learning.helpers import DataConnection, CP4DAssetLocation, FSLocation, ConnectionAsset, ConnectionAssetLocation\nfrom ibm_watson_machine_learning.tests.utils import get_cos_credentials, get_wml_credentials, print_test_separators\nfrom ibm_watson_machine_learning.experiment import AutoAI\nfrom ibm_watson_machine_learning.experiment.autoai.optimizers.local_auto_pipelines import LocalAutoPipelines\nfrom lale.operators import TrainedPipeline\nfrom ibm_watson_machine_learning.deployment import WebService\nfrom ibm_watson_machine_learning.utils.autoai.enums import PipelineTypes\nfrom sklearn.pipeline import Pipeline\nimport pandas as pd\n\n\nclass TestLocalOptimizerCPD(unittest.TestCase):\n path = 'bank.csv'\n pipeline_model_auth_endpoint = None\n metadata = None\n metadata_auth_endpoint = None\n pipeline_name = 
'Pipeline_1'\n # training_status = 'auto_ml/4f4da88c-3a16-4450-9d3c-7bb3017164ae/wml_data/cf660105-7d43-480c-8ea1-b3d532386ab5/training-status.json'\n training_status = 'auto_ml/auto_ml.4392c2a1-4270-4b01-84f1-80a9b598e5a3/wml_data/00727c93-9fd3-4b5f-bc1a-8223ef4ac1ac/training-status.json'\n # model_location = 'auto_ml/4f4da88c-3a16-4450-9d3c-7bb3017164ae/wml_data/cf660105-7d43-480c-8ea1-b3d532386ab5/data/automl/global_output/Pipeline0/model.pickle'\n model_location = 'auto_ml/auto_ml.4392c2a1-4270-4b01-84f1-80a9b598e5a3/wml_data/00727c93-9fd3-4b5f-bc1a-8223ef4ac1ac/data/automl/global_output/Pipeline0/model.pickle'\n\n local_optimizer: 'LocalAutoPipelines' = None\n connection_id = '66d1d6c9-aa49-4583-b020-f1d7eaf416c1'\n project_id = 'acdee0a5-104d-497e-b6bd-d37601e072f4'\n space_id = 'c6f02cdc-ca64-4285-b248-b5ad788be456'\n\n data_location = './autoai/data/bank.csv'\n data = None\n X = None\n y = None\n\n @classmethod\n def setUp(cls) -> None:\n cls.wml_credentials = get_wml_credentials()\n cls.data = pd.read_csv(cls.data_location)\n cls.X = cls.data.drop(['y'], axis=1)\n cls.y = cls.data['y']\n\n @print_test_separators\n def test_01a_get_optimizer(self):\n print(\"Initializing DataConnections...\")\n\n # training_data_reference = [DataConnection(\n # location=CP4DAssetLocation(asset_id=self.asset_id)\n # )]\n\n training_data_reference = [DataConnection(\n connection=ConnectionAsset(connection_id='66d1d6c9-aa49-4583-b020-f1d7eaf416c1'),\n location=ConnectionAssetLocation(bucket='somebucket2', file_name='bank.csv'))]\n\n training_result_reference = DataConnection(\n location=FSLocation(\n # path='projects/4ae1998c-87be-49df-9baf-c79ce3edbd4b/assets/auto_ml/auto_ml.190f5e37-59f3-4d95-9749-a49b6caf6da8/wml_data/4667839f-ae61-419d-a42c-adb6b70df380/data/automl',\n path='/projects/acdee0a5-104d-497e-b6bd-d37601e072f4/assets/auto_ml/auto_ml.4392c2a1-4270-4b01-84f1-80a9b598e5a3/wml_data/00727c93-9fd3-4b5f-bc1a-8223ef4ac1ac/data/automl',\n ))\n\n TestLocalOptimizerCPD.metadata = dict(\n training_data_reference=training_data_reference,\n training_result_reference=training_result_reference,\n prediction_type='classification',\n prediction_column='y',\n test_size=0.2,\n scoring='roc_auc',\n max_number_of_estimators=1,\n )\n\n print(\"Initializing AutoAI local scenario with metadata...\")\n\n TestLocalOptimizerCPD.local_optimizer = AutoAI(\n wml_credentials=self.wml_credentials,\n project_id=self.project_id\n ).runs.get_optimizer(\n metadata=self.metadata\n )\n\n @print_test_separators\n def test_02_get_pipeline(self):\n print(\"AUTH: Fetching and store pipeline by name, LALE...\")\n\n pipeline_lale = self.local_optimizer.get_pipeline(pipeline_name=self.pipeline_name, persist=True)\n self.assertIsInstance(pipeline_lale, TrainedPipeline, msg=\"Loaded model should be of type TrainedPipeline\")\n\n print(\"AUTH: Fetching pipeline by name, SKLEARN...\")\n\n pipeline_skl = self.local_optimizer.get_pipeline(pipeline_name=self.pipeline_name, astype=PipelineTypes.SKLEARN)\n self.assertIsInstance(pipeline_skl, Pipeline, msg=\"Loaded model should be of type SKLEARN\")\n\n print(\"AUTH: Fetching best pipeline, LALE...\")\n\n try:\n TestLocalOptimizerCPD.pipeline_model_auth_endpoint = self.local_optimizer.get_pipeline()\n except Exception as e:\n self.assertIsInstance(self.pipeline_model_auth_endpoint, TrainedPipeline,\n msg=f\"Cannot load best pipeline model, Error: {e}\")\n\n @print_test_separators\n def test_03_get_params__fetch_dict_with_parameters(self):\n print(\"AUTH: Getting optimizer 
parameters...\")\n\n params = self.local_optimizer.get_params()\n print(params)\n\n @print_test_separators\n def test_04_fit__not_implemented(self):\n print(\"AUTH: Calling fit() method...\")\n\n with self.assertRaises(NotImplementedError, msg=\"\\\"fit\\\" should raise NotImplementedError\"):\n self.local_optimizer.fit(X=self.X, y=self.y)\n\n @print_test_separators\n def test_05_summary__return_data_frame_with_summary(self):\n print(\"AUTH: Fetching summary of experiment...\")\n summary = self.local_optimizer.summary()\n print(summary)\n\n @print_test_separators\n def test_06_get_pipeline_details(self):\n print(\"AUTH: Fetching best pipeline details...\")\n\n best_pipeline_details = self.local_optimizer.get_pipeline_details()\n print(f\"best pipeline details: {best_pipeline_details}\")\n\n print(\"AUTH: Fetching pipeline details...\")\n\n pipeline_details = self.local_optimizer.get_pipeline_details(pipeline_name=self.pipeline_name)\n print(f\"pipeline details: {pipeline_details}\")\n\n @print_test_separators\n def test_07_predict__on_best_pipeline(self):\n print(\"AUTH: Calling predict on the best pipeline...\")\n\n predictions = self.local_optimizer.predict(X=self.X)\n print(predictions)\n\n @print_test_separators\n def test_08_get_data_connections__return_training_data_connection_and_recreate_holdout_split(self):\n print(\"AUTH: Reading training data with holdout split...\")\n\n training_df, holdout_df = self.local_optimizer.get_data_connections()[0].read(with_holdout_split=True)\n print(f\"Training data frame: {training_df}\")\n print(f\"Holdout data frame: {holdout_df}\")\n\n @print_test_separators\n def test_09_deploy_model_from_object(self):\n print(\"AUTH: Deploying model from object...\")\n\n service = WebService(\n source_wml_credentials=self.wml_credentials,\n source_project_id=self.project_id,\n target_wml_credentials=self.wml_credentials,\n target_space_id=self.space_id)\n\n service.create(\n model=self.pipeline_model_auth_endpoint,\n metadata=self.metadata,\n deployment_name=\"Test deployment from auto-gen notebook\"\n )\n\n print(service)\n print(service.get_params())\n predictions = service.score(payload=self.X.iloc[:10])\n print(predictions)\n print(service.list())\n print(service.delete())\n print(service.list())\n\n @print_test_separators\n def test_10_deploy_model_from_pipeline_name(self):\n print(\"AUTH: Deploying model from pipeline name...\")\n\n service = WebService(\n source_wml_credentials=self.wml_credentials,\n source_project_id=self.project_id,\n target_wml_credentials=self.wml_credentials,\n target_space_id=self.space_id)\n\n service.create(\n model=self.pipeline_name,\n metadata=self.metadata,\n deployment_name=\"Test deployment from auto-gen notebookS\"\n )\n\n print(service)\n print(service.get_params())\n predictions = service.score(payload=self.X.iloc[:10])\n print(predictions)\n print(service.list())\n print(service.delete())\n print(service.list())\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "venv/Lib/site-packages/ibm_watson_machine_learning/tests/autoai/fvt/local_optimizer_for_CP4D35.py", "file_name": "local_optimizer_for_CP4D35.py", "file_ext": "py", "file_size_in_byte": 8141, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "unittest.TestCase", "line_number": 14, "usage_type": "attribute"}, {"api_name": "ibm_watson_machine_learning.tests.utils.get_wml_credentials", "line_number": 37, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 
38, "usage_type": "call"}, {"api_name": "ibm_watson_machine_learning.helpers.DataConnection", "line_number": 50, "usage_type": "call"}, {"api_name": "ibm_watson_machine_learning.helpers.ConnectionAsset", "line_number": 51, "usage_type": "call"}, {"api_name": "ibm_watson_machine_learning.helpers.ConnectionAssetLocation", "line_number": 52, "usage_type": "call"}, {"api_name": "ibm_watson_machine_learning.helpers.DataConnection", "line_number": 54, "usage_type": "call"}, {"api_name": "ibm_watson_machine_learning.helpers.FSLocation", "line_number": 55, "usage_type": "call"}, {"api_name": "ibm_watson_machine_learning.experiment.AutoAI", "line_number": 72, "usage_type": "call"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 42, "usage_type": "name"}, {"api_name": "lale.operators.TrainedPipeline", "line_number": 84, "usage_type": "argument"}, {"api_name": "ibm_watson_machine_learning.utils.autoai.enums.PipelineTypes.SKLEARN", "line_number": 88, "usage_type": "attribute"}, {"api_name": "ibm_watson_machine_learning.utils.autoai.enums.PipelineTypes", "line_number": 88, "usage_type": "name"}, {"api_name": "sklearn.pipeline.Pipeline", "line_number": 89, "usage_type": "argument"}, {"api_name": "lale.operators.TrainedPipeline", "line_number": 96, "usage_type": "argument"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 79, "usage_type": "name"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 99, "usage_type": "name"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 106, "usage_type": "name"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 113, "usage_type": "name"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 119, "usage_type": "name"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 131, "usage_type": "name"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 138, "usage_type": "name"}, {"api_name": "ibm_watson_machine_learning.deployment.WebService", "line_number": 150, "usage_type": "call"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 146, "usage_type": "name"}, {"api_name": "ibm_watson_machine_learning.deployment.WebService", "line_number": 174, "usage_type": "call"}, {"api_name": "ibm_watson_machine_learning.tests.utils.print_test_separators", "line_number": 170, "usage_type": "name"}, {"api_name": "unittest.main", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "591005288", "text": "from xlrd import open_workbook\n# -*- coding: utf-8 -*-\nimport numpy\nimport sys\nimport scipy;\nreload(sys)\nall=[]\nallUsers=[]\nselTags=[]\nallTags=[]\ncounter=0\ntagsPropagationUsers=[]\ntagsPropagationTimes=[]\nfinalTagsPropagationUsers=[]\nfinalTagsPropagationTimes=[]\nfollows=[]\nlastTags=[]\nlastTimeMean=[]\nlastTimeLast=[]\nlastExpectedFk={}\nlastUsers=[]\nlastTimeStamps=[]\nisFourWord={}\nseen=[]\nseenUsers=[]\ntimeFormat=3600\nunderScoreNum=3\nsys.setdefaultencoding('UTF8')\nbook1 = open_workbook('filtered-user-whole.xlsx')\nsheet1 = book1.sheet_by_name('Sheet1')\nbook2 = open_workbook('tags-frequencies.xlsx')\nsheet2 = book2.sheet_by_name('Sheet1');\noutputFile = open(\"meme-four-word-fan-hour.txt\", 'w');\nnetwork=open(\"Network-four-word-fan-hour.txt\",'w');\nfor line in 
open(\"users-mapping-whole.txt\"):\n allUsers.append(int(line.split(',')[0]))\nfor row_index in range(sheet2.nrows):\n if((str)(sheet2.cell(row_index, 0).value).count('_') >= underScoreNum):\n selTags.append((str)(sheet2.cell(row_index, 0).value))\nfor row_index in range(sheet1.nrows):\n time=int(sheet1.cell(row_index, 0).value)\n user=int(sheet1.cell(row_index, 1).value)\n tags=((str)(sheet1.cell(row_index, 2).value).split(','))\n for tag in tags:\n if(tag in selTags):\n if str(tag).encode('UTF8') in allTags:\n index=allTags.index(str(tag).encode('UTF8'))\n if user in tagsPropagationUsers[index]:\n i=tagsPropagationUsers[index].index(int(user))\n tagsPropagationTimes[index][i]=min(tagsPropagationTimes[index][i],time)\n\n else:\n tagsPropagationUsers[index].append(int(user))\n tagsPropagationTimes[index].append(time)\n else:\n allTags.append(str(tag).encode('UTF8'))\n index=len(allTags)-1\n tagsPropagationUsers.append([])\n tagsPropagationUsers[index].append(int(user))\n tagsPropagationTimes.append([])\n tagsPropagationTimes[index].append(time)\nfor i in range(len(allTags)):\n users=numpy.array(tagsPropagationUsers[i])\n times=numpy.array(tagsPropagationTimes[i])\n inds=times.argsort()\n users=users[inds]\n times=times[inds]\n if(len(users) >=2 ):\n all=all+list(users)\n lastTags.append(allTags[i])\n lastUsers.append(list((users)))\n lastTimeStamps.append(list(times))\nall=list(set(all))\nfor usr in all:\n ind=allUsers.index(usr)\n outputFile.write(str(ind)+','+str(ind)+'\\n')\noutputFile.write('\\n')\nfor i in range(len(lastTags)):\n a=\"\"\n a=a+str(counter)+';'\n counter=counter+1\n for us in range(len(lastUsers[i])):\n indxs=allUsers.index(lastUsers[i][us])\n a = a +str(indxs) + \",\" + str((lastTimeStamps[i][us])/(timeFormat)) + \",\"\n a = a.strip(',');\n outputFile.write(a + '\\n');\noutputFile.close()\nfor row_index in range(sheet1.nrows):\n follows=str(sheet1.cell(row_index, 3).value).split(',')\n user=int(sheet1.cell(row_index, 1).value)\n if user not in seenUsers:\n seenUsers.append(user)\n if(user in all and user in allUsers):\n for f in follows:\n if int(f) in all and int(f) in allUsers and str(allUsers.index(int(f)))+\",\"+str(allUsers.index(user)) not in seen:\n for h in range(len(lastTags)):\n if int(f) in lastUsers[h] and user in lastUsers[h]:\n indxFol=lastUsers[h].index(int(f))\n indxUser=lastUsers[h].index(user)\n if lastTimeStamps[indxFol] <= lastTimeStamps[indxUser]:\n tmp=str(allUsers.index(int(f)))+\",\"+str(allUsers.index(user))\n network.write(tmp+\"\\n\")\n seen.append(tmp)\n break\nnetwork.close()\n", "sub_path": "codes/memeFourWordFan.py", "file_name": "memeFourWordFan.py", "file_ext": "py", "file_size_in_byte": 3724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.setdefaultencoding", "line_number": 28, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 29, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "224787325", "text": "#!/usr/bin/env python\n'''\n Copyright (C) 2017 by Christopher Cooper\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, 
merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in\n all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n THE SOFTWARE.\n'''\n\"\"\"\nThis script reads Tinker input (xyz and key files)\nand spits out a xyzr file, readeable by msms\n\"\"\"\n\nimport sys\nimport numpy\nfrom numpy import *\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser(description='Read in Tinker and output xyzr')\nparser.add_argument('--solute', action='store_true',\n help=\"Set if vdw radii will be read from the SOLUTE keyword\")\nparser.add_argument('-f', '--file', dest='file_in', type=str, default='',\n help='Filename of xyz and key files')\n\n#file_in = sys.argv[1]\nfile_xyz = parser.parse_args().file_in+'.xyz'\nfile_key = parser.parse_args().file_in+'.key'\n\nif parser.parse_args().solute:\n file_out = parser.parse_args().file_in+'_solute.xyzr'\nelse:\n file_out = parser.parse_args().file_in+'.xyzr'\n\n\nwith open(file_xyz, 'r') as f:\n N = int(f.readline().split()[0])\n\nx = numpy.zeros(N)\ny = numpy.zeros(N)\nz = numpy.zeros(N)\nr = numpy.zeros(N)\natom_type = numpy.chararray(N, itemsize=10)\n\ni = 0\nheader = 0\nfor line in file(file_xyz):\n line = line.split()\n\n if header==1:\n x[i] = numpy.float64(line[2])\n y[i] = numpy.float64(line[3])\n z[i] = numpy.float64(line[4])\n atom_type[i] = line[5]\n i+=1\n\n header = 1\n\natom_class = {}\nvdw_radii = {}\n\nwith open(file_key, 'r') as f:\n line = f.readline().split()\n if line[0]=='parameters':\n file_key = line[1]\n print ('Reading parameters from '+file_key)\n\nfor line in file(file_key):\n line = line.split()\n\n if len(line)>0:\n if line[0].lower()=='atom':\n atom_class[line[1]] = line[2]\n\n if parser.parse_args().solute and line[0].lower()=='solute': \n vdw_radii[line[1]] = numpy.float64(line[4])/2.\n\n if line[0].lower()=='vdw' and line[1] not in vdw_radii:\n vdw_radii[line[1]] = numpy.float64(line[2])/2.\n \nfor i in range(N):\n r[i] = vdw_radii[atom_class[atom_type[i]]] \n \ndata = numpy.zeros((N,4))\n\ndata[:,0] = x[:]\ndata[:,1] = y[:]\ndata[:,2] = z[:]\ndata[:,3] = r[:]\n\nnumpy.savetxt(file_out, data, fmt='%5.6f')\n", "sub_path": "scripts/tinker_to_xyzr.py", "file_name": "tinker_to_xyzr.py", "file_ext": "py", "file_size_in_byte": 3203, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.chararray", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.float64", 
"line_number": 65, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 104, "usage_type": "call"}]} +{"seq_id": "543260130", "text": "#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n# author:yuyi\n# datetime:2019/9/23 13:49\nimport numpy as np\nimport copy\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport itertools\nimport copy\n\ndef fast_test(pred, train_label, hidden_in, hidden_out, top_k=1):\n '''\n :param pred: [n,n] matrix, p(i,j) means (i->j)'s probability\n :param train_label: [n,n] matrix\n :param hidden_in:[m,2] list, each row means a edge from hidden_in[i,0] to hidden_in[i,1]\n :param hidden_out: [m,2] list, each row means a edge from hidden_out[i,0] to hidden_out[i,1]\n :param top_k: check top k scores to see if the link in the\n :return:\n hidden_in_recall: float, means the recall rate in hidden in link\n hidden_out_recall: float, means the recall rate in hidden out link\n '''\n\n assert len(hidden_in) > 0\n assert len(hidden_out) > 0\n\n num_node = train_label.shape[0]\n # 将在train label中的边设为0\n pred[np.where(train_label > 0)] = 0\n # 处理hidden out link\n out_have_index = np.array(hidden_out)[:, 0]\n # 删除没有 hidden out的节点\n hidden_out_label = np.array(hidden_out)[:, 1].reshape(-1, 1)\n pred_out = pred[out_have_index]\n\n # 升序排列\n out_argsort = np.argsort(-pred_out, axis=1)\n out_rank = np.where(out_argsort == hidden_out_label)[1]\n recall_out = np.zeros((top_k))\n for i in out_rank:\n if i < top_k:\n recall_out[i:] += 1\n\n # 处理hidden in link\n in_have_index = np.array(hidden_in)[:, 1]\n # in_delete_index = set(range(num_node)) - in_have_index\n # 删除没有hidden in 的节点\n hidden_in_label = np.array(hidden_in)[:, 0].reshape(1, -1)\n pred_in = pred[:, in_have_index]\n # pred_in = np.delete(pred, list(in_delete_index), axis=1)\n # 升序排列\n in_argsort = np.argsort(-pred_in, axis=0)\n in_rank = np.where(in_argsort == hidden_in_label)[0]\n recall_in = np.zeros((top_k))\n for i in in_rank:\n if i j)'s probability\n :param train_label: [n,n] matrix\n :param hidden_in:[m,2] list, each row means a edge from hidden_in[i,0] to hidden_in[i,1]\n :param hidden_out: [m,2] list, each row means a edge from hidden_out[i,0] to hidden_out[i,1]\n :param top_k: check top k scores to see if the link in the\n :return:\n hidden_in_recall: float, means the recall rate in hidden in link\n hidden_out_recall: float, means the recall rate in hidden out link\n '''\n\n assert len(hidden_in) > 0\n assert len(hidden_out) > 0\n num_correct_in = np.zeros((top_k))\n\n for link_in in hidden_in:\n s_node = link_in[0]\n e_node = link_in[1]\n train_node = np.where(train_label[:, e_node] > 0)\n p = copy.deepcopy(pred[:, e_node])\n # exclude train link\n p[train_node[0]] = 0\n node_rank = np.argsort(p)[::-1]\n rank_k = np.where(node_rank == s_node)[0][0]\n if rank_k < top_k:\n num_correct_in[rank_k:] += 1\n\n num_correct_out = np.zeros((top_k))\n for link_out in hidden_out:\n s_node = link_out[0]\n e_node = link_out[1]\n train_node = np.where(train_label[s_node, :] > 0)\n p = copy.deepcopy(pred[s_node, :])\n # exclude train link\n p[train_node[0]] = 0\n node_rank = np.argsort(p)[::-1]\n rank_k = np.where(node_rank == e_node)[0][0]\n if rank_k < top_k:\n num_correct_out[rank_k:] += 1\n return 
num_correct_in/len(hidden_in), num_correct_out/len(hidden_out)\n\n\ndef plot_lines_fig(df,\n                   x_index=0,\n                   x_name='step',\n                   line_index=[1],\n                   line_name=['temperature'],\n                   line_range=[26, 36],\n                   line_y_name='recall',\n                   if_loss=True,\n                   table_title='results',\n                   loss_index=1,\n                   loss_range=[0, 9]\n                   ):\n\n    fig = plt.figure(figsize=(30, 8))\n    ax1 = fig.add_subplot(111)\n    num_day = df.shape[0]\n    # plot the data lines\n\n    ax1.set_ylim(line_range[0], line_range[1])\n    for index, name in zip(line_index, line_name):\n        ax1.plot(range(num_day), df.iloc[:, index].astype(float), label=name)\n    index = [float(c) + 0.4 for c in range(num_day)]\n    plt.xticks(index, df.iloc[:, x_index])\n    for label in ax1.get_xticklabels()[::2]:\n        label.set_visible(False)\n    plt.xticks(rotation=-90)\n    # plot the loss on a secondary axis\n    if if_loss:\n        ax2 = ax1.twinx()\n        ax2.set_ylim(loss_range[0], loss_range[1])\n        ax2.plot(range(num_day), df.iloc[:, loss_index].astype(float), 'g*-', label='loss')\n    # set axis labels and title\n    ax1.set_xlabel(x_name)\n    plt.title(table_title)\n    ax1.set_ylabel(line_y_name)\n    if if_loss:\n        ax2.set_ylabel('loss')\n        ax2.legend(loc='upper right')\n    ax1.legend(loc='upper left')\n\n    plt.savefig('./results/' + table_title+'.png', dpi=300, bbox_inches='tight')\n    plt.close('all')\n    # plt.show()\n\n\ndef set_configs(**kwargs):\n    configs = list()\n    base_config = dict()\n    base_config['learning_rate'] = 0.005\n    base_config['epochs'] = 2000\n    # base_config['epochs'] = 60\n    base_config['hidden1'] = 32\n    base_config['hidden2'] = 16\n    base_config['weight_decay'] = 0.\n    base_config['dataset'] = 'cora'\n    base_config['if_BN'] = True\n    base_config['features'] = 1\n    base_config['weight_init'] = 'glorot' # 'truncated_normal'\n    base_config['result_name'] = ''\n    base_config['result_path'] = './results/' + base_config['result_name'] + '.csv'\n    allowed_kwargs = {'learning_rate', 'weight_decay', 'if_BN', 'weight_init'}\n    for kwarg in kwargs.keys():\n        assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg\n    parameter_list = list(itertools.product(*kwargs.values()))\n    for p_set in parameter_list:\n        result_name = 'base_line_'\n        for i, key in enumerate(kwargs.keys()):\n            base_config[key] = p_set[i]\n            result_name += '_' + key + '_%s' % str(p_set[i])\n        base_config['result_name'] = result_name\n        base_config['result_path'] = './results/' + base_config['result_name'] + '.csv'\n        configs.append(copy.deepcopy(base_config))\n    return configs\n\n\ndef plot_all(result_path, title_name):\n    df = pd.read_csv(result_path)\n    plot_lines_fig(df,\n                   x_index=0,\n                   x_name='step',\n                   line_index=list(range(1, 21)),\n                   line_name=df.columns[1:21],\n                   line_range=[0, 0.1],\n                   if_loss=True,\n                   loss_index=-1,\n                   loss_range=[0, 8],\n                   table_title='in_link_recall_loss_fig_' + title_name\n                   )\n    plot_lines_fig(df,\n                   x_index=0,\n                   x_name='step',\n                   line_index=list(range(21, 41)),\n                   line_name=df.columns[21:41],\n                   line_range=[0, 0.3],\n                   if_loss=True,\n                   loss_index=-1,\n                   loss_range=[0, 8],\n                   table_title='out_link_recall_loss_fig_' + title_name\n                   )\n\n\nif __name__ == '__main__':\n    # df = pd.read_csv('./results/base_line_lr_0.005_with_weight_init_glorot_weight_decay_5e-4__6.csv')\n    # plot_lines_fig(df,\n    #                x_index=0,\n    #                x_name='step',\n    #                line_index=list(range(1, 21)),\n    #                line_name=df.columns[1:21],\n    #                line_range=[0, 0.1],\n    #                if_loss=True,\n    #                loss_index=-1,\n    #                loss_range=[0, 8],\n    #                table_title='in link recall loss fig with glorot weight decay 5e-4 retry6'\n    #                )\n    # plot_lines_fig(df,\n    #                x_index=0,\n    #                x_name='step',\n    #                line_index=list(range(21, 41)),\n    #                line_name=df.columns[21:41],\n    #                line_range=[0, 0.3],\n    #                if_loss=True,\n    #                
loss_index=-1,\n # loss_range=[0, 8],\n # table_title='out link recall loss fig with glorot weight decay 5e-4 retry6'\n # )\n configs = set_configs(weight_decay=[1e-4, 5e-4], weight_init=['truncated_normal', 'glorot'])\n print(1)\n", "sub_path": "model_4/gae/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 8492, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.where", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 76, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 81, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 94, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 168, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "19554975", "text": "\r\nimport bpy\r\nfrom math import radians\r\nfrom mathutils import Matrix\r\ndir=\"home/randall/Model_Name/\"\r\nmodel=\"Model_Name\"\r\nobj = bpy.context.active_object\r\nstep_count = 36\r\nstep_count1 = 18\r\nfor step in range(0,step_count):\r\n\tbpy.data.scenes[\"Scene\"].render.filepath=dir+model+\"_\"+str(step*10)+\"_\"+str(0)+\".png\"\r\n\tbpy.ops.render.render(write_still=True)\r\n\tfor 
step1 in range(0,step_count1):\r\n\t\trot_mat1 = Matrix.Rotation(radians(10), 4, 'Y')\r\n\t\torig_loc1, orig_rot1, orig_scale1 = obj.matrix_world.decompose()\r\n\t\torig_loc_mat1 = Matrix.Translation(orig_loc1)\r\n\t\torig_rot_mat1 = orig_rot1.to_matrix().to_4x4()\r\n\t\torig_scale_mat1 = Matrix.Scale(orig_scale1[0],4,(1,0,0)) @ Matrix.Scale(orig_scale1[1],4,(0,1,0)) @ Matrix.Scale(orig_scale1[2],4,(0,0,1))\r\n\t\tobj.matrix_world = orig_loc_mat1 @ rot_mat1 @ orig_rot_mat1 @ orig_scale_mat1\r\n\t\tbpy.data.scenes[\"Scene\"].render.filepath=dir+model+\"_\"+str(step*10)+\"_\"+str((step1*10)+10)+\".png\"\r\n\t\tbpy.ops.render.render(write_still=True)\r\n\trot_mat1 = Matrix.Rotation(radians(180), 4, 'Y')\r\n\torig_loc1, orig_rot1, orig_scale1 = obj.matrix_world.decompose()\r\n\torig_loc_mat1 = Matrix.Translation(orig_loc1)\r\n\torig_rot_mat1 = orig_rot1.to_matrix().to_4x4()\r\n\torig_scale_mat1 = Matrix.Scale(orig_scale1[0],4,(1,0,0)) @ Matrix.Scale(orig_scale1[1],4,(0,1,0)) @ Matrix.Scale(orig_scale1[2],4,(0,0,1))\r\n\tobj.matrix_world = orig_loc_mat1 @ rot_mat1 @ orig_rot_mat1 @ orig_scale_mat1\r\n\trot_mat = Matrix.Rotation(radians(10), 4, 'X')\r\n\torig_loc, orig_rot, orig_scale = obj.matrix_world.decompose()\r\n\torig_loc_mat = Matrix.Translation(orig_loc)\r\n\torig_rot_mat = orig_rot.to_matrix().to_4x4()\r\n\torig_scale_mat = Matrix.Scale(orig_scale[0],4,(1,0,0)) @ Matrix.Scale(orig_scale[1],4,(0,1,0)) @ Matrix.Scale(orig_scale[2],4,(0,0,1))\r\n\tobj.matrix_world = orig_loc_mat @ rot_mat @ orig_rot_mat @ orig_scale_mat\r\n\r\n\r\n", "sub_path": "Blender/build/build_prior_lib.py", "file_name": "build_prior_lib.py", "file_ext": "py", "file_size_in_byte": 1835, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "bpy.context", "line_number": 7, "usage_type": "attribute"}, {"api_name": "bpy.data", "line_number": 11, "usage_type": "attribute"}, {"api_name": "bpy.ops.render.render", "line_number": 12, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 12, "usage_type": "attribute"}, {"api_name": "mathutils.Matrix.Rotation", "line_number": 14, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 14, "usage_type": "name"}, {"api_name": "math.radians", "line_number": 14, "usage_type": "call"}, {"api_name": "mathutils.Matrix.Translation", "line_number": 16, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 16, "usage_type": "name"}, {"api_name": "mathutils.Matrix.Scale", "line_number": 18, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 18, "usage_type": "name"}, {"api_name": "bpy.data", "line_number": 20, "usage_type": "attribute"}, {"api_name": "bpy.ops.render.render", "line_number": 21, "usage_type": "call"}, {"api_name": "bpy.ops", "line_number": 21, "usage_type": "attribute"}, {"api_name": "mathutils.Matrix.Rotation", "line_number": 22, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 22, "usage_type": "name"}, {"api_name": "math.radians", "line_number": 22, "usage_type": "call"}, {"api_name": "mathutils.Matrix.Translation", "line_number": 24, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 24, "usage_type": "name"}, {"api_name": "mathutils.Matrix.Scale", "line_number": 26, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 26, "usage_type": "name"}, {"api_name": "mathutils.Matrix.Rotation", "line_number": 28, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 28, 
"usage_type": "name"}, {"api_name": "math.radians", "line_number": 28, "usage_type": "call"}, {"api_name": "mathutils.Matrix.Translation", "line_number": 30, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 30, "usage_type": "name"}, {"api_name": "mathutils.Matrix.Scale", "line_number": 32, "usage_type": "call"}, {"api_name": "mathutils.Matrix", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "11918528", "text": "import re\nfrom typing import Optional\n\nimport necrobot.exception\nfrom necrobot.condorbot.condorevent import CondorEvent\nfrom necrobot.config import Config\nfrom necrobot.database.dbconnect import DBConnect\n\n\nasync def is_condor_event(schema_name: str) -> bool:\n \"\"\"\n Parameters\n ----------\n schema_name: str\n The name of the schema for the event (and also the event's unique identifier).\n\n Returns\n -------\n bool:\n Whether the given schema refers to a CoNDOR event.\n \"\"\"\n params = (schema_name,)\n async with DBConnect(commit=False) as cursor:\n cursor.execute(\n \"\"\"\n SELECT `schema_name` \n FROM `events`\n WHERE `schema_name` = %s\n LIMIT 1\n \"\"\",\n params\n )\n for _ in cursor:\n return True\n\n return False\n\n\nasync def set_event_params(\n schema_name: str,\n event_name: Optional[str] = None,\n deadline: Optional[str] = None,\n gsheet_id: Optional[str] = None\n):\n \"\"\"\n Parameters\n ----------\n schema_name: str\n The name of the schema to set.\n event_name: str\n The name to set the event to, if not None (otherwise, does nothing)\n deadline: str\n The string representing the deadline, if not None (otherwise, does nothing)\n gsheet_id: str\n The ID of the GSheet, if not None (otherwise, does nothing)\n \"\"\"\n async with DBConnect(commit=True) as cursor:\n if event_name is not None:\n params = (event_name, schema_name,)\n cursor.execute(\n \"\"\"\n UPDATE `events`\n SET `event_name` = %s\n WHERE `schema_name` = %s\n \"\"\",\n params\n )\n if deadline is not None:\n params = (deadline, schema_name,)\n cursor.execute(\n \"\"\"\n UPDATE `events`\n SET `deadline` = %s\n WHERE `schema_name` = %s\n \"\"\",\n params\n )\n if gsheet_id is not None:\n params = (gsheet_id, schema_name,)\n cursor.execute(\n \"\"\"\n UPDATE `events`\n SET `gsheet_id` = %s\n WHERE `schema_name` = %s\n \"\"\",\n params\n )\n\n\nasync def get_event(schema_name: str) -> CondorEvent:\n \"\"\"\n Parameters\n ----------\n schema_name: str\n The name of the schema for the event (and also the event's unique identifier).\n\n Returns\n -------\n str:\n The string representing the deadline\n \"\"\"\n params = (schema_name,)\n async with DBConnect(commit=False) as cursor:\n cursor.execute(\n \"\"\"\n SELECT `event_name`, `deadline`, `gsheet_id`\n FROM `events`\n WHERE `schema_name` = %s\n LIMIT 1\n \"\"\",\n params\n )\n for row in cursor:\n return CondorEvent(schema_name=schema_name, event_name=row[0], deadline_str=row[1], gsheet_id=row[2])\n\n raise necrobot.exception.SchemaDoesNotExist()\n\n\nasync def create_event(schema_name: str) -> CondorEvent:\n \"\"\"Creates a new CoNDOR event with the given schema_name as its database.\n\n Parameters\n ----------\n schema_name: str\n The name of the database schema for this event, and also the unique identifier for this event.\n\n Raises\n ------\n SchemaAlreadyExists\n When the schema_name already exists.\n \"\"\"\n table_name_validator = re.compile(r'^[0-9a-zA-Z_$]+$')\n if not table_name_validator.match(schema_name):\n raise necrobot.exception.InvalidSchemaName()\n\n params = (schema_name,)\n async with 
DBConnect(commit=True) as cursor:\n cursor.execute(\n \"\"\"\n SELECT `schema_name`\n FROM `events` \n WHERE `schema_name`=%s\n \"\"\",\n params\n )\n for _ in cursor:\n raise necrobot.exception.SchemaAlreadyExists('Event already exists.')\n\n cursor.execute(\n \"\"\"\n SELECT SCHEMA_NAME \n FROM INFORMATION_SCHEMA.SCHEMATA \n WHERE SCHEMA_NAME = %s\n \"\"\",\n params\n )\n for _ in cursor:\n raise necrobot.exception.SchemaAlreadyExists('Schema already exists, but is not a CoNDOR event.')\n\n cursor.execute(\n \"\"\"\n CREATE SCHEMA `{schema_name}` \n DEFAULT CHARACTER SET = utf8 \n DEFAULT COLLATE = utf8_general_ci\n \"\"\".format(schema_name=schema_name)\n )\n cursor.execute(\n \"\"\"\n INSERT INTO `events` \n (`schema_name`) \n VALUES (%s)\n \"\"\",\n params\n )\n\n cursor.execute(\n \"\"\"\n CREATE TABLE `{schema_name}`.`entrants` (\n `user_id` smallint unsigned NOT NULL,\n PRIMARY KEY (`user_id`)\n ) DEFAULT CHARSET=utf8\n \"\"\".format(schema_name=schema_name)\n )\n\n for table_name in ['leagues', 'matches', 'match_races', 'races', 'race_runs']:\n cursor.execute(\n \"CREATE TABLE `{league_schema}`.`{table}` LIKE `{necrobot_schema}`.`{table}`\".format(\n league_schema=schema_name,\n necrobot_schema=Config.MYSQL_DB_NAME,\n table=table_name\n )\n )\n\n def tablename(table):\n return '`{league_schema}`.`{table}`'.format(league_schema=schema_name, table=table)\n\n cursor.execute(\n \"\"\"\n CREATE VIEW {race_summary} AS\n SELECT \n {matches}.`match_id` AS `match_id`,\n {matches}.`league_tag` AS `league_tag`,\n {match_races}.`race_number` AS `race_number`,\n `users_winner`.`user_id` AS `winner_id`,\n `users_loser`.`user_id` AS `loser_id`,\n `race_runs_winner`.`time` AS `winner_time`,\n `race_runs_loser`.`time` AS `loser_time`\n FROM\n {matches}\n JOIN {match_races} ON {matches}.`match_id` = {match_races}.`match_id`\n JOIN `users` `users_winner` ON \n IF( {match_races}.`winner` = 1, \n `users_winner`.`user_id` = {matches}.`racer_1_id`, \n `users_winner`.`user_id` = {matches}.`racer_2_id`\n )\n JOIN `users` `users_loser` ON \n IF( {match_races}.`winner` = 1, \n `users_loser`.`user_id` = {matches}.`racer_2_id`, \n `users_loser`.`user_id` = {matches}.`racer_1_id`\n )\n LEFT JOIN {race_runs} `race_runs_winner` ON \n `race_runs_winner`.`user_id` = `users_winner`.`user_id`\n AND `race_runs_winner`.`race_id` = {match_races}.`race_id`\n LEFT JOIN {race_runs} `race_runs_loser` ON \n `race_runs_loser`.`user_id` = `users_loser`.`user_id`\n AND `race_runs_loser`.`race_id` = {match_races}.`race_id`\n WHERE NOT {match_races}.`canceled`\n \"\"\".format(\n matches=tablename('matches'),\n match_races=tablename('match_races'),\n race_runs=tablename('race_runs'),\n race_summary=tablename('race_summary')\n )\n )\n\n cursor.execute(\n \"\"\"\n CREATE VIEW {match_info} AS\n SELECT \n {matches}.`match_id` AS `match_id`,\n {matches}.`league_tag` AS `league_tag`,\n `ud1`.`twitch_name` AS `racer_1_name`,\n `ud2`.`twitch_name` AS `racer_2_name`,\n {matches}.`suggested_time` AS `scheduled_time`,\n `ud3`.`twitch_name` AS `cawmentator_name`,\n {matches}.`vod` AS `vod`,\n {matches}.`is_best_of` AS `is_best_of`,\n {matches}.`number_of_races` AS `number_of_races`,\n {matches}.`autogenned` AS `autogenned`,\n ({matches}.`r1_confirmed` AND {matches}.`r2_confirmed`) AS `scheduled`,\n COUNT(0) AS `num_finished`,\n SUM((CASE\n WHEN ({match_races}.`winner` = 1) THEN 1\n ELSE 0\n END)) AS `racer_1_wins`,\n SUM((CASE\n WHEN ({match_races}.`winner` = 2) THEN 1\n ELSE 0\n END)) AS `racer_2_wins`,\n (CASE\n WHEN\n 
{matches}.`is_best_of`\n THEN\n (GREATEST(SUM((CASE\n WHEN ({match_races}.`winner` = 1) THEN 1\n ELSE 0\n END)),\n SUM((CASE\n WHEN ({match_races}.`winner` = 2) THEN 1\n ELSE 0\n END))) >= (({matches}.`number_of_races` DIV 2) + 1))\n ELSE (COUNT(0) >= {matches}.`number_of_races`)\n END) AS `completed`\n FROM\n (((({matches}\n LEFT JOIN {match_races} ON (({matches}.`match_id` = {match_races}.`match_id`)))\n JOIN `necrobot`.`users` `ud1` ON (({matches}.`racer_1_id` = `ud1`.`user_id`)))\n JOIN `necrobot`.`users` `ud2` ON (({matches}.`racer_2_id` = `ud2`.`user_id`)))\n LEFT JOIN `necrobot`.`users` `ud3` ON (({matches}.`cawmentator_id` = `ud3`.`user_id`)))\n WHERE\n ({match_races}.`canceled` = 0 OR {match_races}.`canceled` IS NULL)\n GROUP BY {matches}.`match_id`\n \"\"\".format(\n match_info=tablename('match_info'),\n matches=tablename('matches'),\n match_races=tablename('match_races')\n )\n )\n\n cursor.execute(\n \"\"\"\n CREATE VIEW {event_info} AS\n SELECT *\n FROM `events`\n WHERE (`events`.`schema_name` = %s)\n \"\"\".format(event_info=tablename('event_info')),\n params\n )\n\n return CondorEvent(schema_name=schema_name, event_name=None, deadline_str=None, gsheet_id=None)\n", "sub_path": "necrobot/condorbot/condordb.py", "file_name": "condordb.py", "file_ext": "py", "file_size_in_byte": 10725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "necrobot.database.dbconnect.DBConnect", "line_number": 23, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 42, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 43, "usage_type": "name"}, {"api_name": "necrobot.database.dbconnect.DBConnect", "line_number": 57, "usage_type": "call"}, {"api_name": "necrobot.database.dbconnect.DBConnect", "line_number": 103, "usage_type": "call"}, {"api_name": "necrobot.condorbot.condorevent.CondorEvent", "line_number": 114, "usage_type": "call"}, {"api_name": "necrobot.exception.exception.SchemaDoesNotExist", "line_number": 116, "usage_type": "call"}, {"api_name": "necrobot.exception.exception", "line_number": 116, "usage_type": "attribute"}, {"api_name": "necrobot.exception", "line_number": 116, "usage_type": "name"}, {"api_name": "necrobot.condorbot.condorevent.CondorEvent", "line_number": 90, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 132, "usage_type": "call"}, {"api_name": "necrobot.exception.exception.InvalidSchemaName", "line_number": 134, "usage_type": "call"}, {"api_name": "necrobot.exception.exception", "line_number": 134, "usage_type": "attribute"}, {"api_name": "necrobot.exception", "line_number": 134, "usage_type": "name"}, {"api_name": "necrobot.database.dbconnect.DBConnect", "line_number": 137, "usage_type": "call"}, {"api_name": "necrobot.exception.exception.SchemaAlreadyExists", "line_number": 147, "usage_type": "call"}, {"api_name": "necrobot.exception.exception", "line_number": 147, "usage_type": "attribute"}, {"api_name": "necrobot.exception", "line_number": 147, "usage_type": "name"}, {"api_name": "necrobot.exception.exception.SchemaAlreadyExists", "line_number": 158, "usage_type": "call"}, {"api_name": "necrobot.exception.exception", "line_number": 158, "usage_type": "attribute"}, {"api_name": "necrobot.exception", "line_number": 158, "usage_type": "name"}, {"api_name": "necrobot.config.Config.MYSQL_DB_NAME", "line_number": 189, "usage_type": "attribute"}, {"api_name": 
"necrobot.config.Config", "line_number": 189, "usage_type": "name"}, {"api_name": "necrobot.condorbot.condorevent.CondorEvent", "line_number": 300, "usage_type": "call"}, {"api_name": "necrobot.condorbot.condorevent.CondorEvent", "line_number": 119, "usage_type": "name"}]} +{"seq_id": "106076916", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 8 10:21:19 2018\n\n@author: aristizabal\n\"\"\"\n\n#%% User input\n\n# lat and lon limits\n\n\n# RU22\nlon_min = 125.0\nlon_max = 127.0\nlat_min = 32.0\nlat_max = 34.0\n\n# date limits\ndate_ini = '2018-08-01T00:00:00Z'\ndate_end = '2018-08-26T00:00:00Z'\n\n\n'''\n# ng288\nlon_min = -90.0\nlon_max = -80.0\nlat_min = 26.0\nlat_max = 28.0\n\ndate_ini = '2018-10-07T00:00:00Z'\ndate_end = '2018-10-13T00:00:00Z'\n'''\n\n# Glider dac location\nserver = 'https://data.ioos.us/gliders/erddap'\n\n#GOFS3.1 outout model location\n#catalog31 = 'http://tds.hycom.org/thredds/dodsC/GLBv0.08/expt_93.0/ts3z'\ncatalog31 = '/Users/aristizabal/Desktop/MARACOOS_project/Maria_scripts/nc_files/ts3z.nc'\n\n# Bathymetry data\n#bath_data = '/Users/aristizabal/Desktop/MARACOOS_project/Maria_scripts/nc_files/GEBCO_2014_2D_-100.0_0.0_-10.0_70.0.nc'\n\n#%%\n\nfrom erddapy import ERDDAP\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\n#from mpl_toolkits.basemap import Basemap\n\nimport netCDF4\nfrom netCDF4 import Dataset\nimport xarray as xr\n\nimport numpy as np\n\nimport datetime\nimport time\n\n#%% Look for datasets \n\ne = ERDDAP(server = server)\n\n# Grab every dataset available\ndatasets = pd.read_csv(e.get_search_url(response='csv', search_for='all'))\n\n# Search constraints\nkw2018 = {\n 'min_lon': lon_min,\n 'max_lon': lon_max,\n 'min_lat': lat_min,\n 'max_lat': lat_max,\n 'min_time': date_ini,\n 'max_time': date_end,\n}\n\nsearch_url = e.get_search_url(response='csv', **kw2018)\n#print(search_url)\n\n# Grab the results\nsearch = pd.read_csv(search_url)\n\n# Extract the IDs\ngliders = search['Dataset ID'].values\n\nmsg = 'Found {} Glider Datasets:\\n\\n{}'.format\nprint(msg(len(gliders), '\\n'.join(gliders)))\n\n#%%\n\nserver = 'https://data.ioos.us/gliders/erddap'\n\ndataset_id = gliders[0]\n\nconstraints = {\n 'time>=': date_ini,\n 'time<=': date_end,\n 'latitude>=': lat_min,\n 'latitude<=': lat_max,\n 'longitude>=': lon_min,\n 'longitude<=': lon_max,\n}\n\nvariables = [\n 'depth',\n 'latitude',\n 'longitude',\n 'salinity',\n 'temperature',\n 'time',\n]\n\n#%%\n\ne = ERDDAP(\n server=server,\n protocol='tabledap',\n response='nc'\n)\n\ne.dataset_id=gliders[0]\ne.constraints=constraints\ne.variables=variables\n\nprint(e.get_download_url())\n\n#%% Using xarray\n'''\nds = e.to_xarray(decode_times=False)\nds.time\n'''\n\n#%%\n\ndf = e.to_pandas(\n index_col='time',\n parse_dates=True,\n skiprows=(1,) # units information can be dropped.\n).dropna()\n\ndf.head()\n\ndff = e.to_pandas(\n index_col='time',\n parse_dates=True,\n skiprows=(1,) # units information can be dropped.\n)\n\n#%% Coverting glider vectors into arrays\n\ntimeg, ind = np.unique(df.index.values,return_index=True)\nlatg = np.unique(df.latitude.values)\nlong = np.unique(df.longitude.values)\n\ndg = df.depth.values\ntg = df.temperature.values\nsg = df.salinity.values\n\nzn = np.int(np.max(dg)/0.3)\n\ndepthg = np.empty((zn,len(timeg)))\ndepthg[:] = np.nan\ntempg = np.empty((zn,len(timeg)))\ntempg[:] = np.nan\nsaltg = np.empty((zn,len(timeg)))\nsaltg[:] = np.nan\nfor i,ii in enumerate(ind):\n print(i)\n if i < len(timeg)-1:\n 
depthg[0:len(dg[ind[i]:ind[i+1]]),i] = dg[ind[i]:ind[i+1]] \n tempg[0:len(tg[ind[i]:ind[i+1]]),i] = tg[ind[i]:ind[i+1]]\n saltg[0:len(sg[ind[i]:ind[i+1]]),i] = sg[ind[i]:ind[i+1]]\n else:\n depthg[0:len(dg[ind[i]:len(dg)]),i] = dg[ind[i]:len(dg)] \n tempg[0:len(tg[ind[i]:len(tg)]),i] = tg[ind[i]:len(tg)]\n saltg[0:len(sg[ind[i]:len(sg)]),i] = sg[ind[i]:len(sg)]\n \n#%% Grid variables\n\ndepthg_gridded = np.arange(0,np.nanmax(depthg),0.5)\ntempg_gridded = np.empty((len(depthg_gridded),len(timeg)))\ntempg_gridded[:] = np.nan\n\nfor t,tt in enumerate(timeg):\n depthu,oku = np.unique(depthg[:,t],return_index=True)\n tempu = tempg[oku,t]\n okdd = np.isfinite(depthu)\n depthf = depthu[okdd]\n tempf = tempu[okdd]\n ok = np.isfinite(tempf)\n if np.sum(ok) < 3:\n tempg_gridded[:,t] = np.nan\n else:\n okd = depthg_gridded < np.max(depthf[ok])\n tempg_gridded[okd,t] = np.interp(depthg_gridded[okd],depthf[ok],tempf[ok])\n \n#%% Read Bathymetry data\n'''\nBathy = Dataset(bath_data)\n\nlatbath = Bathy.variables['lat'][:]\nlonbath = Bathy.variables['lon'][:]\nelevbath = Bathy.variables['elevation'][:]\n'''\n\n#%% Read GOFS 3.1 output\n\nGOFS31 = xr.open_dataset(catalog31,decode_times=False)\n\nlat31 = GOFS31.lat\nlon31 = GOFS31.variables['lon'][:]\ndepth31 = GOFS31.variables['depth'][:]\ntt31 = GOFS31.variables['time']\n#t31 = netCDF4.num2date(tt31[:],tt31.units) \nt31 = netCDF4.num2date(tt31[:],'hours since 2000-01-01 00:00:00') \n\ntmin = datetime.datetime.strptime(date_ini,'%Y-%m-%dT%H:%M:%SZ')\ntmax = datetime.datetime.strptime(date_end,'%Y-%m-%dT%H:%M:%SZ')\n\noktime31 = np.where(np.logical_and(t31 >= tmin, t31 <= tmax))\n\n# Conversion from glider longitude and latitude to GOFS convention\ntarget_lon = np.empty((len(df['longitude']),))\ntarget_lon[:] = np.nan\nfor i in range(len(df['longitude'])):\n if df['longitude'][i] < 0: \n target_lon[i] = 360 + df['longitude'][i]\n else:\n target_lon[i] = df['longitude'][i]\ntarget_lat = df['latitude'][:]\n\n################\n'''\nncbath = xr.open_dataset(bath_file)\nbath_lat = ncbath.lat\nbath_lon = ncbath.lon\nbath_elev = ncbath.elevation\n\noklatbath = np.logical_and(bath_lat >= lat_lim[0],bath_lat <= lat_lim[-1])\noklonbath = np.logical_and(bath_lon >= lon_lim[0],bath_lon <= lon_lim[-1])\n\nbath_latsub = bath_lat[oklatbath]\nbath_lonsub = bath_lon[oklonbath]\nbath_elevsub = bath_elev[oklatbath,oklonbath]\n'''\n\n#%% Changing times to timestamp\n\ntimeg = [time.mktime(df.index[i].timetuple()) for i in np.arange(len(df))]\ntime31 = [time.mktime(t31[i].timetuple()) for i in np.arange(len(t31))]\n\n# interpolating glider lon and lat to lat and lon on model time\nsublon31=np.interp(time31,timeg,target_lon)\nsublat31=np.interp(time31,timeg,target_lat)\n\n# getting the model grid positions for sublon31 and sublat31\noklon31=np.round(np.interp(sublon31,lon31,np.arange(len(lon31)))).astype(int)\noklat31=np.round(np.interp(sublat31,lat31,np.arange(len(lat31)))).astype(int)\n\n#mat_lat_lon = np.array([oklon31,oklat31])\n#n_grid_points1 = len(np.unique(mat_lat_lon.T,axis=0))\n\n#%%\n\ntarget_temp31 = np.empty((len(depth31),len(oktime31[0])))\ntarget_temp31[:] = np.nan\nfor i in range(len(oktime31[0])):\n target_temp31[:,i] = GOFS31.variables['water_temp'][oktime31[0][i],:,oklat31[i],oklon31[i]]\n\ntarget_temp31[target_temp31 < -100] = np.nan\n\n#%%\n\nfig, ax=plt.subplots(figsize=(10, 6), facecolor='w', edgecolor='w')\n\nkw = dict(s=30, c=df['temperature'], marker='*', edgecolor='none')\ncs = ax.scatter(df.index, -df['depth'], **kw, 
cmap='RdYlBu_r')\n\nax.set_xlim(df.index[0], df.index[-1])\nxfmt = mdates.DateFormatter('%H:%Mh\\n%d-%b')\nax.xaxis.set_major_formatter(xfmt)\n\ncbar = fig.colorbar(cs, orientation='vertical')\ncbar.ax.set_ylabel('Temperature ($^\\circ$C)')\nax.set_ylabel('Depth (m)');\n\nplt.show()\n\n#%%\n\nfig, ax=plt.subplots(figsize=(10, 6), facecolor='w', edgecolor='w')\n\nkw = dict(levels = np.linspace(np.nanmin(tempg_gridded),np.nanmax(tempg_gridded),10))\nax.contour(timeg,-depthg_gridded,tempg_gridded,colors = 'lightgrey',**kw)\ncs = ax.contourf(timeg,-depthg_gridded,tempg_gridded,cmap='RdYlBu_r',**kw) # keep the mappable so the colorbar matches this figure\n\n\nax.set_xlim(df.index[0], df.index[-1])\nxfmt = mdates.DateFormatter('%H:%Mh\\n%d-%b')\nax.xaxis.set_major_formatter(xfmt)\n\n#cb = plt.colorbar()\n#cb.set_label('Salinity',rotation=270, labelpad=25, fontsize=12)\ncbar = fig.colorbar(cs, orientation='vertical')\ncbar.ax.set_ylabel('Temperature ($^\\circ$C)')\nax.set_ylabel('Depth (m)');\n\nplt.show()\n\n#%%\nfig, ax=plt.subplots(figsize=(10, 6), facecolor='w', edgecolor='w')\n\ncs = ax.contourf(t31[oktime31],depth31,target_temp31,cmap='RdYlBu_r') # keep the mappable so the colorbar matches this figure\nplt.ylim(0,100)\n\nax.invert_yaxis()\n#ax2.set_xlim(df.index[0], df.index[-1])\nxfmt = mdates.DateFormatter('%H:%Mh\\n%d-%b')\nax.xaxis.set_major_formatter(xfmt)\n\ncbar = fig.colorbar(cs, orientation='vertical')\ncbar.ax.set_ylabel('Temperature ($^\\circ$C)')\nax.set_ylabel('Depth (m)');\n", "sub_path": "along_track_glider_model_com.py", "file_name": "along_track_glider_model_com.py", "file_ext": "py", "file_size_in_byte": 8002, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "erddapy.ERDDAP", "line_number": 67, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 70, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 86, "usage_type": "call"}, {"api_name": "erddapy.ERDDAP", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 157, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 167, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 169, "usage_type": "attribute"}, {"api_name": "numpy.empty", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 185, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 187, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 197, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 200, "usage_type": "call"}, {"api_name": 
"xarray.open_dataset", "line_number": 213, "usage_type": "call"}, {"api_name": "netCDF4.num2date", "line_number": 220, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 222, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 222, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 223, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 223, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 225, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 229, "usage_type": "attribute"}, {"api_name": "time.mktime", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 254, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 255, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.interp", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 270, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 271, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 275, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 296, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 296, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.nanmin", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.nanmax", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 304, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 313, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 313, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 319, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 319, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 323, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 323, "usage_type": "name"}]} +{"seq_id": 
"343194715", "text": "import pygame\r\nimport math\r\nimport random\r\nimport text\r\nimport time\r\npygame.init()\r\n#####<< colors >>####\r\nwhite=(255,255,255)\r\nblack=(0,0,0)\r\nred=(255,0,0)\r\ngreen=(0,255,0)\r\nblue=(0,0,255)\r\n#####<<< variable >>>#####\r\nwall_crash=pygame.mixer.Sound(\"wall_crash.wav\")\r\nscore_up=pygame.mixer.Sound(\"score_up.wav\")\r\nscore_down=pygame.mixer.Sound(\"score_down.wav\")\r\nlevel_up=pygame.mixer.Sound(\"level_up.wav\")\r\ngame_over=pygame.mixer.Sound(\"game_over.wav\")\r\nmove_speed=1\r\nsheet_w=500\r\nsheet_h=700\r\nclock=pygame.time.Clock()\r\nsheet=pygame.display.set_mode((sheet_w,sheet_h))\r\npygame.display.set_caption('Air Hockey')\r\n#####<<< def >>>###\r\ndef dist_poin_to_point(x_1,y_1,x_2,y_2):\r\n dist=math.sqrt((x_1-x_2)**2+(y_1-y_2)**2)\r\n return dist\r\n###########################\r\ndef game_level_3():\r\n move_angle=random.randrange(45,315)\r\n ball_x=int(sheet_w/2)\r\n ball_x_change=0\r\n ball_y=int(sheet_h/2)\r\n ball_y_change=0\r\n line_angle=0\r\n line_lenght=70\r\n other_line_lenght=80\r\n my_door_lenght=100\r\n other_door_lenght=80\r\n score=0\r\n intro=True\r\n i=0\r\n start_other_line_x=0\r\n start_other_line_y=30\r\n end_other_line_x=other_line_lenght\r\n end_other_line_y=30\r\n other_line_change=0\r\n while intro:\r\n if i==0:\r\n text.message_display(\"Level 3\",sheet_w/2,sheet_h/2,white,50)\r\n time.sleep(2)\r\n ball_x_change=math.cos(move_angle*math.pi/180)*move_speed\r\n ball_y_change=math.sin(move_angle*math.pi/180)*move_speed\r\n ball_x=ball_x_change+ball_x\r\n ball_y=ball_y+ball_y_change\r\n mouse=pygame.mouse.get_pos()\r\n click=pygame.mouse.get_pressed()\r\n start_line_x=mouse[0]-line_lenght/2\r\n end_line_x=mouse[0]+line_lenght/2\r\n start_line_y=sheet_h-40\r\n end_line_y=sheet_h-40\r\n if end_other_line_x<=290:\r\n other_line_change+=0.0005\r\n start_other_line_x+=other_line_change\r\n end_other_line_x+=other_line_change\r\n elif start_other_line_x>=100:\r\n other_line_change-=0.0005\r\n start_other_line_x+=other_line_change\r\n end_other_line_x+=other_line_change\r\n if int(ball_y)-15==0:\r\n move_angle=move_angle*(-1)\r\n if int(ball_y)+15==sheet_h:\r\n move_angle=move_angle*(-1)\r\n if int(ball_x)-15==0:\r\n move_angle=180-move_angle\r\n if int(ball_x)+15==sheet_w:\r\n move_angle=180-move_angle\r\n if click[0]==1:\r\n line_angle+=0.2\r\n start_line_x=mouse[0]-(line_lenght/2)*math.cos(line_angle*math.pi/180)\r\n end_line_x=mouse[0]+(line_lenght/2)*math.cos(line_angle*math.pi/180)\r\n start_line_y=sheet_h-40-(line_lenght/2)*math.sin(line_angle*math.pi/180)\r\n end_line_y=sheet_h-40+(line_lenght/2)*math.sin(line_angle*math.pi/180)\r\n elif click[2]==1:\r\n line_angle-=0.2\r\n start_line_x=mouse[0]-(line_lenght/2)*math.cos(line_angle*math.pi/180)\r\n end_line_x=mouse[0]+(line_lenght/2)*math.cos(line_angle*math.pi/180)\r\n start_line_y=sheet_h-40-(line_lenght/2)*math.sin(line_angle*math.pi/180)\r\n end_line_y=sheet_h-40+(line_lenght/2)*math.sin(line_angle*math.pi/180)\r\n else:\r\n start_line_x=mouse[0]-(line_lenght/2)\r\n start_line_y=sheet_h-40\r\n line_angle=0\r\n if start_line_x<=int(ball_x)<=end_line_x and start_line_y<=int(ball_y)+15<=end_line_y or end_line_x<=int(ball_x)<=start_line_x and end_line_y<=int(ball_y)+15<=start_line_y or start_line_x<=int(ball_x)<=end_line_x and end_line_y<=int(ball_y)+15<=start_line_y or end_line_x<=int(ball_x)<=start_line_x and start_line_y<=int(ball_y)+15<=end_line_y:\r\n move_angle=2*line_angle-move_angle\r\n pygame.mixer.Sound.play(wall_crash)\r\n if 
start_other_line_x<=int(ball_x)<=end_other_line_x and start_other_line_y>=int(ball_y)-15:\r\n move_angle=move_angle*(-1)\r\n if sheet_w/2-my_door_lenght/2-15<=int(ball_x)<=sheet_w/2+my_door_lenght/2+15 and sheet_h==int(ball_y)+15:\r\n score-=1\r\n pygame.mixer.Sound.play(score_down)\r\n print(score)\r\n if sheet_w/2-other_door_lenght/2-15<=int(ball_x)<=sheet_w/2+other_door_lenght/2+15 and 0==int(ball_y)-15:\r\n score+=1\r\n pygame.mixer.Sound.play(score_up)\r\n print(score)\r\n if score<-5:\r\n text.message_display(\"Game Over\",sheet_w/2,sheet_h/2,red,50)\r\n pygame.mixer.Sound.play(game_over)\r\n time.sleep(2)\r\n intro=False\r\n import Air_Hockey_level_1\r\n elif score>5: \r\n text.message_display(\"Level Up\",sheet_w/2,sheet_h/2,red,50)\r\n pygame.mixer.Sound.play(level_up)\r\n time.sleep(2)\r\n import Air_Hockey_level_4\r\n break \r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n quit()\r\n sheet.fill(black)\r\n pygame.draw.circle(sheet,red,(int(ball_x),int(ball_y)),15)\r\n pygame.draw.circle(sheet,green,(int(ball_x),int(ball_y)),5)\r\n pygame.draw.line(sheet,white,(start_line_x,start_line_y),(end_line_x,end_line_y),5)\r\n pygame.draw.line(sheet,white,(start_other_line_x,start_other_line_y),(end_other_line_x,end_other_line_y),5)\r\n pygame.draw.rect(sheet,green,(sheet_w/2-my_door_lenght/2,sheet_h-20,my_door_lenght,20))\r\n pygame.draw.rect(sheet,green,(sheet_w/2-other_door_lenght/2,0,other_door_lenght,20))\r\n pygame.draw.rect(sheet,blue,(20,520,20,20))\r\n pygame.draw.line(sheet,white,(30,40),(30,520),2)\r\n if score>=0:\r\n pygame.draw.line(sheet,green,(30,270),(30,270-score*50),8)\r\n elif score<0:\r\n pygame.draw.line(sheet,red,(30,270),(30,270-score*50),8)\r\n pygame.draw.rect(sheet,blue,(20,20,20,20))\r\n pygame.display.update()\r\n i+=1 \r\ngame_level_3()", "sub_path": "Air_Hockey_level_3.py", "file_name": "Air_Hockey_level_3.py", "file_ext": "py", "file_size_in_byte": 5954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pygame.init", "line_number": 6, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 23, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 24, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 27, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 31, "usage_type": "call"}, {"api_name": 
"text.message_display", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 53, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 53, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 54, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pressed", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 58, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 81, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 81, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 82, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 82, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 83, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 83, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 84, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 84, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 87, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 87, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 88, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 88, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 89, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 89, "usage_type": "attribute"}, {"api_name": "math.sin", "line_number": 90, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound.play", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 97, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound.play", "line_number": 102, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 102, "usage_type": "attribute"}, {"api_name": "pygame.mixer.Sound.play", "line_number": 106, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 106, "usage_type": "attribute"}, {"api_name": "text.message_display", "line_number": 109, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound.play", "line_number": 110, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 110, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 111, "usage_type": "call"}, {"api_name": "text.message_display", "line_number": 115, "usage_type": "call"}, {"api_name": "pygame.mixer.Sound.play", "line_number": 116, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 116, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 120, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 122, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 125, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 125, "usage_type": "attribute"}, {"api_name": "pygame.draw.circle", "line_number": 126, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 126, "usage_type": "attribute"}, {"api_name": 
"pygame.draw.line", "line_number": 127, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 129, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 130, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 131, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 132, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 132, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 134, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pygame.draw.line", "line_number": 136, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 137, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 138, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 138, "usage_type": "attribute"}]} +{"seq_id": "196985660", "text": "from fastapi import APIRouter\n\nfrom app.api.routes import example, files\n\nrouter = APIRouter()\nrouter.include_router(example.router, tags=[\"example\"], prefix=\"/examples\")\nrouter.include_router(files.router, tags=[\"files\"], prefix=\"/files\")\n\n\n@router.get(\"/\")\nasync def hello():\n return {\"text\": \"hello\"}\n", "sub_path": "app/api/routes/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 307, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "fastapi.APIRouter", "line_number": 5, "usage_type": "call"}, {"api_name": "app.api.routes.example.router", "line_number": 6, "usage_type": "attribute"}, {"api_name": "app.api.routes.example", "line_number": 6, "usage_type": "name"}, {"api_name": "app.api.routes.files.router", "line_number": 7, "usage_type": "attribute"}, {"api_name": "app.api.routes.files", "line_number": 7, "usage_type": "name"}]} +{"seq_id": "439740641", "text": "from app import apfell, links, use_ssl, db_objects\nfrom sanic import response\nfrom jinja2 import Environment, PackageLoader\nfrom app.forms.payloads_form import Payloads_JXA_Form\nfrom app.api import payloads_api\nfrom app.database_models.model import Payload\nimport pathlib\nfrom app.api.c2profiles_api import get_c2profiles_by_type_function\nfrom app.api.payloads_api import write_jxa_payload_func\nfrom sanic_jwt.decorators import protected, inject_user\n\nenv = Environment(loader=PackageLoader('app', 'templates'))\n\n\n@apfell.route(\"/payloads/jxa\", methods=['GET', 'POST'])\n@inject_user()\n@protected()\nasync def payloads_jxa(request, user):\n form = Payloads_JXA_Form(request)\n errors = {}\n success = \"\"\n try:\n # pass True as the 3rd parameter indicating we only want to do this for our current operation's c2 profiles\n jxa_choices = await get_c2profiles_by_type_function('apfell-jxa', user, True)\n except Exception as e:\n jxa_choices = []\n if jxa_choices is None:\n jxa_choices = 
[]\n form.c2_profile.choices = [(p['c2_profile'], p['c2_profile'] + \": \" + p['c2_profile_description']) for p in jxa_choices]\n if request.method == 'POST' and form.validate():\n callback_host = form.callback_host.data\n callback_port = form.callback_port.data\n obfuscation = form.obfuscation.data # not used yet, but in the future it will be\n output_directory = form.output_directory.data\n callback_interval = form.callback_interval.data\n default_tag = form.default_tag.data\n c2_profile = form.c2_profile.data\n # Now that we have the data, we need to register it\n data = {\"tag\": default_tag, \"operator\": user['username'], \"payload_type\": \"apfell-jxa\",\n \"callback_host\": callback_host, \"callback_port\": callback_port,\n \"callback_interval\": callback_interval, \"obfuscation\": obfuscation,\n \"use_ssl\": use_ssl, \"location\": output_directory, \"c2_profile\": c2_profile,\n \"current_operation\": user['current_operation']}\n resp = await payloads_api.register_payload_func(data) # process this with our api\n if resp['status'] == \"success\":\n try:\n create_rsp = await write_jxa_payload_func({'uuid': resp['uuid'], 'loc': output_directory})\n if create_rsp['status'] == \"success\":\n # now that we have a payload on disk, update the corresponding Payload object\n payload = await db_objects.get(Payload, uuid=resp['uuid'])\n payload.location = str(pathlib.Path(output_directory).resolve())\n await db_objects.update(payload)\n success = \"true\"\n errors['uuid'] = resp['uuid'] # kind of hacky, but it works\n else:\n success = \"false\"\n print(create_rsp['error'])\n except Exception as e:\n print(e)\n errors['validate_errors'] = \"Failed to create payload\"\n else:\n print(resp)\n\n errors['callback_host_errors'] = '
<br>'.join(form.callback_host.errors)\n        errors['callback_port_errors'] = '
<br>'.join(form.callback_port.errors)\n        errors['obfuscation_errors'] = '
<br>'.join(form.obfuscation.errors)\n        errors['output_directory_errors'] = '
<br>'.join(form.output_directory.errors)\n        errors['default_tag_errors'] = '
<br>'.join(form.default_tag.errors)\n        errors['callback_interval_errors'] = '
<br>'.join(form.callback_interval.errors)\n        errors['c2_profile_errors'] = '<br>
'.join(form.c2_profile.errors)\n\n template = env.get_template('payloads_jxa.html')\n content = template.render(name=user['username'], links=links, form=form, errors=errors, success=success)\n return response.html(content)\n\n# add links to the routes in this file at the bottom\nlinks['payloads_jxa'] = apfell.url_for('payloads_jxa')\n", "sub_path": "app/routes/payloads_routes.py", "file_name": "payloads_routes.py", "file_ext": "py", "file_size_in_byte": 3917, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "jinja2.Environment", "line_number": 12, "usage_type": "call"}, {"api_name": "jinja2.PackageLoader", "line_number": 12, "usage_type": "call"}, {"api_name": "app.forms.payloads_form.Payloads_JXA_Form", "line_number": 19, "usage_type": "call"}, {"api_name": "app.api.c2profiles_api.get_c2profiles_by_type_function", "line_number": 24, "usage_type": "call"}, {"api_name": "app.use_ssl", "line_number": 42, "usage_type": "name"}, {"api_name": "app.api.payloads_api.register_payload_func", "line_number": 44, "usage_type": "call"}, {"api_name": "app.api.payloads_api", "line_number": 44, "usage_type": "name"}, {"api_name": "app.api.payloads_api.write_jxa_payload_func", "line_number": 47, "usage_type": "call"}, {"api_name": "app.db_objects.get", "line_number": 50, "usage_type": "call"}, {"api_name": "app.database_models.model.Payload", "line_number": 50, "usage_type": "argument"}, {"api_name": "app.db_objects", "line_number": 50, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 51, "usage_type": "call"}, {"api_name": "app.db_objects.update", "line_number": 52, "usage_type": "call"}, {"api_name": "app.db_objects", "line_number": 52, "usage_type": "name"}, {"api_name": "app.links", "line_number": 73, "usage_type": "name"}, {"api_name": "sanic.response.html", "line_number": 74, "usage_type": "call"}, {"api_name": "sanic.response", "line_number": 74, "usage_type": "name"}, {"api_name": "app.apfell.route", "line_number": 15, "usage_type": "call"}, {"api_name": "app.apfell", "line_number": 15, "usage_type": "name"}, {"api_name": "sanic_jwt.decorators.inject_user", "line_number": 16, "usage_type": "call"}, {"api_name": "sanic_jwt.decorators.protected", "line_number": 17, "usage_type": "call"}, {"api_name": "app.links", "line_number": 77, "usage_type": "name"}, {"api_name": "app.apfell.url_for", "line_number": 77, "usage_type": "call"}, {"api_name": "app.apfell", "line_number": 77, "usage_type": "name"}]} +{"seq_id": "267905092", "text": "import copy\nfrom itertools import cycle\nfrom typing import Union, Sequence, Generator, Tuple\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import IterableDataset\n\nfrom ...torchio import DATA\nfrom ...utils import to_tuple, is_image_dict, check_consistent_shape\n\n\nclass ImageSampler(IterableDataset):\n r\"\"\"Extract random patches from a volume.\n\n Args:\n sample: Sample generated by a\n :py:class:`~torchio.data.images.ImagesDataset`, from which image\n patches will be extracted.\n patch_size: Tuple of integers :math:`(d, h, w)` to generate patches\n of size :math:`d \\times h \\times w`.\n If a single number :math:`n` is provided, :math:`d = h = w = n`.\n \"\"\"\n def __init__(self, sample: dict, patch_size: Union[int, Sequence[int]]):\n self.sample = sample\n self.patch_size = np.array(to_tuple(patch_size, length=3), dtype=np.uint16)\n\n def __iter__(self):\n return self.get_stream(self.sample, self.patch_size)\n\n def get_stream(self, sample: dict, patch_size: 
Tuple[int, int, int]):\n # Is cycle neccesary?\n return cycle(self.extract_patch_generator(sample, patch_size))\n\n def extract_patch_generator(\n self,\n sample: dict,\n patch_size: Tuple[int, int, int],\n ) -> Generator[dict, None, None]:\n while True:\n yield self.extract_patch(sample, patch_size)\n\n def extract_patch(\n self,\n sample: dict,\n patch_size: Tuple[int, int, int],\n ) -> dict:\n index_ini, index_fin = self.get_random_indices(sample, patch_size)\n cropped_sample = self.copy_and_crop(\n sample,\n index_ini,\n index_fin,\n )\n return cropped_sample\n\n @staticmethod\n def get_random_indices(sample: dict, patch_size: Tuple[int, int, int]):\n # Assume all images in sample have the same shape\n check_consistent_shape(sample)\n first_image_name = list(sample.keys())[0]\n first_image_array = sample[first_image_name][DATA]\n # first_image_array should have shape (1, H, W, D)\n shape = np.array(first_image_array.shape[1:], dtype=np.uint16)\n return get_random_indices_from_shape(shape, patch_size)\n\n @staticmethod\n def copy_and_crop(\n sample: dict,\n index_ini: np.ndarray,\n index_fin: np.ndarray,\n ) -> dict:\n cropped_sample = {}\n for key, value in sample.items():\n cropped_sample[key] = copy.copy(value)\n if is_image_dict(value):\n sample_image_dict = value\n cropped_image_dict = cropped_sample[key]\n cropped_image_dict[DATA] = crop(\n sample_image_dict[DATA], index_ini, index_fin)\n # torch doesn't like uint16\n cropped_sample['index_ini'] = index_ini.astype(int)\n return cropped_sample\n\n\ndef crop(\n image: Union[np.ndarray, torch.Tensor],\n index_ini: np.ndarray,\n index_fin: np.ndarray,\n ) -> Union[np.ndarray, torch.Tensor]:\n i_ini, j_ini, k_ini = index_ini\n i_fin, j_fin, k_fin = index_fin\n return image[..., i_ini:i_fin, j_ini:j_fin, k_ini:k_fin]\n\n\ndef get_random_indices_from_shape(\n shape: Tuple[int, int, int],\n patch_size: Tuple[int, int, int],\n ) -> Tuple[np.ndarray, np.ndarray]:\n shape_array = np.array(shape, dtype=np.uint16)\n patch_size_array = np.array(patch_size, dtype=np.uint16)\n max_index_ini = shape_array - patch_size_array\n if (max_index_ini < 0).any():\n message = (\n f'Patch size {patch_size_array} must not be'\n f' larger than image size {tuple(shape_array)}'\n )\n raise ValueError(message)\n coordinates = []\n for max_coordinate in max_index_ini.tolist():\n if max_coordinate == 0:\n coordinate = 0\n else:\n coordinate = torch.randint(max_coordinate, size=(1,)).item()\n coordinates.append(coordinate)\n index_ini = np.array(coordinates, np.uint16)\n index_fin = index_ini + patch_size_array\n return index_ini, index_fin\n", "sub_path": "torchio/data/sampler/sampler.py", "file_name": "sampler.py", "file_ext": "py", "file_size_in_byte": 4167, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.utils.data.IterableDataset", "line_number": 13, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Sequence", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.to_tuple", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 26, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 31, "usage_type": "name"}, {"api_name": "itertools.cycle", "line_number": 33, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Generator", 
"line_number": 39, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 46, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 57, "usage_type": "name"}, {"api_name": "utils.check_consistent_shape", "line_number": 59, "usage_type": "call"}, {"api_name": "torchio.DATA", "line_number": 61, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 70, "usage_type": "attribute"}, {"api_name": "copy.copy", "line_number": 74, "usage_type": "call"}, {"api_name": "utils.is_image_dict", "line_number": 75, "usage_type": "call"}, {"api_name": "torchio.DATA", "line_number": 78, "usage_type": "name"}, {"api_name": "torchio.DATA", "line_number": 79, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 87, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 88, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 89, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 89, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 89, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 96, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 100, "usage_type": "attribute"}, {"api_name": "torch.randint", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.uint16", "line_number": 115, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 98, "usage_type": "attribute"}]} +{"seq_id": "113288020", "text": "from flask import Flask\nfrom flask import request\nfrom flask import jsonify\nfrom flask_cors import CORS\nimport os\n\n\nfrom BusinessObject import Customer as CustomerEntity\nfrom DataObject import Customer\n\nfrom BusinessObject import Categories as CategoriesEntity\nfrom DataObject import Categories\n\nfrom BusinessObject import Employees as EmployeesEntity\nfrom DataObject import Employees\n\nfrom BusinessObject import OrderDetails as OrderDetailsEntity\nfrom DataObject import OrderDetails\n\nfrom BusinessObject import Orders as OrdersEntity \nfrom DataObject import Orders\n\nfrom BusinessObject import Products as ProductsEntity \nfrom DataObject import Products\n\nfrom BusinessObject import Shippers as ShippersEntity \nfrom DataObject import Shippers\n\nfrom BusinessObject import Suppliers as SuppliersEntity \nfrom DataObject import Suppliers\n\napp = Flask(__name__)\nCORS(app)\n\n\nconnection_data = dict()\nconnection_data['host'] = os.getenv('host')\nconnection_data['user'] = os.getenv('user')\nconnection_data['password'] = os.getenv('password')\nconnection_data['port'] = os.getenv('port')\nconnection_data['database'] = 
os.getenv('database')\n\n@app.route('/')\ndef home():\n return 'This is backend'\n\n@app.route('/index', methods=['GET'])\ndef index():\n return 'This is page'\n\n# CRUD(Create, Read, Update, Delete)\n# POST, GET, PUT, DELETE\n\n@app.route('/customers', methods=['POST'])\ndef add_customer():\n data = request.json\n customer = CustomerEntity(customer_name=data['customer_name'],\n contact_name=data['contact_name'],\n address=data['address'],\n city=data['city'],\n postal_code=data['postal_code'],\n country=data['country'])\n c = Customer(connection_data)\n message = c.insert(customer)\n if message is None:\n return jsonify({\n 'message': 'Cannot insert data'\n }), 500\n return jsonify({\n 'message': message\n })\n\n@app.route('/customers/all')\ndef get_all_customer():\n c = Customer(connection_data)\n result = c.get_all()\n return jsonify({\n 'data': result\n })\n\n@app.route('/customers/', methods=['DELETE', 'PUT'])\ndef delete_customer_by_id(id):\n if request.method == 'DELETE':\n # Delete user by id\n customer = CustomerEntity(customer_id=id)\n c = Customer(connection_data)\n result = c.delete(customer)\n return jsonify({\n 'message': result[0]\n }), result[1]\n else:\n # Update user by id\n data = request.json\n customer = CustomerEntity(customer_id=id,\n customer_name=data['customer_name'],\n contact_name=data['contact_name'], #contact_name\n address=data['address'],\n city=data['city'],\n postal_code=data['postal_code'],\n country=data['country'])\n c = Customer(connection_data)\n result = c.update(customer)\n return jsonify({\n 'message': result[0]\n }), result[1]\n\n\n#=======================================================================\n\n@app.route('/categories', methods=['POST'])\ndef add_categories():\n data = request.json\n categories = CategoriesEntity(category_name=data['category_name'],\n description=data['description']\n )\n c = Categories(connection_data)\n message = c.insert(categories)\n if message is None:\n return jsonify({\n 'message': 'Cannot insert data'\n }), 500\n return jsonify({\n 'message': message\n })\n\n@app.route('/categories/all')\ndef get_all_categories():\n c = Categories(connection_data)\n result = c.get_all()\n return jsonify({\n 'data': result\n })\n\n@app.route('/categories/', methods=['DELETE', 'PUT'])\ndef delete_categories_by_id(id):\n if request.method == 'DELETE':\n # Delete user by id\n categories = CategoriesEntity(category_id=id)\n c = Categories(connection_data)\n result = c.delete(categories)\n return jsonify({\n 'message': result[0]\n }), result[1]\n else:\n # Update user by id\n data = request.json\n categories = CategoriesEntity(category_id=id,\n category_name=data['category_name'],\n description=data['description'])\n c = Categories(connection_data)\n result = c.update(categories)\n return jsonify({\n 'message': result[0]\n }), result[1]\n\n\n#=============================================================================\n\n\n@app.route('/employees', methods=['POST'])\ndef add_employees():\n data = request.json\n employees = EmployeesEntity(lastname =data['lastname'],\n firstname=data['firstname'],\n birthdate=data['birthdate'],\n photo=data['photo'], \n notes=data['notes'])\n c = Employees(connection_data)\n message = c.insert(employees)\n if message is None:\n return jsonify({\n 'message': 'Cannot insert data'\n }), 500\n return jsonify({\n 'message': message\n })\n\n@app.route('/employees/all')\ndef get_all_employees():\n c = Employees(connection_data)\n result = c.get_all()\n return jsonify({\n 'data': result\n 
})\n\n@app.route('/employees/', methods=['DELETE', 'PUT'])\ndef delete_employees_by_id(id):\n if request.method == 'DELETE':\n # Delete user by id\n employees = EmployeesEntity(employee_id=id)\n c = Employees(connection_data)\n result = c.delete(employees)\n return jsonify({\n 'message': result[0]\n }), result[1]\n else:\n # Update user by id\n data = request.json\n employees = EmployeesEntity(employee_id=id,\n lastname =data['lastname'],\n firstname=data['firstname'],\n birthdate=data['birthdate'],\n photo=data['photo'], \n notes=data['notes'])\n c = Employees(connection_data)\n result = c.update(employees)\n return jsonify({\n 'message': result[0]\n }), result[1]\n\n\n#=============================================================================\n\n\n\n@app.route('/orderdetails', methods=['POST'])\ndef add_orderdetails():\n data = request.json\n orderdetails = OrderDetailsEntity(order_id =data['order_id'],\n product_id=data['product_id'],\n quantity=data['quantity'])\n c = OrderDetails(connection_data)\n message = c.insert(orderdetails)\n if message is None:\n return jsonify({\n 'message': 'Cannot insert data'\n }), 500\n return jsonify({\n 'message': message\n })\n\n@app.route('/orderdetails/all')\ndef get_all_orderdetails():\n c = OrderDetails(connection_data)\n result = c.get_all()\n return jsonify({\n 'data': result\n })\n\n@app.route('/orderdetails/', methods=['DELETE', 'PUT'])\ndef delete_orderDetails_by_id(id):\n if request.method == 'DELETE':\n # Delete user by id\n orderdetails = OrderDetailsEntity(orderdetail_id=id)\n c = OrderDetails(connection_data)\n result = c.delete(orderdetails)\n return jsonify({\n 'message': result[0]\n }), result[1]\n else:\n # Update user by id\n data = request.json\n orderdetails = OrderDetailsEntity(orderdetail_id=id,\n order_id =data['order_id'],\n product_id=data['product_id'],\n quantity=data['quantity']\n )\n c = OrderDetails(connection_data)\n result = c.update(orderdetails)\n return jsonify({\n 'message': result[0]\n }), result[1]\n\n\n#=============================================================================\n\n\n@app.route('/orders', methods=['POST'])\ndef add_orders():\n data = request.json\n orders = OrdersEntity(customer_id =data['customer_id'],\n employee_id=data['employee_id'],\n orderdate=data['orderdate'],\n shipper_id=data['shipper_id'] \n )\n c = Orders(connection_data)\n message = c.insert(orders)\n if message is None:\n return jsonify({\n 'message': 'Cannot insert data'\n }), 500\n return jsonify({\n 'message': message\n })\n\n@app.route('/orders/all')\ndef get_all_orders():\n c = Orders(connection_data)\n result = c.get_all()\n return jsonify({\n 'data': result\n })\n\n@app.route('/orders/', methods=['DELETE', 'PUT'])\ndef delete_orders_by_id(id):\n if request.method == 'DELETE':\n # Delete user by id\n orders = OrdersEntity(order_id=id)\n c = Orders(connection_data)\n result = c.delete(orders)\n return jsonify({\n 'message': result[0]\n }), result[1]\n else:\n # Update user by id\n data = request.json\n orders= OrdersEntity(order_id=id,\n customer_id =data['customer_id'],\n employee_id=data['employee_id'],\n orderdate=data['orderdate'],\n shipper_id=data['shipper_id'] )\n c = Orders(connection_data)\n result = c.update(orders)\n return jsonify({\n 'message': result[0]\n }), result[1]\n \n\n\n#=============================================================================\n\n\n\n@app.route('/products', methods=['POST'])\ndef add_products():\n data = request.json\n products = ProductsEntity(product_name =data['product_name'],\n 
supplier_id=data['supplier_id'],\n category_id=data['category_id'],\n unit=data['unit'], \n price=data['price'])\n c = Products(connection_data)\n message = c.insert(products)\n if message is None:\n return jsonify({\n 'message': 'Cannot insert data'\n }), 500\n return jsonify({\n 'message': message\n })\n\n\n\n@app.route('/products/all')\ndef get_all_products():\n c = Products(connection_data)\n result = c.get_all()\n return jsonify({\n 'data': result\n })\n\n@app.route('/products/', methods=['DELETE', 'PUT'])\ndef delete_products_by_id(id):\n if request.method == 'DELETE':\n # Delete user by id\n products = ProductsEntity(product_id=id)\n c = Products(connection_data)\n result = c.delete(products)\n return jsonify({\n 'message': result[0]\n }), result[1]\n else:\n data = request.json\n products = ProductsEntity(product_id=id,\n product_name =data['product_name'],\n supplier_id=data['supplier_id'],\n category_id=data['category_id'],\n unit=data['unit'], \n price=data['price'])\n c = Products(connection_data)\n result = c.update(products)\n return jsonify({\n 'message': result[0]\n }), result[1]\n\n\n\n#=======================================================================\n\n@app.route('/shippers', methods=['POST'])\ndef add_shippers():\n data = request.json\n shippers = ShippersEntity(shipper_name=data['shipper_name'],\n phone=data['phone']\n )\n c = Shippers(connection_data)\n message = c.insert(shippers)\n if message is None:\n return jsonify({\n 'message': 'Cannot insert data'\n }), 500\n return jsonify({\n 'message': message\n })\n\n@app.route('/shippers/all')\ndef get_all_shippers():\n c = Shippers(connection_data)\n result = c.get_all()\n return jsonify({\n 'data': result\n })\n\n@app.route('/shippers/', methods=['DELETE', 'PUT'])\ndef delete_shippers_by_id(id):\n if request.method == 'DELETE':\n # Delete user by id\n shippers = ShippersEntity(shipper_id=id)\n c = Shippers(connection_data)\n result = c.delete(shippers)\n return jsonify({\n 'message': result[0]\n }), result[1]\n else:\n # Update user by id\n data = request.json\n shippers = ShippersEntity(shipper_id=id,\n shipper_name=data['shipper_name'],\n phone=data['phone'])\n c = Shippers(connection_data)\n result = c.update(shippers)\n return jsonify({\n 'message': result[0]\n }), result[1]\n\n\n\n#=======================================================================\n\n\n@app.route('/suppliers', methods=['POST'])\ndef add_suppliers():\n data = request.json\n suppliers = SuppliersEntity(supplier_name=data['supplier_name'],\n contact_name=data['contact_name'],\n address=data['address'],\n city=data['city'],\n postal_code=data['postal_code'],\n country=data['country'],\n phone=data['phone'])\n c = Suppliers(connection_data)\n message = c.insert(suppliers)\n if message is None:\n return jsonify({\n 'message': 'Cannot insert data'\n }), 500\n return jsonify({\n 'message': message\n })\n\n@app.route('/suppliers/all')\ndef get_all_suppliers():\n c = Suppliers(connection_data)\n result = c.get_all()\n return jsonify({\n 'data': result\n })\n\n@app.route('/suppliers/', methods=['DELETE', 'PUT'])\ndef delete_suppliers_by_id(id):\n if request.method == 'DELETE':\n # Delete user by id\n suppliers = SuppliersEntity(supplier_id =id)\n c = Suppliers(connection_data)\n result = c.delete(suppliers)\n return jsonify({\n 'message': result[0]\n }), result[1]\n else:\n # Update user by id\n data = request.json\n suppliers = SuppliersEntity(supplier_id =id,\n supplier_name=data['supplier_name'],\n contact_name=data['contact_name'],\n 
address=data['address'],\n city=data['city'],\n postal_code=data['postal_code'],\n country=data['country'],\n phone=data['phone'])\n c = Suppliers(connection_data)\n result = c.update(suppliers)\n return jsonify({\n 'message': result[0]\n }), result[1]", "sub_path": "Backend/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 15372, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "flask.Flask", "line_number": 32, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 33, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 37, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 38, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 39, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 40, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 41, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "BusinessObject.Customer", "line_number": 57, "usage_type": "call"}, {"api_name": "DataObject.Customer", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 69, "usage_type": "call"}, {"api_name": "DataObject.Customer", "line_number": 75, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 83, "usage_type": "name"}, {"api_name": "BusinessObject.Customer", "line_number": 85, "usage_type": "call"}, {"api_name": "DataObject.Customer", "line_number": 86, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 93, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 93, "usage_type": "name"}, {"api_name": "BusinessObject.Customer", "line_number": 94, "usage_type": "call"}, {"api_name": "DataObject.Customer", "line_number": 101, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 112, "usage_type": "name"}, {"api_name": "BusinessObject.Categories", "line_number": 113, "usage_type": "call"}, {"api_name": "DataObject.Categories", "line_number": 116, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 122, "usage_type": "call"}, {"api_name": "DataObject.Categories", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 136, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 136, "usage_type": "name"}, {"api_name": "BusinessObject.Categories", "line_number": 138, "usage_type": "call"}, {"api_name": "DataObject.Categories", "line_number": 139, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 141, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 146, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 146, "usage_type": "name"}, {"api_name": "BusinessObject.Categories", 
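The CRUD routes above follow one pattern per table: POST /<table>, GET /<table>/all, and DELETE/PUT on /<table>/<int:id>. A minimal client sketch against those routes, assuming the app is served on localhost:5000 (the host, port, and sample field values are assumptions, not taken from the record):

import requests

BASE = 'http://localhost:5000'  # assumed host/port

# Create a shipper; the POST body mirrors the fields read via request.json above.
r = requests.post(BASE + '/shippers', json={'shipper_name': 'DHL', 'phone': '555-0100'})
print(r.json()['message'])

# List all shippers.
print(requests.get(BASE + '/shippers/all').json()['data'])

# Update, then delete, shipper 1.
requests.put(BASE + '/shippers/1', json={'shipper_name': 'DHL Express', 'phone': '555-0101'})
requests.delete(BASE + '/shippers/1')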
"line_number": 147, "usage_type": "call"}, {"api_name": "DataObject.Categories", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 152, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 162, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 162, "usage_type": "name"}, {"api_name": "BusinessObject.Employees", "line_number": 163, "usage_type": "call"}, {"api_name": "DataObject.Employees", "line_number": 168, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 171, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 174, "usage_type": "call"}, {"api_name": "DataObject.Employees", "line_number": 180, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 182, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 188, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 188, "usage_type": "name"}, {"api_name": "BusinessObject.Employees", "line_number": 190, "usage_type": "call"}, {"api_name": "DataObject.Employees", "line_number": 191, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 193, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 198, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 198, "usage_type": "name"}, {"api_name": "BusinessObject.Employees", "line_number": 199, "usage_type": "call"}, {"api_name": "DataObject.Employees", "line_number": 205, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 207, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 218, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 218, "usage_type": "name"}, {"api_name": "BusinessObject.OrderDetails", "line_number": 219, "usage_type": "call"}, {"api_name": "DataObject.OrderDetails", "line_number": 222, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 225, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 228, "usage_type": "call"}, {"api_name": "DataObject.OrderDetails", "line_number": 234, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 242, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 242, "usage_type": "name"}, {"api_name": "BusinessObject.OrderDetails", "line_number": 244, "usage_type": "call"}, {"api_name": "DataObject.OrderDetails", "line_number": 245, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 247, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 252, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 252, "usage_type": "name"}, {"api_name": "BusinessObject.OrderDetails", "line_number": 253, "usage_type": "call"}, {"api_name": "DataObject.OrderDetails", "line_number": 258, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 260, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 270, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 270, "usage_type": "name"}, {"api_name": "BusinessObject.Orders", "line_number": 271, "usage_type": "call"}, {"api_name": "DataObject.Orders", "line_number": 276, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 279, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 282, "usage_type": "call"}, {"api_name": "DataObject.Orders", "line_number": 288, 
"usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 290, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 296, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 296, "usage_type": "name"}, {"api_name": "BusinessObject.Orders", "line_number": 298, "usage_type": "call"}, {"api_name": "DataObject.Orders", "line_number": 299, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 301, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 306, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 306, "usage_type": "name"}, {"api_name": "BusinessObject.Orders", "line_number": 307, "usage_type": "call"}, {"api_name": "DataObject.Orders", "line_number": 312, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 314, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 326, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 326, "usage_type": "name"}, {"api_name": "BusinessObject.Products", "line_number": 327, "usage_type": "call"}, {"api_name": "DataObject.Products", "line_number": 332, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 335, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 338, "usage_type": "call"}, {"api_name": "DataObject.Products", "line_number": 346, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 348, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 354, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 354, "usage_type": "name"}, {"api_name": "BusinessObject.Products", "line_number": 356, "usage_type": "call"}, {"api_name": "DataObject.Products", "line_number": 357, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 359, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 363, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 363, "usage_type": "name"}, {"api_name": "BusinessObject.Products", "line_number": 364, "usage_type": "call"}, {"api_name": "DataObject.Products", "line_number": 370, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 372, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 382, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 382, "usage_type": "name"}, {"api_name": "BusinessObject.Shippers", "line_number": 383, "usage_type": "call"}, {"api_name": "DataObject.Shippers", "line_number": 386, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 389, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 392, "usage_type": "call"}, {"api_name": "DataObject.Shippers", "line_number": 398, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 400, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 406, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 406, "usage_type": "name"}, {"api_name": "BusinessObject.Shippers", "line_number": 408, "usage_type": "call"}, {"api_name": "DataObject.Shippers", "line_number": 409, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 411, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 416, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 416, "usage_type": "name"}, {"api_name": "BusinessObject.Shippers", "line_number": 417, "usage_type": "call"}, {"api_name": 
"DataObject.Shippers", "line_number": 420, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 422, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 433, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 433, "usage_type": "name"}, {"api_name": "BusinessObject.Suppliers", "line_number": 434, "usage_type": "call"}, {"api_name": "DataObject.Suppliers", "line_number": 441, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 444, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 447, "usage_type": "call"}, {"api_name": "DataObject.Suppliers", "line_number": 453, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 455, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 461, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 461, "usage_type": "name"}, {"api_name": "BusinessObject.Suppliers", "line_number": 463, "usage_type": "call"}, {"api_name": "DataObject.Suppliers", "line_number": 464, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 466, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 471, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 471, "usage_type": "name"}, {"api_name": "BusinessObject.Suppliers", "line_number": 472, "usage_type": "call"}, {"api_name": "DataObject.Suppliers", "line_number": 480, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 482, "usage_type": "call"}]} +{"seq_id": "521947773", "text": "######################################################################\n#\n#\n# export_layers.py for QGIS 3.* with Script Runner extension\n#\n# Open your vector layers in qgis and run it to export them\n# into a \"export\" directory located in layers directory.\n#\n# Selected features will be extracted.\n#\n# You can reproject your data during the process:\n# Set EPSG value. 
Set None if you don't want to reproject your data\n#\n######################################################################\n\nimport os\nfrom pathlib import Path\n\nfrom qgis.core import QgsVectorLayer\nfrom qgis.core import QgsProject\nfrom qgis.core import QgsVectorFileWriter\nfrom qgis.core import QgsMessageLog\nfrom qgis.core import QgsCoordinateReferenceSystem\nfrom qgis.core import QgsCoordinateTransform\n\n\ngdal_driver_formats = {\n    \"shp\": \"ESRI Shapefile\",\n    \"geojson\": \"GeoJSON\",\n    \"csv\": \"CSV\",\n    \"gpkg\": \"GPKG\",\n    None: \"geojson\"\n}\n\n\nclass ExportLayers:\n    \"\"\"\n\n    Class ExportLayers\n\n    \"\"\"\n    _ENCODING_VALUE = \"utf-8\"\n\n    def __init__(self, qgs_layer, output_format, to_epsg):\n        \"\"\"\n\n        Constructor\n\n        :type qgs_layer: QgsVectorLayer\n        :type to_epsg: int, default: None\n\n        \"\"\"\n        self.layer = qgs_layer\n        if to_epsg == 0:\n            to_epsg = None\n        self._to_epsg = to_epsg\n        self._selection = False\n\n        self._export_dir = 'export'\n        self._format = gdal_driver_formats[output_format]\n\n    def run(self):\n        self.save_file()\n\n    def create_dest_dir(self):\n        \"\"\"\n        create_dest_dir\n\n        :type layer: QgsVectorLayer\n        \"\"\"\n        raw_source_dir = self.layer.dataProvider().dataSourceUri()\n        source_dir = os.path.dirname(raw_source_dir.split('|')[0])\n\n        # create export directory\n        self._export_dir = Path(f'{source_dir}/{self._export_dir}')\n\n        if not os.path.exists(self._export_dir):\n            os.makedirs(self._export_dir)\n\n    def reproject(self):\n        \"\"\"\n        reproject\n\n        :type layer: QgsVectorLayer\n        \"\"\"\n\n        if self._to_epsg is not None:\n            self._qgs_epsg = QgsCoordinateTransform(\n                self.layer.crs(),\n                QgsCoordinateReferenceSystem(self._to_epsg),\n                QgsProject.instance()\n            )\n\n    def save_file(self):\n        \"\"\"\n        save_file\n\n        :type layer: QgsVectorLayer\n        \"\"\"\n\n        self.reproject()\n        self.create_dest_dir()\n\n        count_features = self.layer.selectedFeatureCount()\n        if count_features > 0:\n            self._selection = True\n        else:\n            count_features = self.layer.featureCount()\n\n        output_file_path = self._format_output_file_path()\n\n        # write file\n        error = self._write_data_func(self.layer, output_file_path, \"update\")\n        if error == QgsVectorFileWriter.ErrCreateDataSource:\n            error = self._write_data_func(self.layer, output_file_path, \"create\")\n\n        if error == QgsVectorFileWriter.NoError:\n            self.export_style(output_file_path)\n            print(f'Success: {self.layer.name()} - {self._format} ({count_features} features) !\\n=> {self._export_dir}\\n')\n        else:\n            print(f'Error: {error}')\n\n    def _format_output_file_path(self):\n        suffix_value = self._to_epsg if self._to_epsg is not None else self.layer.crs().authid().partition(':')[-1]\n        if self._format == \"GPKG\":\n            output_file_path = Path(f'{self._export_dir}_{suffix_value}.gpkg')\n        else:\n            output_file_path = Path(f'{self._export_dir}/{self.layer.name()}_{suffix_value}')\n\n        return output_file_path\n\n    def _write_data_func(self, layer, output_file_path, mode):\n        options = self._format_options(mode)\n        error, _error_string = QgsVectorFileWriter.writeAsVectorFormat(\n            self.layer,\n            str(output_file_path),\n            options\n        )\n\n        return error\n\n    def export_style(self, output_file_path):\n        self.layer.saveNamedStyle(f\"{output_file_path}.qml\")\n        self.layer.saveSldStyle(f\"{output_file_path}.sld\")\n\n        if self._format == \"GPKG\":\n            gpkg_layer = QgsVectorLayer(f\"{output_file_path}|layername={self.layer.name()}\", self.layer.name(), 'ogr')\n            gpkg_layer.loadNamedStyle(f\"{output_file_path}.qml\")\n            gpkg_layer.saveStyleToDatabase(\n                name=self.layer.name(),\n
description=\"\",\n useAsDefault=True,\n uiFileContent=\"\"\n )\n\n os.remove(f\"{output_file_path}.qml\")\n os.remove(f\"{output_file_path}.sld\")\n\n def _format_options(self, mode):\n options = QgsVectorFileWriter.SaveVectorOptions()\n options.driverName = self._format\n options.fileEncoding = self._ENCODING_VALUE\n options.onlySelectedFeatures = self._selection\n options.includeZ = True\n options.layerName = self.layer.name()\n\n if self._to_epsg is not None:\n options.ct = self._qgs_epsg\n\n\n if self._format == \"GPKG\":\n if mode == \"create\":\n options.actionOnExistingFile = QgsVectorFileWriter.CreateOrOverwriteFile \n elif mode == \"update\":\n options.actionOnExistingFile = QgsVectorFileWriter.CreateOrOverwriteLayer \n\n return options\n\n\ndef run_script(iface, epsg=None, output_format=None):\n\n layers = QgsProject.instance().mapLayers()\n\n for _, qgslayer in layers.items():\n ExportLayers(qgslayer, output_format, epsg).run()\n \n\n", "sub_path": "script_runner/export_layers.py", "file_name": "export_layers.py", "file_ext": "py", "file_size_in_byte": 5536, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.dirname", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", "line_number": 77, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 78, "usage_type": "call"}, {"api_name": "qgis.core.QgsCoordinateTransform", "line_number": 88, "usage_type": "call"}, {"api_name": "qgis.core.QgsCoordinateReferenceSystem", "line_number": 90, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject.instance", "line_number": 91, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 91, "usage_type": "name"}, {"api_name": "qgis.core.QgsVectorFileWriter.ErrCreateDataSource", "line_number": 114, "usage_type": "attribute"}, {"api_name": "qgis.core.QgsVectorFileWriter", "line_number": 114, "usage_type": "name"}, {"api_name": "qgis.core.QgsVectorFileWriter.NoError", "line_number": 118, "usage_type": "attribute"}, {"api_name": "qgis.core.QgsVectorFileWriter", "line_number": 118, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 127, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 129, "usage_type": "call"}, {"api_name": "qgis.core.QgsVectorFileWriter.writeAsVectorFormat", "line_number": 135, "usage_type": "call"}, {"api_name": "qgis.core.QgsVectorFileWriter", "line_number": 135, "usage_type": "name"}, {"api_name": "qgis.core.QgsVectorLayer", "line_number": 148, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 157, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 158, "usage_type": "call"}, {"api_name": "qgis.core.QgsVectorFileWriter.SaveVectorOptions", "line_number": 161, "usage_type": "call"}, {"api_name": "qgis.core.QgsVectorFileWriter", "line_number": 161, "usage_type": "name"}, {"api_name": "qgis.core.QgsVectorFileWriter.CreateOrOverwriteFile", "line_number": 174, "usage_type": "attribute"}, {"api_name": "qgis.core.QgsVectorFileWriter", "line_number": 174, "usage_type": "name"}, {"api_name": "qgis.core.QgsVectorFileWriter.CreateOrOverwriteLayer", "line_number": 176, "usage_type": "attribute"}, {"api_name": "qgis.core.QgsVectorFileWriter", "line_number": 176, "usage_type": "name"}, 
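In save_file above, the writer first attempts an "update" (CreateOrOverwriteLayer) and only falls back to "create" (CreateOrOverwriteFile) when it gets ErrCreateDataSource, i.e. when the GeoPackage does not exist yet. A stripped-down sketch of that fallback, assuming a QGIS 3.x Python environment where writeAsVectorFormat returns the two-value tuple unpacked in the record, and a loaded QgsVectorLayer:

from qgis.core import QgsVectorFileWriter

def write_gpkg(layer, path):
    opts = QgsVectorFileWriter.SaveVectorOptions()
    opts.driverName = 'GPKG'
    opts.layerName = layer.name()
    # First try to overwrite only this layer inside an existing .gpkg.
    opts.actionOnExistingFile = QgsVectorFileWriter.CreateOrOverwriteLayer
    error, _ = QgsVectorFileWriter.writeAsVectorFormat(layer, str(path), opts)
    if error == QgsVectorFileWriter.ErrCreateDataSource:
        # The .gpkg file does not exist yet: create the whole file instead.
        opts.actionOnExistingFile = QgsVectorFileWriter.CreateOrOverwriteFile
        error, _ = QgsVectorFileWriter.writeAsVectorFormat(layer, str(path), opts)
    return error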
{"api_name": "qgis.core.QgsProject.instance", "line_number": 183, "usage_type": "call"}, {"api_name": "qgis.core.QgsProject", "line_number": 183, "usage_type": "name"}]} +{"seq_id": "18342744", "text": "import urllib\nimport json\nfrom bs4 import BeautifulSoup\nfrom collections import namedtuple\n\nVideo = namedtuple(\"Video\", \"video_id title duration views thumbnail\")\n\ndef parse_video_div(div):\n    video_id = div.get(\"data-context-item-id\", \"\")\n    title = div.find(\"a\", \"yt-uix-tile-link\").text\n    duration = div.find(\"span\", \"video-time\").contents[0].text\n    views = int(div.find(\"ul\", \"yt-lockup-meta-info\").contents[0].text.rstrip(\" views\").replace(\",\", \"\"))\n    img = div.find(\"img\")\n    thumbnail = \"http:\" + img.get(\"src\", \"\") if img else \"\"\n    return Video(video_id, title, duration, views, thumbnail)\n\ndef parse_videos_page(page):\n    video_divs = page.find_all(\"div\", \"yt-lockup-video\")\n    return [parse_video_div(div) for div in video_divs]\n\ndef find_load_more_url(page):\n    for button in page.find_all(\"button\"):\n        url = button.get(\"data-uix-load-more-href\")\n        if url:\n            return \"http://www.youtube.com\" + url\n\ndef download_page(url):\n    print(\"Downloading {0}\".format(url))\n    return urllib.urlopen(url).read()\n\ndef get_videos(username):\n    page_url = \"http://www.youtube.com/user/{0}/videos\".format(username)\n    page = BeautifulSoup(download_page(page_url))\n    videos = parse_videos_page(page)\n    page_url = find_load_more_url(page)\n    while page_url:\n        json_data = json.loads(download_page(page_url))\n        page = BeautifulSoup(json_data.get(\"content_html\", \"\"))\n        videos.extend(parse_videos_page(page))\n        page_url = find_load_more_url(BeautifulSoup(json_data.get(\"load_more_widget_html\", \"\")))\n    return videos\n\nif __name__ == \"__main__\":\n    videos = get_videos(\"jimmydiresta\")\n    for video in videos:\n        print(video)\n    print(\"{0} videos\".format(len(videos)))\n", "sub_path": "all-gists/6796103/snippet.py", "file_name": "snippet.py", "file_ext": "py", "file_size_in_byte": 1735, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "collections.namedtuple", "line_number": 6, "usage_type": "call"}, {"api_name": "urllib.urlopen", "line_number": 29, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 33, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 37, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 38, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "522021417", "text": "import sqlite3 as sl\n\n# connecting to database\ndef db_connection(db_file):\n    conn = None\n    try:\n        conn = sl.connect(db_file)\n    except sl.Error:\n        print('Unable to connect to Database')\n    return conn\n\n# selecting from playground table\ndef select_from_playground(conn):\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM playgrounds\")\n    playground_data = cur.fetchall()\n    conn.close()\n    search_data = []\n    for i in playground_data:\n        x = []\n        x.append(i[0])\n        x.append(i[1])\n        search_data.append(x)\n    return search_data\n\ndef retrive_f_id(conn, s):\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM playgrounds\")\n    playground_data = cur.fetchall()\n    conn.close()\n    x = None\n    for i in playground_data:\n        if(i[1] == s):\n            x = i[0]\n    return x\n", "sub_path": "functions/database_operations.py", "file_name": "database_operations.py", "file_ext": "py", "file_size_in_byte": 797, "program_lang": "python", "lang": "en", "doc_type": "code",
"dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sqlite3.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "67977134", "text": "# coding: UTF-8\n\nimport sys\n\nfrom hyperopt import fmin, tpe, STATUS_OK, STATUS_FAIL, Trials\n\nfrom src import logger, notify\nfrom src.bitmex import BitMex\nfrom src.bitmex_stub import BitMexStub\nfrom src.bitmex_backtest import BitMexBackTest\n\n\nclass Bot:\n    # Strategy parameters\n    params = {}\n    # Exchange\n    exchange = None\n    # Time frame\n    bin_size = '1h'\n    # Number of periods per bar\n    periods = 20\n    # Whether to use the testnet\n    test_net = False\n    # Whether to run a back test\n    back_test = False\n    # Whether to trade against the stub exchange\n    stub_test = False\n    # Whether to run a parameter search\n    hyperopt = False\n\n    def __init__(self, bin_size):\n        \"\"\"\n        Constructor.\n        :param bin_size: time frame\n        :param periods: number of periods\n        \"\"\"\n        self.bin_size = bin_size\n\n    def options(self):\n        \"\"\"\n        Returns the search space used for the parameter search.\n        \"\"\"\n        pass\n\n    def ohlcv_len(self):\n        \"\"\"\n        Length of the OHLC window passed to the strategy.\n        \"\"\"\n        return 100\n\n    def input(self, title, type, defval):\n        \"\"\"\n        Returns a parameter value.\n        :param title: parameter name\n        :param defval: default value\n        :return: value\n        \"\"\"\n        p = {} if self.params is None else self.params\n        if title in p:\n            return type(p[title])\n        else:\n            return defval\n\n    def strategy(self, open, close, high, low, volume):\n        \"\"\"\n        Strategy function. Inherit and implement this function when building a bot.\n        :param open: open prices\n        :param close: close prices\n        :param high: high prices\n        :param low: low prices\n        :param volume: volume\n        \"\"\"\n        pass\n\n    def params_search(self):\n        \"\"\"\n        Runs the parameter search.\n        \"\"\"\n        def objective(args):\n            logger.info(f\"Params : {args}\")\n            try:\n                self.params = args\n                self.exchange = BitMexBackTest()\n                self.exchange.on_update(self.bin_size, self.strategy)\n                profit_factor = self.exchange.win_profit/self.exchange.lose_loss\n                logger.info(f\"Profit Factor : {profit_factor}\")\n                ret = {\n                    'status': STATUS_OK,\n                    'loss': 1/profit_factor\n                }\n            except Exception as e:\n                ret = {\n                    'status': STATUS_FAIL\n                }\n\n            return ret\n\n        trials = Trials()\n        best_params = fmin(objective, self.options(), algo=tpe.suggest, trials=trials, max_evals=200)\n        logger.info(f\"Best params is {best_params}\")\n        logger.info(f\"Best profit factor is {1/trials.best_trial['result']['loss']}\")\n\n    def run(self):\n        \"\"\"\n        Starts the bot.\n        \"\"\"\n        if self.hyperopt:\n            logger.info(f\"Bot Mode : Hyperopt\")\n            self.params_search()\n            return\n\n        elif self.stub_test:\n            logger.info(f\"Bot Mode : Stub\")\n            self.exchange = BitMexStub()\n        elif self.back_test:\n            logger.info(f\"Bot Mode : Back test\")\n            self.exchange = BitMexBackTest()\n        else:\n            logger.info(f\"Bot Mode : Trade\")\n            self.exchange = BitMex(demo=self.test_net)\n\n        self.exchange.ohlcv_len = self.ohlcv_len()\n        self.exchange.on_update(self.bin_size, self.strategy)\n\n        logger.info(f\"Starting Bot\")\n        logger.info(f\"Strategy : {type(self).__name__}\")\n        logger.info(f\"Balance : {self.exchange.get_balance()}\")\n\n        notify(f\"Starting Bot\\n\"\n               f\"Strategy : {type(self).__name__}\\n\"\n               f\"Balance : {self.exchange.get_balance()/100000000} XBT\")\n\n        self.exchange.show_result()\n\n    def stop(self):\n        \"\"\"\n        Stops the bot. Any open orders are cancelled.\n        \"\"\"\n        if self.exchange is None:\n            return\n\n        logger.info(f\"Stopping Bot\")\n\n        self.exchange.stop()\n        self.exchange.cancel_all()\n        sys.exit()\n", "sub_path": "src/bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 4119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "src.logger.info", "line_number": 80, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 80, "usage_type": "name"},
{"api_name": "src.bitmex_backtest.BitMexBackTest", "line_number": 83, "usage_type": "call"}, {"api_name": "src.logger.info", "line_number": 86, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 86, "usage_type": "name"}, {"api_name": "hyperopt.STATUS_OK", "line_number": 88, "usage_type": "name"}, {"api_name": "hyperopt.STATUS_FAIL", "line_number": 93, "usage_type": "name"}, {"api_name": "hyperopt.Trials", "line_number": 98, "usage_type": "call"}, {"api_name": "hyperopt.fmin", "line_number": 99, "usage_type": "call"}, {"api_name": "hyperopt.tpe.suggest", "line_number": 99, "usage_type": "attribute"}, {"api_name": "hyperopt.tpe", "line_number": 99, "usage_type": "name"}, {"api_name": "src.logger.info", "line_number": 100, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 100, "usage_type": "name"}, {"api_name": "src.logger.info", "line_number": 101, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 101, "usage_type": "name"}, {"api_name": "src.logger.info", "line_number": 108, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 108, "usage_type": "name"}, {"api_name": "src.logger.info", "line_number": 113, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 113, "usage_type": "name"}, {"api_name": "src.bitmex_stub.BitMexStub", "line_number": 114, "usage_type": "call"}, {"api_name": "src.logger.info", "line_number": 116, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 116, "usage_type": "name"}, {"api_name": "src.bitmex_backtest.BitMexBackTest", "line_number": 117, "usage_type": "call"}, {"api_name": "src.logger.info", "line_number": 119, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 119, "usage_type": "name"}, {"api_name": "src.bitmex.BitMex", "line_number": 120, "usage_type": "call"}, {"api_name": "src.logger.info", "line_number": 125, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 125, "usage_type": "name"}, {"api_name": "src.logger.info", "line_number": 126, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 126, "usage_type": "name"}, {"api_name": "src.logger.info", "line_number": 127, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 127, "usage_type": "name"}, {"api_name": "src.notify", "line_number": 129, "usage_type": "call"}, {"api_name": "src.logger.info", "line_number": 142, "usage_type": "call"}, {"api_name": "src.logger", "line_number": 142, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 146, "usage_type": "call"}]} +{"seq_id": "423130602", "text": "from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom requests_html import HTMLSession\nimport time\n\nfrom bot import ShoppingBotInterface\n\n# Reference: http://www.michaelfxu.com/tools%20and%20infrastructures/building-a-sniping-bot/\n'''\npython bestbuy.py --name=\"Play Station 5\"\n'''\n\n\nclass BestBuyBot(ShoppingBotInterface):\n\n def __init__(self, config):\n self.config = config\n\n def check_can_buy(self, url) -> bool:\n try:\n session = HTMLSession()\n try:\n r = session.get(url)\n btn = r.html.find('button[class=\"btn btn-primary btn-lg btn-block btn-leading-ficon add-to-cart-button\"]')\n\n return len(btn) == 1\n\n finally:\n session.close()\n\n except Exception as e:\n print(\"Unable to connect. 
Waiting 1 minute.\")\n time.sleep(60)\n return False\n\n def perform_purchase(self, url, test=False) -> bool:\n driver = webdriver.Chrome('../chromedriver.exe')\n try:\n driver.get(url)\n btn = driver.find_element_by_class_name('add-to-cart-button')\n\n btn.click()\n time.sleep(1)\n\n print(\"Successfully added to cart.\")\n\n checkout_url = 'https://www.bestbuy.com/checkout/r/fulfillment'\n driver.get(checkout_url)\n\n # if we are currently on store pickup, switch to shipping\n try:\n shipping_button = driver.find_element_by_xpath(\"//a[class='ispu-card__switch']\")\n shipping_button.click()\n print(\"Switching to shipping\")\n except Exception as e:\n print(\"Started on shipping page.\")\n\n # fill in general info and shipping info\n driver.find_element_by_xpath(\"//input[contains(@id,'firstName')]\") \\\n .send_keys(self.config.FIRST_NAME)\n driver.find_element_by_xpath(\"//input[contains(@id,'lastName')]\") \\\n .send_keys(self.config.LAST_NAME)\n driver.find_element_by_xpath(\"//input[contains(@id,'street')]\") \\\n .send_keys(self.config.ADDRESS)\n time.sleep(0.5)\n driver.find_element_by_xpath(\"//input[contains(@id,'city')]\") \\\n .send_keys(self.config.CITY)\n\n drpState = Select(driver.find_element_by_xpath(\n \"//select[contains(@id,'state')]\"))\n drpState.select_by_visible_text(self.config.STATE)\n\n driver.find_element_by_xpath(\"//input[contains(@id,'zipcode')]\") \\\n .send_keys(self.config.ZIPCODE)\n\n driver.find_element_by_id('user.emailAddress').send_keys(\n self.config.EMAIL)\n driver.find_element_by_id('user.phone') \\\n .send_keys(self.config.PHONE)\n\n print(\"Successfully filled out general info and shipping.\")\n\n # move on to payment info\n continue_button = driver.find_element_by_class_name(\n 'btn-secondary')\n continue_button.click()\n print(\"Continuing to payment info.\")\n\n time.sleep(5)\n\n # fill in payment info\n driver.find_element_by_id('optimized-cc-card-number') \\\n .send_keys(self.config.CREDIT_NUMBER)\n\n exp_month = Select(driver.find_element_by_name('expiration-month'))\n exp_month.select_by_visible_text(self.config.EXP_MONTH)\n\n exp_year = Select(driver.find_element_by_name('expiration-year'))\n exp_year.select_by_visible_text(self.config.EXP_YEAR)\n\n driver.find_element_by_id('credit-card-cvv') \\\n .send_keys(self.config.CVV)\n\n print(\"Successfully entered credit card information.\")\n\n # Place the order\n place_order = driver.find_element_by_class_name('btn-block')\n\n print(\"Ready to place order.\")\n\n if not test:\n place_order.click()\n print(\"Order placed.\")\n return True\n\n except Exception as e:\n print(e)\n print(\"Unable to purchase.\")\n return False\n\n finally:\n driver.quit()\n", "sub_path": "src/bestbuy.py", "file_name": "bestbuy.py", "file_ext": "py", "file_size_in_byte": 4205, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "bot.ShoppingBotInterface", "line_number": 14, "usage_type": "name"}, {"api_name": "requests_html.HTMLSession", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 37, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 37, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 69, "usage_type": "call"}, 
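check_can_buy above reduces stock detection to a single signal: whether the add-to-cart button is present on the product page. A typical driver simply polls that check and hands off to perform_purchase once it succeeds; a minimal sketch of that outer loop (the watch/interval names and the test=True dry run are illustrative choices, not part of the record):

import time

def watch(bot, url, interval=30):
    # Poll until the add-to-cart button appears, then attempt the purchase once.
    while True:
        if bot.check_can_buy(url):
            if bot.perform_purchase(url, test=True):  # test=True stops short of placing the order
                return
        time.sleep(interval)  # throttle requests between checks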
{"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 95, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.Select", "line_number": 98, "usage_type": "call"}]} +{"seq_id": "253589505", "text": "import sqlite3\nconn = sqlite3.connect('growls.db')\n\nc = conn.cursor()\nc.execute(\"CREATE TABLE growls (name, datetime, growl)\")\nc.execute(\"INSERT INTO growls VALUES ('richie', '100', 'Hello world!')\")\nc.execute(\"SELECT * FROM growls\")\nprint(c.fetchall())\nconn.commit()\nconn.close()\n", "sub_path": "basic/solution/init_db.py", "file_name": "init_db.py", "file_ext": "py", "file_size_in_byte": 281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sqlite3.connect", "line_number": 2, "usage_type": "call"}]} +{"seq_id": "232260213", "text": "import datetime\n\n# Define the get_verified_integer function\ndef get_verified_integer(question, min, max) :\n    print(question)\n    while True :\n        answer = input(\">\")\n        if answer.isdigit() :\n            answer = int(answer)\n            if (answer >= min and answer <= max) :\n                return answer\n        print(\"That is not a valid value, please try again.\")\n\n# main program starts here\nmonth = get_verified_integer(\"Please enter today's month (1-12): \",1,12)\nday = get_verified_integer(\"Please enter today's day (1-31): \",1,31)\nyear = get_verified_integer(\"Please enter today's year (2000 - 2030): \",2000,2030)\n\n# build date object and print out the day of the week\ntoday = datetime.date(year,month,day)\nprint(\"Today is a \" + today.strftime(\"%A\"))\n", "sub_path": "Python Class/verfunc.py", "file_name": "verfunc.py", "file_ext": "py", "file_size_in_byte": 823, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "datetime.date", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "294829143", "text": "# encoding: utf-8\n# Created by David Rideout on 2/7/14 4:58 PM\n# Copyright (c) 2013 Safari Books Online, LLC. 
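Once the three components are validated, verfunc.py lets datetime.date do the real work, and strftime('%A') maps the date to its weekday name. The same two steps in isolation (the sample date is arbitrary):

import datetime

d = datetime.date(2024, 5, 1)            # year, month, day, as collected by get_verified_integer
print("Today is a " + d.strftime("%A"))  # prints 'Today is a Wednesday'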
All rights reserved.\n'''A tool module for Books to proccess the book elements'''\n\nfrom storage.models import Book\nfrom django.db.utils import IntegrityError\n\n\ndef process_book_element(book_element):\n \"\"\"\n Process a book element into the database.\n\n :param book: book element\n :returns:\n \"\"\"\n\n book, created = Book.objects.get_or_create(pk=book_element.get('id'))\n book.title = book_element.findtext('title')\n book.description = book_element.findtext('description')\n aliases = {}\n same_aliases = False\n book_aliases = {}\n for alias in book.aliases.values():\n book_aliases[alias['value']] = True\n\n for alias in book_element.xpath('aliases/alias'):\n scheme = alias.get('scheme')\n value = alias.get('value')\n aliases[scheme] = value\n if value in book_aliases:\n same_aliases = True\n\n if same_aliases == False and len(book_aliases) > 0:\n book, created = Book.objects.get_or_create(pk=aliases.values()[0])\n book.title = book_element.findtext('title')\n book.description = book_element.findtext('description')\n\n for scheme, value in aliases.items():\n try:\n book.aliases.get_or_create(scheme=scheme, value=value)\n except IntegrityError as e:\n pass\n\n book.save()\n\n", "sub_path": "storage/tools.py", "file_name": "tools.py", "file_ext": "py", "file_size_in_byte": 1447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "storage.models.Book.objects.get_or_create", "line_number": 18, "usage_type": "call"}, {"api_name": "storage.models.Book.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "storage.models.Book", "line_number": 18, "usage_type": "name"}, {"api_name": "storage.models.Book.objects.get_or_create", "line_number": 35, "usage_type": "call"}, {"api_name": "storage.models.Book.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "storage.models.Book", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.utils.IntegrityError", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "321922607", "text": "import boto3\nimport os\nimport sys\n\ndef main():\n local_directory, bucket_name = sys.argv[1:] \n upload_directory(local_directory, bucket_name) \n \n\ndef upload_directory(path,bucketname):\n s3 = boto3.client('s3')\n for root,dirs,files in os.walk(path):\n for filename in files:\n if filename.endswith('.html') or filename.endswith('.htm'):\n\n extra_args={'ContentType': \"text/html\", 'ACL': \"public-read\"} \n s3.upload_file(os.path.join(root,filename),bucketname,filename, ExtraArgs=extra_args)\n else:\n\n s3.upload_file(os.path.join(root,filename),bucketname,filename)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "sync_directory_to_s3.py", "file_name": "sync_directory_to_s3.py", "file_ext": "py", "file_size_in_byte": 702, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "boto3.client", "line_number": 11, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}]} +{"seq_id": "612072735", "text": "\"\"\"\nA celerybeat scheduler with a Mongo backend.\n\"\"\"\n\nimport logging\n\nfrom 
celery.beat import Scheduler\nfrom celery import current_app\nimport pymongo\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\n\nclass MongoScheduler(Scheduler):\n def __init__(self, *args, **kwargs):\n\n # XXX It's ugly to use something called 'filename' as a URI. Make an\n # upstream ticket for a nicer way of passing config into a custom\n # scheduler backend.\n self.uri = current_app.conf['CELERYBEAT_SCHEDULE_FILENAME']\n logging.debug('MongoScheduler connecting to %s' % self.uri)\n parsed = pymongo.uri_parser.parse_uri(self.uri)\n conn = pymongo.Connection(*parsed['nodelist'][0])\n db = conn[parsed['database']]\n self.collection = db[parsed['collection']]\n\n # No two documents may have the same 'key'\n self.collection.ensure_index('key', unique=True)\n\n # If there's not already an 'entries' key, create one.\n entries = self.collection.find_one({'key': 'entries'})\n if not entries:\n self.entries = {}\n self.sync()\n else:\n self.entries = pickle.loads(str(entries['entries']))\n super(MongoScheduler, self).__init__(*args, **kwargs)\n\n def setup_schedule(self):\n self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE)\n self.install_default_entries(self.schedule)\n self.sync()\n\n def get_schedule(self):\n return self.entries\n\n def set_schedule(self, schedule):\n self.entries = schedule\n self.sync()\n schedule = property(get_schedule, set_schedule)\n\n def sync(self):\n self.collection.update({'key': 'entries'},\n {'key': 'entries', 'entries':\n pickle.dumps(self.entries)},\n upsert=True)\n\n def close(self):\n self.sync()\n self.collection.database.connection.close()\n\n @property\n def info(self):\n return self.uri\n", "sub_path": "celery_schedulers/mongo_scheduler.py", "file_name": "mongo_scheduler.py", "file_ext": "py", "file_size_in_byte": 2032, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "celery.beat.Scheduler", "line_number": 17, "usage_type": "name"}, {"api_name": "celery.current_app.conf", "line_number": 23, "usage_type": "attribute"}, {"api_name": "celery.current_app", "line_number": 23, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 24, "usage_type": "call"}, {"api_name": "pymongo.uri_parser.parse_uri", "line_number": 25, "usage_type": "call"}, {"api_name": "pymongo.uri_parser", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pymongo.Connection", "line_number": 26, "usage_type": "call"}, {"api_name": "pickle.loads", "line_number": 39, "usage_type": "call"}, {"api_name": "pickle.dumps", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "337919540", "text": "import json\n\n\ndef resp(app, data=None, code=200, headers=None):\n if not headers:\n headers = {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = 'application/json'\n data = json.dumps(data)\n return app.make_response((data, code, headers))\n\n\ndef save_history(history, target='/opt/stackbrew/history.json'):\n save = []\n for k in history.iterkeys():\n url, ref, dfile = k # unpack\n save.append({\n 'url': url,\n 'ref': ref,\n 'dfile': dfile,\n 'img': history[k]\n })\n\n with open(target, 'w') as f:\n f.write(json.dumps(save))\n\n\ndef load_history(target='/opt/stackbrew/history.json'):\n history = {}\n try:\n with open(target, 'r') as f:\n savefile = json.loads(f.read())\n for item in savefile:\n history[(item['url'], item['ref'], item['dfile'])] = item['img']\n return history\n except IOError:\n return {}\n", "sub_path": "stackbrew/lib/utils.py", "file_name": 
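MongoScheduler above stores the entire schedule as one pickled blob in a single document keyed 'entries'. The round-trip can be reproduced with plain pymongo; this sketch uses the modern update_one/$set form rather than the legacy update call in the record, and the URI, database, and collection names are assumptions:

import pickle
import pymongo

coll = pymongo.MongoClient('mongodb://localhost:27017')['celery']['schedule']

entries = {'every-5s': 'stand-in for a celerybeat entry'}
coll.update_one({'key': 'entries'},
                {'$set': {'entries': pickle.dumps(entries)}},
                upsert=True)  # insert the document if it does not exist yet

restored = pickle.loads(coll.find_one({'key': 'entries'})['entries'])
print(restored)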
"utils.py", "file_ext": "py", "file_size_in_byte": 983, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "json.dumps", "line_number": 9, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 25, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "302674072", "text": "from .Model import *\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score\n\nfrom matplotlib.colors import Normalize\n\nclass MidpointNormalize(Normalize):\n\tdef __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n\t\tself.midpoint = midpoint\n\t\tNormalize.__init__(self, vmin, vmax, clip)\n\n\tdef __call__(self, value, clip=None):\n\t\t# I'm ignoring masked values and all kinds of edge cases to make a\n\t\t# simple example...\n\t\tx, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n\t\treturn np.ma.masked_array(np.interp(value, x, y))\n\nclass Interpret:\n\tdef __init__(self, X_test, Y_test, train_avg, train_std, model_loc, tag, minibatch_size, threshold_sig = 0.5):\n\t\tself.X_test = X_test\n\t\tself.Y_test = Y_test\n\t\tself.m = self.X_test.shape[0]\n\t\tself.barcode_num = self.X_test.shape[1]\n\t\tself.train_avg = train_avg\n\t\tself.train_std = train_std\n\t\tself.threshold_sig = threshold_sig\n\t\tself.tag = tag\n\n\t\tself.model = RestoreModelKeras(self.tag, minibatch_size, model_loc) \n\n\t\tself.filter_X_test = None\n\t\tself.filter_Y_test = None\n\t\tself.results = dict()\n\n\tdef plot_avg_on_off(self):\n\t\tY_test = self.Y_test.T\n\t\tposidx = np.nonzero(Y_test)\n\t\tposidx = posidx[0]\n\t\tnegidx = np.nonzero(1-Y_test)\n\t\tnegidx = negidx[0]\n\t\t\n\t\tavg_pos = np.sum(self.X_test[:,posidx],axis=1) / len(posidx)\n\t\tavg_neg = np.sum(self.X_test[:,negidx],axis=1) / len(negidx)\n\n\t\tavg_pos = avg_pos.reshape(self.barcode_num,self.barcode_num)\n\t\tavg_neg = avg_neg.reshape(self.barcode_num,self.barcode_num)\n\n\t\n\t\tf = plt.figure()\n\t\tnorm = MidpointNormalize(midpoint = 0)\n\t\tplt.imshow(avg_pos, cmap = 'seismic', norm=norm)\n\t\tplt.colorbar()\n\t\tf.savefig(\"Pos_Avg_Y_test.pdf\", bbox_inches = 'tight')\n\n\t\tf = plt.figure()\n\t\tnorm = MidpointNormalize(midpoint = 0)\n\t\tplt.imshow(avg_neg, cmap = 'seismic', norm=norm)\n\t\tplt.colorbar()\n\t\tf.savefig(\"Neg_Avg_Y_test.pdf\", bbox_inches = 'tight')\n\n\tdef filter_data(self):\n\t\tsigmoid_Z, auc = self.model.run(self.X_test, self.Y_test)\n\t\tsigmoid_Z = sigmoid_Z.T\n\t\tY_test = self.Y_test.T\n\n\t\tpos_idxs = np.nonzero((np.logical_and((sigmoid_Z > self.threshold_sig),Y_test > 0) > 0))\n\n\t\treturn pos_idxs[0], auc\n\n\tdef run_test(self):\n\t\tsigmoid_Z, auc = self.model.run(self.X_test, self.Y_test)\n\n\t\ty_pred = np.round(sigmoid_Z)\n\n\t\tprecision = precision_score(self.Y_test, y_pred)\n\t\trecall = recall_score(self.Y_test, y_pred)\n\t\taccuracy = accuracy_score(self.Y_test, y_pred)\n\t\tf1 = f1_score(self.Y_test, y_pred)\n\n\t\tprint(\"scores: auc roc [\" + str(auc) + \"] precision [\" + str(precision) + \"] recall [\" + str(recall) + \"] accuracy [\" + str(accuracy) + \"] f1 [\" + str(f1) + \"]\")\n\n\t\treturn y_pred, sigmoid_Z\n\n\tdef run(self):\n\t\t#self.plot_avg_on_off()\n\t\tfrom matplotlib import collections as mc\n\t\tpos_idxs = np.arange(self.m)\n\n\t\tnum_pos = len(pos_idxs)\n\t\tpos_X_test = self.X_test[pos_idxs,:,:,:]\n\t\t\n\t\t#pos_X_test = 
pos_X_test.reshape(self.barcode_num,self.barcode_num,num_pos)\n\t\tpos_Y_test = self.Y_test[pos_idxs,:]\n\n\t\tsigmoid_Z, trashauc = self.model.run(pos_X_test, pos_Y_test)\n\t\trefAuc = roc_auc_score(pos_Y_test, sigmoid_Z)\n\n\t\t#fig =plt.figure()\n\t\tbwidths = [1,5,10,20,30]\n\t\t#colors = ['b', 'g', 'r', 'c', 'm']\n\n\t\tfp = open(self.tag + \"_blanking_values.txt\", \"w\")\n\t\tfp.write(str(refAuc) + \"\\n\")\n\t\tfor w in range(len(bwidths)):\n\n\t\t\tbwidth = bwidths[w]\n\t\t\t#blanked_scores = list()\n\t\t\t#midpoints = list()\n\n\t\t\t#lineCollec = list()\n\n\t\t\t#xvals = np.arange(52)\n\t\t\t#yvals = np.zeros((52,1))\n\t\t\t#ycounts = np.zeros((52,1))\n\t\t\tfor b in range(0,self.barcode_num - (bwidth-1)):\n\t\t\t\tcurr_pos_X_test = pos_X_test.copy()\n\t\t\t\tcurr_pos_X_test[:,b:b+bwidth,:,:] = 0\n\t\t\t\tcurr_pos_X_test[:,:,b:b+bwidth,:] = 0\n\t\t\t\t#curr_pos_X_test = curr_pos_X_test.reshape(-1,num_pos)\n\n\t\t\t\tsigmoid_Z, auc = self.model.run(curr_pos_X_test, pos_Y_test)\n\n\t\t\t\t#blanked_scores.append(auc)\n\t\t\t\t#midpoints.append(b+bwidth/2)\n\t\t\t\t#yvals[b:b+bwidth,:] += auc\n\t\t\t\t#ycounts[b:b+bwidth,:] += 1\n\n\t\t\t\t#lineCollec.append([(b,auc), (b+bwidth,auc)])\n\n\t\t\t\twritestr = str(bwidth) + \"\\t\" + str(b) + \"\\t\" + str(auc) + \"\\t\" + str(b+bwidth) + \"\\t\" + str(auc) + \"\\n\"\n\t\t\t\tfp.write(writestr)\n\t\t\t#print(blanked_scores)\n\t\t\t#print(len(blanked_scores))\n\n\t\t\t#yvals = yvals / ycounts\n\t\t\t#plt.plot(xvals,yvals, colors[w], label='Blank %d barcodes' % bwidth)\n\n\t\t\t#lc = mc.LineCollection(lineCollec, colors = colors[w], linewidths=2)\n\t\t\t#plt.gca().add_collection(lc)\n\n\t\tfp.close()\n\n\t\t#xvals = np.arange(52)\n\t\t#yvals = np.repeat(refAuc,52)\n\t\t#plt.plot(xvals,yvals, 'gray', label='Reference AUC')\n\n\t\t#plt.xlim([0,52])\n\t\t#plt.ylim([.45,0.7])\n\t\t#plt.xlabel('Barcodes')\n\t\t#plt.ylabel('AUC (ROC)')\n\t\t#plt.legend(loc='lower right')\n\t\t#plt.title(\"AUC (ROC) after blanking barcodes (sliding window)\")\n\n\t\t#fig.savefig(self.tag + \"_AbdA_Blank_Barcodes.pdf\")\n", "sub_path": "CNN/KFoldBlanking/KfoldXvalAbdB_DevBlank/src/Interpret.py", "file_name": "Interpret.py", "file_ext": "py", "file_size_in_byte": 4772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "matplotlib.use", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 11, "usage_type": "name"}, {"api_name": "matplotlib.colors.Normalize.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", 
"line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 81, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "288396181", "text": "import tweepy\nimport time\nimport sys\nimport threading\nimport sentiment_analysis as sa\nfrom tweets_tools import get_tweet_auth, get_couchdb, get_tweet_app_auth\n\"\"\"\n    @author: Team 12\n    Declaration: This program was written by team members Bing Xie and Bin Li\n\"\"\"\n\n\ntweet_login_stream = sys.argv[1]  # auth_streaming\ntweet_login_rest_steam = sys.argv[2]  # auth_stream_rest\ntweet_login_followers = sys.argv[3]  # auth_followers\ntweet_login_friends = sys.argv[4]  # auth_friends\ntweet_login_rest = sys.argv[5]  # auth_rest\ncouch_login = sys.argv[6]  # couch_login\ncouch_server = sys.argv[7]  # http://115.146.93.83:5984\ndatabase = sys.argv[8]  # database name sydney\n#user_databse = sys.argv[9] #userlist\nlocationsteam = [float(x) for x in sys.argv[9].split(',')]  # streaming bounding box\nlocationrest = sys.argv[10]\ngeocode = sys.argv[11]\ndef main():\n\n    global classifier\n    classifier = sa.ta_classifier()  # general sentiment classifier\n    thread1 = harvestThread(1, \"Thread-steaming\")  # streaming harvester runs first; manually create the View in CouchDB\n    thread1.start()\n    time.sleep(30)\n\n    thread2 = harvestThread(2, \"Thread-rest-streaming\")  # rest harvester using user ids for timelines\n    thread3 = harvestThread(3, \"Thread-followers\")  # rest harvester using follower ids for timelines\n    #thread4 = harvestThread(4, \"Thread-friends\")\n    thread5 = harvestThread(5, \"Thread-rest\")  # rest harvester using the city geocode\n\n    thread2.start()\n    thread3.start()\n    #thread4.start()\n    thread5.start()\n\n    print(\"Exiting Main Thread\")\n\n# one thread per harvester; 5 sub-threads make up the crawler system\nclass harvestThread(threading.Thread):\n    def __init__(self, threadID, name):\n        threading.Thread.__init__(self)\n        self.threadID = threadID\n        self.name = name\n    def run(self):\n        # Streaming\n        if self.threadID == 1:\n            print(\"Starting \" + self.name)\n            db_data = get_couchdb(couch_login, couch_server, database, True)\n\n            #db_users = get_couchdb(couch_login, couch_server, user_databse, True)\n            auth_steam = get_tweet_auth(tweet_login_stream)\n            api_steam = tweepy.API(auth_steam)\n            sapi = tweepy.streaming.Stream(auth=auth_steam, listener=CustomStreamListener(api_steam, db_data))\n\n            while True:\n                try:\n                    sapi.filter(locations=locationsteam)\n                except:\n                    pass\n        elif self.threadID == 2:\n            print(\"Starting \" + self.name)\n            db_users = get_couchdb(couch_login, couch_server, database)\n            auth_rest_steam = get_tweet_auth(tweet_login_rest_steam)\n            api_rest_stream = tweepy.API(auth_rest_steam)\n            temp_rowskey = []\n            cnt = 0\n            try:\n                while True:\n                    rows = db_users.view('_design/_view/_view/by_userid', group=True).rows\n                    rowskey = []\n                    for ele in rows:\n                        rowskey.append(ele.key)\n                    for ele in temp_rowskey:\n                        rowskey.remove(ele)\n                    for ele in rowskey:\n                        user_id = ele\n                        if cnt != 0:\n                            process_user(api_rest_stream, user_id, db_users)\n                            print('Processed user_id %s' % user_id)\n                    temp_rowskey = rowskey\n                    cnt = cnt + 1\n\n            except KeyboardInterrupt:\n                print('\\nKeyboard Interrupt\\nShutting down the harvester')\n\n        elif self.threadID == 3:\n            print(\"Starting \" + 
self.name)\n db_users = get_couchdb(couch_login, couch_server, database)\n auth_followers = get_tweet_auth(tweet_login_followers)\n api_followers = tweepy.API(auth_followers)\n temp_rowskey = []\n try:\n while True:\n rows = db_users.view('_design/_view/_view/by_userid', group=True).rows\n rowskey = []\n for ele in rows:\n rowskey.append(ele.key)\n for ele in temp_rowskey:\n rowskey.remove(ele)\n for ele in rowskey:\n user_id = ele\n\n followers_id = get_followers(user_id, api_followers)\n\n for follower_id in followers_id:\n process_user(api_followers, follower_id, db_users)\n\n print('Processed followers_id %s' % follower_id)\n temp_rowskey = rowskey\n except KeyboardInterrupt:\n print('\\nKeyboard Interrupt\\nShutting down the harvester')\n\n elif self.threadID ==4:\n print(\"Starting \" + self.name)\n db_users = get_couchdb(couch_login, couch_server, database)\n auth_friends = get_tweet_auth(tweet_login_friends)\n api_friends = tweepy.API(auth_friends)\n temp_rowskey = []\n try:\n while True:\n rows = db_users.view('_design/_view/_view/by_userid', group=True).rows\n rowskey = []\n for ele in rows:\n rowskey.append(ele.key)\n for ele in temp_rowskey:\n rowskey.remove(ele)\n for ele in rowskey:\n user_id = ele\n friends_id = get_friends(user_id, api_friends)\n # print(friends_id)\n\n for friend_id in friends_id:\n process_user(api_friends, friend_id, db_users)\n\n print('Processed friends_id %s' % friend_id)\n temp_rowskey = rowskey\n except KeyboardInterrupt:\n print('\\nKeyboard Interrupt\\nShutting down the harvester')\n\n elif self.threadID ==5:\n print(\"Starting \" + self.name)\n db_users = get_couchdb(couch_login, couch_server, database)\n #\n # auth_rest = get_tweet_auth(tweet_login_rest)\n # api_rest = tweepy.API(auth_rest)\n # api_rest.wait_on_rate_limit = True\n # api_rest.wait_on_rate_limit_notify = True\n auth_rest = get_tweet_app_auth(tweet_login_rest)\n api_rest=tweepy.API(auth_rest,wait_on_rate_limit = True,wait_on_rate_limit_notify = True)\n\n try:\n while True:\n try:\n new_tweets = api_rest.search(geocode=geocode, count=100)\n add_tweets_to_db(new_tweets, db_users)\n except tweepy.TweepError as e:\n # Just exit if any error\n print(\"some error : \" + str(e))\n time.sleep(10)\n continue\n except KeyboardInterrupt:\n print('\\nKeyboard Interrupt\\nShutting down the harvester')\n\n\ndef get_friends(user_id,api):\n friends = []\n page_count = 0\n try:\n for friend in tweepy.Cursor(api.friends_ids, id=user_id, count=200).pages():\n page_count += 1\n friends.extend(friend)\n except:\n pass\n return friends\n\ndef get_followers(user_id,api):\n followers = []\n page_count = 0\n try:\n for follower in tweepy.Cursor(api.followers_ids, id=user_id, count=200).pages():\n page_count += 1\n followers.extend(follower)\n except:\n pass\n return followers\n\ndef process_user(api, user_id, db):\n try:\n user_statuses = api.user_timeline(id=user_id, count=200)\n add_tweets_to_db(user_statuses, db)\n except:\n pass\n\ndef add_tweets_to_db(statuses, db):\n for status in statuses:\n try:\n if status.place.full_name == locationrest:\n if status.id_str not in db:\n tweet = status._json\n tweet['_id'] = status.id_str\n try:\n label = sa.predict(classifier, tweet['text'])\n tweet['label'] = label\n db.save(tweet)\n print('Tweet added to CouchDB: ')\n except :\n pass\n except :\n pass\n\nclass CustomStreamListener(tweepy.StreamListener):\n def __init__(self, api, db):\n self.db = db\n #self.db_user =db_user\n self.api = api\n super(tweepy.StreamListener, self).__init__()\n\n def on_status(self, 
status):\n tweet = status._json\n print(tweet)\n tweet['_id'] = status.id_str\n\n if tweet['_id'] not in self.db:\n try:\n label = sa.predict(classifier, tweet['text'])\n tweet['label'] = label\n self.db.save(tweet)\n print(\"Tweets saved by streaming\")\n #self.db_user.save(tweet.user.id)#add id\n except:\n pass\n\n def on_error(self, status_code):\n print('Encountered error with status code:', status_code, file=sys.stderr)\n return True # Don't kill the stream\n\n def on_timeout(self):\n print('Timeout...', file=sys.stderr)\n return True # Don't kill the stream\n\n\n# Run the Main Method\nif __name__ == '__main__':\n main()\n", "sub_path": "Tweets Harvest/Tweets Harvest/twitter_harvest_multi_thread.py", "file_name": "twitter_harvest_multi_thread.py", "file_ext": "py", "file_size_in_byte": 9361, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "sentiment_analysis.ta_classifier", "line_number": 28, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 46, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 48, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 48, "usage_type": "attribute"}, {"api_name": "tweets_tools.get_couchdb", "line_number": 55, "usage_type": "call"}, {"api_name": "tweets_tools.get_tweet_auth", "line_number": 58, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 59, "usage_type": "call"}, {"api_name": "tweepy.streaming.Stream", "line_number": 60, "usage_type": "call"}, {"api_name": "tweepy.streaming", "line_number": 60, "usage_type": "attribute"}, {"api_name": "tweets_tools.get_couchdb", "line_number": 69, "usage_type": "call"}, {"api_name": "tweets_tools.get_tweet_auth", "line_number": 70, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 71, "usage_type": "call"}, {"api_name": "tweets_tools.get_couchdb", "line_number": 95, "usage_type": "call"}, {"api_name": "tweets_tools.get_tweet_auth", "line_number": 96, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 97, "usage_type": "call"}, {"api_name": "tweets_tools.get_couchdb", "line_number": 122, "usage_type": "call"}, {"api_name": "tweets_tools.get_tweet_auth", "line_number": 123, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 124, "usage_type": "call"}, {"api_name": "tweets_tools.get_couchdb", "line_number": 149, "usage_type": "call"}, {"api_name": "tweets_tools.get_tweet_app_auth", "line_number": 155, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 156, "usage_type": "call"}, {"api_name": "tweepy.TweepError", "line_number": 163, "usage_type": 
"attribute"}, {"api_name": "time.sleep", "line_number": 166, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 176, "usage_type": "call"}, {"api_name": "tweepy.Cursor", "line_number": 187, "usage_type": "call"}, {"api_name": "sentiment_analysis.predict", "line_number": 209, "usage_type": "call"}, {"api_name": "tweepy.StreamListener", "line_number": 218, "usage_type": "attribute"}, {"api_name": "tweepy.StreamListener", "line_number": 223, "usage_type": "attribute"}, {"api_name": "sentiment_analysis.predict", "line_number": 232, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 241, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 245, "usage_type": "attribute"}]} +{"seq_id": "271465564", "text": "from rest_framework import serializers\nfrom weather.models.city_model import City\nfrom weather.models.forecast_model import Forecast\nfrom .city_serializer import CitySerializer\n\n\nclass ForecastSerializer(serializers.ModelSerializer):\n city = CitySerializer()\n\n class Meta:\n model = Forecast\n fields = ('city', 'weather_main', 'weather_description', 'temp', 'date_time')\n\n def create(self, validated_data):\n city_data = validated_data.pop(\"city\")\n city, _ = City.objects.get_or_create(city_name=city_data['city_name'],\n cord_lon=city_data['cord_lon'],\n cord_lat=city_data['cord_lat'],)\n\n weather, created = Forecast.objects.get_or_create(city=city,\n date_time=validated_data['date_time'],\n defaults={\n 'weather_main': validated_data['weather_main'],\n 'weather_description':\n validated_data['weather_description'],\n 'temp': validated_data['temp']})\n\n return weather\n", "sub_path": "weather/serializers/forecast_serializer.py", "file_name": "forecast_serializer.py", "file_ext": "py", "file_size_in_byte": 1365, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 7, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 7, "usage_type": "name"}, {"api_name": "city_serializer.CitySerializer", "line_number": 8, "usage_type": "call"}, {"api_name": "weather.models.forecast_model.Forecast", "line_number": 11, "usage_type": "name"}, {"api_name": "weather.models.city_model.City.objects.get_or_create", "line_number": 16, "usage_type": "call"}, {"api_name": "weather.models.city_model.City.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "weather.models.city_model.City", "line_number": 16, "usage_type": "name"}, {"api_name": "weather.models.city_model", "line_number": 20, "usage_type": "name"}, {"api_name": "weather.models.forecast_model.Forecast.objects.get_or_create", "line_number": 20, "usage_type": "call"}, {"api_name": "weather.models.forecast_model.Forecast.objects", "line_number": 20, "usage_type": "attribute"}, {"api_name": "weather.models.forecast_model.Forecast", "line_number": 20, "usage_type": "name"}, {"api_name": "weather.models.city_model", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "466670739", "text": "import pymongo\n\naid = '401' # Must be unique\nname = \"Mongo db connector\"\ndesc = \"Connector for the mongo database .\"\n\n#Modify with your parameters\nHOST = \"localhost\"\nPORT = 27017\nDB = \"test\"\nCOLLECTION = \"test\"\n\ndef queryDB(query):\n con = pymongo.MongoClient(HOST, 27017)\n db = con[DB]\n cur = db[COLLECTION].find(query)\n return cur\n\ndef launch(query):\n cur = queryDB(query)\n return cur\n\n", "sub_path": 
"db_connectors/mongo/mongo.py", "file_name": "mongo.py", "file_ext": "py", "file_size_in_byte": 415, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pymongo.MongoClient", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "21886414", "text": "import sys, argparse\n\nclass Options(object):\n\n opts = ''\n\n def __init__(self, argv):\n \n parser = argparse.ArgumentParser(description='Sialia Command Line Twitter Client')\n parser.add_argument('-m', type=str, default='timeline', help='which stream do you want to open? timeline, mentions, dm, search')\n parser.add_argument('-q', type=str, default='', help='query for search streams')\n \n args = parser.parse_args()\n \n mode = args.m\n \n opts = {}\n \n if (mode != 'timeline' and mode != 'mentions' and mode != 'dm' and mode != 'search'):\n opts['mode'] = 'timeline'\n else: \n opts['mode'] = mode\n \n self.__class__.opts = opts\n", "sub_path": "rcwd/Options.py", "file_name": "Options.py", "file_ext": "py", "file_size_in_byte": 755, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "519734492", "text": "from django.conf.urls import patterns, url\nimport views\n\nurlpatterns = patterns('',\n # Sites pages\n url(r'^tiles/$', views.sites_snippet_page,\n kwargs=dict(template=\"tiles.html\",\n repository_url=\"https://svn.mozilla.org/projects/l10n-misc/trunk/firefoxtiles/en-US/\",\n default_filename=\"tiles.lang\"),\n name='sites-tiles'),\n url(r'^snippets/$', views.sites_snippet_page,\n kwargs=dict(template=\"snippets.html\",\n repository_url=\"https://svn.mozilla.org/projects/l10n-misc/trunk/snippets/en-US/\",\n default_filename=\"jan2014.lang\"),\n name='sites-snippets'),\n url(r'^updater/$', views.sites_snippet_page,\n kwargs=dict(template=\"updater.html\",\n repository_url=\"https://svn.mozilla.org/projects/l10n-misc/trunk/firefoxupdater/en-US/\",\n default_filename=\"updater.lang\"),\n name='sites-updater'),\n)", "sub_path": "pontoon/sites/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 6, "usage_type": "call"}, {"api_name": "views.sites_snippet_page", "line_number": 6, "usage_type": "attribute"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "views.sites_snippet_page", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "views.sites_snippet_page", "line_number": 16, "usage_type": "attribute"}]} +{"seq_id": "531338128", "text": "import os\nimport logging\nimport urllib\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.db import djangoforms\nfrom google.appengine.ext import db\n\nfrom controllers.home import BASE_PATH, PROJECT_PATH\nfrom models.hostinfo import Owner, Venue, Address\nfrom controllers.utils import get_authentication_urls\n\nlogger = logging.getLogger('OwnerHandler')\n\n\nclass OwnerForm(djangoforms.ModelForm):\n #def __init__(self, *args, **kw):\n # super(djangoforms.ModelForm, 
self).__init__(*args, **kw)\n # self.fields.keyOrder = [\n # 'referenceNumber',\n # 'surname', 'firstNames', 'emailAddress', 'languages', \n\n class Meta:\n model = Owner\n exclude = ['created', 'creator']\n\n#from acl import Acl\n\nclass ViewOwner(webapp.RequestHandler):\n\n def get(self):\n #acl = Acl(area='ownerinfo',\n # user=users.get_current_user())\n #assert acl.has_access(topic='ViewOwner', name='get') is True\n auth_url, auth_url_text = get_authentication_urls(self.request.uri)\n filepath = os.path.join(PROJECT_PATH, \n 'templates', 'services', 'viewowner.html')\n ownerkey = self.request.get('ownerkey')\n owner = Owner.get(ownerkey)\n form = OwnerForm(instance=owner)\n owner_values = []\n for field in form.fields.keyOrder:\n for value in owner.properties().values():\n if value.name == field:\n name = value.name\n if value.verbose_name:\n name = value.verbose_name\n val = value.get_value_for_form(owner)\n owner_values.append((name, val))\n # venues = Venue.all().filter('owner = ', owner).order('name')\n venues = owner.owner_venues\n addresses = owner.entity_addresses\n phonenumbers = owner.entity_phonenumbers\n emails = owner.entity_emails\n self.response.out.write(template.render(filepath, \n {\n 'base_path':BASE_PATH,\n 'owner':owner,\n 'owner_values':owner_values,\n 'venues':venues,\n 'addresses':addresses,\n 'phonenumbers':phonenumbers,\n 'emails':emails,\n 'user':users.get_current_user(),\n 'is_admin_user':users.is_current_user_admin(),\n 'auth_url':auth_url,\n 'auth_url_text':auth_url_text\n }))\n\n def post(self):\n ownerkey = self.request.get('ownerkey')\n owner = Owner.get(ownerkey)\n venuekey = self.request.get('venuekey', None)\n params = {}\n if venuekey:\n venue = Venue.get(venuekey)\n action = self.request.get('action', None)\n if action == 'Open':\n is_valid, err = venue.validate()\n if not is_valid:\n params['error'] = err\n params['came_from'] = \\\n '/services/owner/viewowner?ownerkey=%s' % ownerkey\n params = urllib.urlencode(params)\n url = '/home/showerror?%s' % params\n self.redirect(url)\n return\n venue.state = 'Open'\n venue.put()\n elif action == 'Close':\n venue.state = 'Closed'\n venue.put()\n else:\n logging.error('Open Venue on Owner receive incorrect action %s',\n action)\n self.redirect('/services/owner/viewowner?ownerkey=%s' % ownerkey)\n\n\nclass CaptureOwner(webapp.RequestHandler):\n\n def get(self):\n auth_url, auth_url_text = get_authentication_urls(self.request.uri)\n filepath = os.path.join(PROJECT_PATH, \n 'templates', 'services', 'captureowner.html')\n self.response.out.write(template.render(filepath, \n {\n 'base_path':BASE_PATH,\n 'form':OwnerForm(),\n 'user':users.get_current_user(),\n 'auth_url':auth_url,\n 'auth_url_text':auth_url_text\n }))\n\n def post(self):\n data = OwnerForm(data=self.request.POST)\n if data.is_valid():\n entity = data.save(commit=False)\n entity.creator = users.get_current_user()\n entity.put()\n self.redirect('/services/hostinfo')\n else:\n filepath = os.path.join(PROJECT_PATH, \n 'templates', 'services', 'captureowner.html')\n self.response.out.write(template.render(filepath, {'base_path':BASE_PATH,\n 'form':data\n }))\n\n\nclass EditOwner(webapp.RequestHandler):\n\n def get(self):\n auth_url, auth_url_text = get_authentication_urls(self.request.uri)\n ownerkey = self.request.get('ownerkey')\n owner = Owner.get(ownerkey)\n filepath = os.path.join(PROJECT_PATH, \n 'templates', 'services', 'editowner.html')\n self.response.out.write(template.render(filepath, \n {\n 'base_path':BASE_PATH,\n 
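                                     # The form below is bound to the existing Owner instance, so the edit page is pre-filled with its current field values.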
'form':OwnerForm(instance=owner),\n 'ownerkey':ownerkey,\n 'user':users.get_current_user(),\n 'auth_url':auth_url,\n 'auth_url_text':auth_url_text\n }))\n\n def post(self):\n ownerkey = self.request.get('ownerkey')\n owner = Owner.get(ownerkey)\n data = OwnerForm(data=self.request.POST, instance=owner)\n if data.is_valid():\n entity = data.save(commit=False)\n #Change creator to last modified\n entity.creator = users.get_current_user()\n entity.put()\n self.redirect('/services/hostinfo')\n else:\n filepath = os.path.join(PROJECT_PATH, \n 'templates', 'services', 'editowner.html')\n self.response.out.write(template.render(filepath, \n {\n 'base_path':BASE_PATH,\n 'form':data,\n 'ownerkey':ownerkey\n }))\n\n\nclass DeleteOwner(webapp.RequestHandler):\n\n def get(self):\n ownerkey = self.request.get('ownerkey')\n owner = Owner.get(ownerkey)\n if owner:\n #recursive delete\n owner.rdelete()\n\n self.redirect('/services/hostinfo')\n\n", "sub_path": "site/controllers/owner.py", "file_name": "owner.py", "file_ext": "py", "file_size_in_byte": 6885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 14, "usage_type": "call"}, {"api_name": "google.appengine.ext.db.djangoforms.ModelForm", "line_number": 17, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.db.djangoforms", "line_number": 17, "usage_type": "name"}, {"api_name": "models.hostinfo.Owner", "line_number": 25, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 30, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 30, "usage_type": "name"}, {"api_name": "controllers.utils.get_authentication_urls", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "controllers.home.PROJECT_PATH", "line_number": 37, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.hostinfo.Owner.get", "line_number": 40, "usage_type": "call"}, {"api_name": "models.hostinfo.Owner", "line_number": 40, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 56, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 56, "usage_type": "name"}, {"api_name": "controllers.home.BASE_PATH", "line_number": 58, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 65, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 65, "usage_type": "name"}, {"api_name": "google.appengine.api.users.is_current_user_admin", "line_number": 66, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 66, "usage_type": "name"}, {"api_name": "models.hostinfo.Owner.get", "line_number": 73, "usage_type": "call"}, {"api_name": "models.hostinfo.Owner", "line_number": 73, "usage_type": "name"}, {"api_name": "models.hostinfo.Venue.get", "line_number": 77, "usage_type": "call"}, {"api_name": "models.hostinfo.Venue", "line_number": 77, "usage_type": "name"}, {"api_name": "urllib.urlencode", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 95, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 100, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 100, "usage_type": "name"}, 
{"api_name": "controllers.utils.get_authentication_urls", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 104, "usage_type": "call"}, {"api_name": "controllers.home.PROJECT_PATH", "line_number": 104, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 104, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 106, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 106, "usage_type": "name"}, {"api_name": "controllers.home.BASE_PATH", "line_number": 108, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 110, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 110, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 119, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 119, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 123, "usage_type": "call"}, {"api_name": "controllers.home.PROJECT_PATH", "line_number": 123, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 123, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 125, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 125, "usage_type": "name"}, {"api_name": "controllers.home.BASE_PATH", "line_number": 125, "usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 130, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 130, "usage_type": "name"}, {"api_name": "controllers.utils.get_authentication_urls", "line_number": 133, "usage_type": "call"}, {"api_name": "models.hostinfo.Owner.get", "line_number": 135, "usage_type": "call"}, {"api_name": "models.hostinfo.Owner", "line_number": 135, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "controllers.home.PROJECT_PATH", "line_number": 136, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 138, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 138, "usage_type": "name"}, {"api_name": "controllers.home.BASE_PATH", "line_number": 140, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 143, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 143, "usage_type": "name"}, {"api_name": "models.hostinfo.Owner.get", "line_number": 150, "usage_type": "call"}, {"api_name": "models.hostinfo.Owner", "line_number": 150, "usage_type": "name"}, {"api_name": "google.appengine.api.users.get_current_user", "line_number": 155, "usage_type": "call"}, {"api_name": "google.appengine.api.users", "line_number": 155, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 159, "usage_type": "call"}, {"api_name": "controllers.home.PROJECT_PATH", "line_number": 159, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 159, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp.template.render", "line_number": 161, "usage_type": "call"}, {"api_name": "google.appengine.ext.webapp.template", "line_number": 161, "usage_type": "name"}, {"api_name": "controllers.home.BASE_PATH", "line_number": 163, 
"usage_type": "name"}, {"api_name": "google.appengine.ext.webapp.RequestHandler", "line_number": 169, "usage_type": "attribute"}, {"api_name": "google.appengine.ext.webapp", "line_number": 169, "usage_type": "name"}, {"api_name": "models.hostinfo.Owner.get", "line_number": 173, "usage_type": "call"}, {"api_name": "models.hostinfo.Owner", "line_number": 173, "usage_type": "name"}]} +{"seq_id": "554874316", "text": "\"\"\"empty message\n\nRevision ID: 3c47e7865cdd\nRevises: f9af8b43a437\nCreate Date: 2020-12-22 23:50:38.783851\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3c47e7865cdd'\ndown_revision = 'f9af8b43a437'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('artists', sa.Column('website', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('artists', 'website')\n # ### end Alembic commands ###\n", "sub_path": "FSND/Udacity Repo Files/projects/01_fyyur/starter_code/migrations_old/versions/3c47e7865cdd_.py", "file_name": "3c47e7865cdd_.py", "file_ext": "py", "file_size_in_byte": 655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "alembic.op.add_column", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op.drop_column", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "351825727", "text": "from Time_Matters_SingleDoc.InvertedIndex import kw_ext\nfrom Time_Matters_SingleDoc.GetDateScores import dt_frames\nfrom langdetect import detect\n\n\ndef Time_Matters_SingleDoc(txt, temporal_tagger=[], time_matters_parameters=[], score_type='single', debug_mode=False):\n try:\n yake_lang = detect(txt)\n except:\n yake_lang = 'en'\n tt_name, language, document_type, document_creation_time, date_granularity, \\\n num_of_keywords, N, TH, n_contextual_window = verify_input_data(temporal_tagger, time_matters_parameters)\n\n inverted_index, words_array, dates_array, sentence_array, date_dictionary, new_text = kw_ext(yake_lang,language, txt, num_of_keywords, document_type,\n document_creation_time, date_granularity, tt_name)\n\n relevant_dates, DiceMatrix = dt_frames(inverted_index, words_array, dates_array, n_contextual_window,\n TH, N, score_type)\n\n dates_array_score = []\n for k in range(len(relevant_dates)):\n dates_array_score.append((relevant_dates[k][0], relevant_dates[k][1]))\n if temporal_tagger[0] == 'py_heideltime':\n final_score_output = get_final_output(inverted_index, dates_array_score, debug_mode, date_dictionary)\n else:\n final_score_output = get_final_output_rule_based(inverted_index, dates_array_score)\n\n if score_type == 'multiple' and debug_mode:\n n_txt = text_refactor(new_text, final_score_output, temporal_tagger)\n return final_score_output, dates_array, words_array, inverted_index, DiceMatrix, n_txt\n elif score_type == 'multiple' and not debug_mode:\n return final_score_output, sentence_array\n elif score_type == 'single' and debug_mode:\n n_txt = text_refactor(new_text, final_score_output, temporal_tagger)\n return 
final_score_output, dates_array, words_array, inverted_index, DiceMatrix, n_txt\n elif score_type == 'single' and not debug_mode:\n return final_score_output\n else:\n print('You must select a valid type of score.\\n'\n 'options:\\n'\n ' single;\\n'\n ' multiple;')\n return []\n\n\ndef verify_input_data(temporal_tagger, time_matters_parameters):\n\n tt_name = 'py_heideltime'\n language = 'English'\n document_type = 'news'\n document_creation_time = ''\n date_granularity = ''\n # Verify the values for temporal Tagger parameters.\n try:\n tt_name = temporal_tagger[0].lower()\n if tt_name == 'py_heideltime':\n language = temporal_tagger[1]\n date_granularity = temporal_tagger[2].lower()\n document_type = temporal_tagger[3]\n document_creation_time = temporal_tagger[4]\n elif tt_name == 'rule_based':\n date_granularity = temporal_tagger[1].lower()\n except:\n pass\n num_of_keywords = 10\n n_contextual_window = 'none'\n N = 'max'\n TH = 0.05\n try:\n num_of_keywords = time_matters_parameters[0]\n n_contextual_window = time_matters_parameters[1]\n N = time_matters_parameters[2]\n TH = time_matters_parameters[3]\n except:\n pass\n return tt_name, language, document_type, document_creation_time, date_granularity, \\\n num_of_keywords, N, TH, n_contextual_window\n\n\ndef get_final_output(dictionary, list_dates_score, debug_mode, date_dictionary):\n final_output= {}\n if debug_mode:\n for n_lt in range(len(list_dates_score)):\n dict_date_info = (dictionary[list_dates_score[n_lt][0]][2])\n\n total_offset = []\n #print(date_dictionary[list_dates_score[n_lt][0]])\n #print(dict_date_info)\n # get all offset from dates\n for offset in dict_date_info:\n total_offset += dict_date_info[offset][1]\n #print(total_offset)\n final_output = create_final_output_debug(final_output, list_dates_score, date_dictionary, total_offset, n_lt)\n\n return final_output\n else:\n for n_lt in range(len(list_dates_score)):\n dict_date_info = (dictionary[list_dates_score[n_lt][0]][2])\n total_offset=[]\n\n # get all offset from dates\n for offset in dict_date_info:\n total_offset += dict_date_info[offset][1]\n #print(total_offset)\n final_output = create_final_output(final_output, list_dates_score, date_dictionary, total_offset, n_lt)\n\n return final_output\n\n\ndef create_final_output(final_output, list_dates_score, date_dictionary, total_offset, n_lt):\n for n_expression in range(len(total_offset)):\n try:\n if date_dictionary[list_dates_score[n_lt][0]][n_expression] not in final_output:\n final_output[date_dictionary[list_dates_score[n_lt][0]][n_expression]] = list_dates_score[n_lt][1]\n except:\n return final_output\n return final_output\n\n\ndef create_final_output_debug(final_output, list_dates_score, date_dictionary, total_offset, n_lt):\n for n_expression in range(len(total_offset)):\n # print('mm'+str(n_expression))\n # print(date_dictionary[list_dates_score[n_lt][0]][n_expression])\n try:\n new_word = date_dictionary[list_dates_score[n_lt][0]][n_expression].replace(' ', '_')\n if new_word not in final_output:\n final_output[new_word] = [list_dates_score[n_lt][1], [total_offset[n_expression]]]\n else:\n final_output[new_word][1].append(total_offset[n_expression])\n except:\n return final_output\n return final_output\n\ndef text_refactor(new_text, final_score_output, temporal_tagger):\n if temporal_tagger[0] == 'rule_based':\n return new_text\n else:\n tokenize_text = new_text.split()\n for i in final_score_output:\n offset = final_score_output[i][1]\n new_word = i.replace(' ', '_')\n for n_ofset in offset:\n 
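                # Replace the token at each recorded offset with the underscored form of the date expression, so multi-word dates become a single token in the rebuilt text.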
tokenize_text[n_ofset] = new_word\n n_txt = \" \".join(tokenize_text)\n\n return n_txt\n\ndef get_final_output_rule_based(dictionary, list_dates_score):\n final_output= []\n for lt in list_dates_score:\n dict_date_info = (dictionary[lt[0]][2])\n total_offset=[]\n for offset in dict_date_info:\n total_offset += dict_date_info[offset][1]\n\n final_output.append((lt[0],lt[1],total_offset))\n return final_output", "sub_path": "Time_Matters_SingleDoc/Time_Matters_SingleDoc.py", "file_name": "Time_Matters_SingleDoc.py", "file_ext": "py", "file_size_in_byte": 6451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "langdetect.detect", "line_number": 8, "usage_type": "call"}, {"api_name": "Time_Matters_SingleDoc.InvertedIndex.kw_ext", "line_number": 14, "usage_type": "call"}, {"api_name": "Time_Matters_SingleDoc.GetDateScores.dt_frames", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "412154924", "text": "import wx\nfrom app import DashboardApp\n\nclass AutoChooserPanel(wx.Panel):\n def __init__(self, parent, config):\n wx.Panel.__init__(self, parent)\n self.config = config\n self.build()\n\n def build(self):\n sizer = wx.BoxSizer(wx.VERTICAL)\n self.SetSizer(sizer)\n\n dropDownPanel = self.createAutoPanel(self)\n sizer.Add(dropDownPanel, 1, border=5, flag=wx.ALL | wx.EXPAND)\n\n\n def createAutoPanel(self, parent):\n panel = wx.Panel(parent)\n sizer = wx.BoxSizer(wx.VERTICAL)\n panel.SetSizer(sizer)\n\n auto_selection = self.config.get_auto_selection()\n auto_groups = self.config.get_auto_mode_groups()\n\n self.dropDown = wx.ComboBox(panel, choices=auto_groups, value=auto_selection['name'], style=wx.CB_READONLY)\n self.dropDown.Bind(wx.EVT_COMBOBOX, self.on_group_selected)\n sizer.Add(self.dropDown, 0, border=5, flag=wx.ALL | wx.EXPAND)\n\n auto_modes = self.config.get_auto_modes_by_group(self.dropDown.GetStringSelection())\n\n self.modesList = wx.ListBox(panel, choices=auto_modes, style=wx.LB_SINGLE)\n self.modesList.SetStringSelection(auto_selection['mode'])\n sizer.Add(self.modesList, 1, border=5, flag=wx.ALL | wx.EXPAND)\n\n self.setAutoButton = wx.Button(panel, label=\"Select Autonomous\")\n self.setAutoButton.Bind(wx.EVT_BUTTON, self.on_set_auto)\n\n sizer.Add(self.setAutoButton, 0, flag=wx.CENTER | wx.EXPAND) \n\n return panel\n\n def on_set_auto(self, event):\n self.config.set_selected_auto_group(self.dropDown.GetValue())\n self.config.set_selected_auto_mode(self.modesList.GetStringSelection())\n self.config.save()\n\n def on_group_selected(self, event):\n self.modesList.Clear()\n auto_modes = self.config.get_auto_modes_by_group(self.dropDown.GetStringSelection())\n self.modesList.Append(auto_modes)\n self.Refresh()", "sub_path": "panels/auto_chooser.py", "file_name": "auto_chooser.py", "file_ext": "py", "file_size_in_byte": 1932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "wx.Panel", "line_number": 4, "usage_type": "attribute"}, {"api_name": "wx.Panel.__init__", "line_number": 6, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 6, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 11, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 11, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 19, "usage_type": "call"}, 
{"api_name": "wx.BoxSizer", "line_number": 20, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 20, "usage_type": "attribute"}, {"api_name": "wx.ComboBox", "line_number": 26, "usage_type": "call"}, {"api_name": "wx.CB_READONLY", "line_number": 26, "usage_type": "attribute"}, {"api_name": "wx.EVT_COMBOBOX", "line_number": 27, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 28, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 28, "usage_type": "attribute"}, {"api_name": "wx.ListBox", "line_number": 32, "usage_type": "call"}, {"api_name": "wx.LB_SINGLE", "line_number": 32, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 34, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 34, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 36, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 37, "usage_type": "attribute"}, {"api_name": "wx.CENTER", "line_number": 39, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 39, "usage_type": "attribute"}]} +{"seq_id": "642329872", "text": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass FeatAvgPool(nn.Module):\n def __init__(self, input_size, hidden_size, kernel_size, stride, freeze):\n super(FeatAvgPool, self).__init__()\n self.conv = nn.Conv1d(input_size, hidden_size, 1, 1)\n self.pool = nn.AvgPool1d(kernel_size, stride)\n if freeze:\n for p in self.parameters():\n p.requires_grad = False\n\n def forward(self, x):\n return self.pool(self.conv(x.transpose(1, 2)).relu())\n \n\ndef build_featpool(cfg, arch):\n input_dim = cfg.DATASETS.VISUAL_DIM\n if arch == 'TAN':\n input_size = cfg.MODEL.TAN.FEATPOOL.INPUT_SIZE\n hidden_size = cfg.MODEL.TAN.FEATPOOL.HIDDEN_SIZE\n kernel_size = cfg.MODEL.TAN.FEATPOOL.KERNEL_SIZE\n stride = cfg.INPUT.NUM_SEGMENTS // cfg.DATASETS.NUM_CLIPS\n freeze = cfg.MODEL.TAN.FEATPOOL.FREEZE\n return FeatAvgPool(input_size, hidden_size, kernel_size, stride, freeze)\n else:\n raise NotImplementedError", "sub_path": "vmr/modeling/tan/.ipynb_checkpoints/featpool-checkpoint.py", "file_name": "featpool-checkpoint.py", "file_ext": "py", "file_size_in_byte": 1031, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.nn.Module", "line_number": 5, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.nn.Conv1d", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool1d", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "175358390", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 6 10:01:58 2020\r\n\r\n@author: Xu\r\n\r\n3.文本关键信息提取\r\n单文本分析:\r\n基于TextRank的算法的单文本摘要提取与关键词抽取。\r\n\r\n多文本分析:\r\n基于LDA的多文档主题分布探索。\r\n\r\n\r\n\r\n负责响应数据查询请求,调用数据逻辑程序。\r\n基于数据逻辑查询结果,业务逻辑程序组装出文本关键信息并返回给前端页面。\r\n\r\n\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport re\r\nimport config\r\nfrom newspaper import Article\r\nsys.path.append(r'D:\\PythonJupyterNootebook\\My NLP projects\\My projects\\NLPVisualizationSystem\\model')\r\nfrom keywords_textrank import TextRank\r\nfrom abstract_textrank import AbstarctTextrank\r\nfrom topic_cluster_lda import lda_model\r\n\r\n\r\n# 3. 
Text key information extraction --- Part 1: single-document analysis \r\n# Data logic:\r\ndef save_to_file(filepath, content):\r\n    \"\"\"\r\n    Write the text to a local file.\r\n\r\n    Parameters\r\n    ----------\r\n    filepath : TYPE-str\r\n        DESCRIPTION: the file save path.\r\n    content : TYPE-str\r\n        DESCRIPTION: the text to write.\r\n\r\n    Returns\r\n    -------\r\n    None.\r\n\r\n    \"\"\"\r\n    f = open(filepath, 'w', encoding='utf-8') \r\n    f.write(content)\r\n    f.close()\r\n\r\n\r\ndef read_file(filepath):\r\n    \"\"\"\r\n    Read a local file and return its text.\r\n\r\n    Parameters\r\n    ----------\r\n    filepath : TYPE-str\r\n        DESCRIPTION: the text file path.\r\n\r\n    Returns\r\n    -------\r\n    content : TYPE-str\r\n        DESCRIPTION: the preprocessed news text.\r\n\r\n    \"\"\"\r\n    f = open(filepath,'r',encoding='utf-8')\r\n    content = f.read()\r\n    f.close()\r\n    return content \r\n\r\n\r\ndef get_webcontent(url):\r\n    \"\"\"\r\n    Online mode: according to the URL, grab the text content of the news.\r\n\r\n    Parameters\r\n    ----------\r\n    url : TYPE-str\r\n        DESCRIPTION: news online URL.\r\n\r\n    Returns\r\n    -------\r\n    content : TYPE-str\r\n        DESCRIPTION: the preprocessed news text.\r\n\r\n    \"\"\"\r\n    news = Article(url, language='zh')\r\n    news.download()\r\n    news.parse()\r\n    content = news.text\r\n    return content\r\n\r\n\r\ndef get_abstract(text): \r\n    \"\"\"\r\n    Use the TextRank algorithm to extract a text summary/abstract.\r\n    \r\n    Parameters\r\n    ----------\r\n    text : TYPE-str\r\n        DESCRIPTION: the text content to be extracted.\r\n\r\n    Returns\r\n    -------\r\n    abstract : TYPE-str\r\n        DESCRIPTION: the abstract extracted from the text.\r\n    \r\n    \"\"\"\r\n    abstracter = AbstarctTextrank()\r\n    keysentences = abstracter.extract_abstract(text, 3)\r\n    abstract = []\r\n    for sent in keysentences:\r\n        abstract.append(sent[0]) \r\n    return abstract\r\n\r\ndef get_keywords(text):\r\n    \"\"\"\r\n    Use the TextRank algorithm to extract text keywords.\r\n    \r\n    Parameters\r\n    ----------\r\n    text : TYPE-str\r\n        DESCRIPTION: the text content to be extracted.\r\n\r\n    Returns\r\n    -------\r\n    words : TYPE-str\r\n        DESCRIPTION: the keywords extracted from the text.\r\n    \r\n    \"\"\"\r\n    keywords_textanker = TextRank()\r\n    keywords = keywords_textanker.extract_keywords(text, 10)\r\n    words = []\r\n    for word in keywords:\r\n        words.append(word[0])\r\n    return words \r\n\r\n\r\n\r\n\r\ndef keyinfo_by_url_query():\r\n    \"\"\"\r\n    According to the URL specified by the user, the text of the URL is collected, \r\n    and an abstract and keywords are automatically generated.\r\n\r\n    Returns\r\n    -------\r\n    abstract:TYPE-str\r\n        DESCRIPTION: the abstract extracted from the text.\r\n    keywords:TYPE-str\r\n        DESCRIPTION: the keywords extracted from the text.\r\n\r\n    \"\"\"\r\n    url = read_file(config.keyinfo_input_url_path)\r\n    content = get_webcontent(url)\r\n    abstract = get_abstract(content)\r\n    keywords = get_keywords(content)\r\n    abstract = '。 '.join(abstract) + '。'\r\n    keywords = ', '.join(keywords)\r\n    wr_to_file = '摘要:\\n' + abstract + '\\n关键词:\\n' + keywords\r\n    save_to_file(config.download_keyinfo_input_url_save_path, wr_to_file)\r\n    return abstract, keywords\r\n    \r\n\r\ndef keyinfo_by_input_text_query():\r\n    \"\"\"\r\n    According to the text input by the user, an abstract and keywords \r\n    are automatically generated.\r\n\r\n    Returns\r\n    -------\r\n    abstract:TYPE-str\r\n        DESCRIPTION: the abstract extracted from the text.\r\n    keywords:TYPE-str\r\n        DESCRIPTION: the keywords extracted from the text.\r\n\r\n    \"\"\"\r\n    input_text = read_file(config.keyinfo_input_text_path)\r\n    abstract = get_abstract(input_text)\r\n    keywords = get_keywords(input_text)\r\n    
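    # Join the ranked sentences with the Chinese full stop '。' and the keywords with ', ' to build the strings shown to the user.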
abstract = '。 '.join(abstract) + '。'\r\n    keywords = ', '.join(keywords)\r\n    wr_to_file = '摘要:\\n' + abstract + '\\n关键词:\\n' + keywords\r\n    save_to_file(config.download_keyinfo_input_text_save_path, wr_to_file)\r\n    return abstract, keywords\r\n\r\n\r\ndef keyinfo_by_import_file_query():\r\n    \"\"\"\r\n    According to the local file imported by the user, an abstract and keywords \r\n    are automatically generated.\r\n\r\n    Returns\r\n    -------\r\n    abstract:TYPE-str\r\n        DESCRIPTION: the abstract extracted from the text.\r\n    keywords:TYPE-str\r\n        DESCRIPTION: the keywords extracted from the text.\r\n\r\n    \"\"\"\r\n    path = read_file(config.keyinfo_input_file_save_path).strip()\r\n    content = read_file(path)\r\n    abstract = get_abstract(content)\r\n    keywords = get_keywords(content)\r\n    abstract = '。 '.join(abstract) + '。'\r\n    keywords = ', '.join(keywords)\r\n    wr_to_file = '摘要:\\n' + abstract + '\\n关键词:\\n' + keywords\r\n    save_to_file(config.download_keyinfo_input_file_save_path, wr_to_file)\r\n    return abstract, keywords\r\n\r\n\r\n\r\n\r\n# Business logic:\r\n \r\ndef rt_keyinfo_url_base():\r\n    \"\"\"\r\n    It is used to return the requested real-time data.\r\n\r\n    Returns\r\n    -------\r\n    curinput : TYPE-dictionary\r\n        returns the real-time data requested by the frontend.\r\n    \r\n    \"\"\"\r\n    url = read_file(config.keyinfo_input_url_path)\r\n    abstract, keywords = keyinfo_by_url_query()\r\n    curinput = {'url': url, 'abstract': abstract, 'keywords': keywords}\r\n    return curinput\r\n\r\n\r\n\r\n\r\ndef rt_keyinfo_input_text_base():\r\n    \"\"\"\r\n    It is used to return the requested real-time data.\r\n\r\n    Returns\r\n    -------\r\n    curinput : TYPE-dictionary\r\n        returns the real-time data requested by the frontend.\r\n    \r\n    \"\"\"\r\n    input_text = read_file(config.keyinfo_input_text_path)\r\n    abstract, keywords = keyinfo_by_input_text_query()\r\n    curinput = {'input_text':input_text, 'abstract': abstract, 'keywords': keywords }\r\n    return curinput\r\n\r\ndef download_rt_keyinfo_import_file_base():\r\n    path = read_file(config.keyinfo_input_file_save_path).strip()\r\n    file_dir, filename = os.path.split(path)\r\n    return file_dir, filename\r\n\r\n\r\ndef rt_keyinfo_import_file_base():\r\n    \"\"\"\r\n    It is used to return the requested real-time data.\r\n\r\n    Returns\r\n    -------\r\n    curinput : TYPE-dictionary\r\n        returns the real-time data requested by the frontend.\r\n    \r\n    \"\"\"\r\n    path = read_file(config.keyinfo_input_file_save_path).strip()\r\n    filename = os.path.split(path)[-1]\r\n    abstract, keywords = keyinfo_by_import_file_query()\r\n    curinput = {'filename':filename, 'abstract': abstract, 'keywords': keywords}\r\n    return curinput\r\n\r\n\r\n\r\n# 3. 
Text key information extraction --- Part 2: multi-document analysis \r\n# Data logic:\r\ndef lda_topics_query():\r\n    \"\"\"\r\n    It is used to get the optimal number of topics and save the topic keywords and \r\n    topic distribution of the documents to a file, based on the file imported by the user.\r\n    \r\n    Returns\r\n    -------\r\n    num_topics: type-integer\r\n        returns the number of topics.\r\n    \r\n    \r\n    \"\"\"\r\n    # data preparation\r\n    filepath = read_file(config.topic_input_file_save_path).strip()\r\n    f = open(filepath, 'r', encoding='utf-8')\r\n    content = f.readlines()\r\n    f.close()\r\n    data = [text for text in content if len(re.sub(r'\\s','',text))>5]\r\n    # get the optimal number of topics \r\n    # write the topic keywords to a file \r\n    num_topics, output_topic_keywords, output_topic_dist = lda_model(data, config.download_topic_input_file_save_path) \r\n    return num_topics, output_topic_keywords, output_topic_dist\r\n\r\n\r\n\r\n# Business logic:\r\ndef rt_topic_import_file_base():\r\n    \"\"\"\r\n    It is used to return the requested real-time data.\r\n\r\n    Returns\r\n    -------\r\n    curinput : TYPE-dictionary\r\n        returns the real-time data requested by the frontend.\r\n    \r\n    \"\"\"\r\n    path = read_file(config.topic_input_file_save_path).strip()\r\n    filename = os.path.split(path)[-1]\r\n    num_topics, topic_keywords, topic_dist = lda_topics_query()\r\n    curinput = {'filename':filename, 'num_topics':num_topics, 'topic_keywords': topic_keywords, 'topic_dist': topic_dist}\r\n    return curinput\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "apps/exe_03.py", "file_name": "exe_03.py", "file_ext": "py", "file_size_in_byte": 8801, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.path.append", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "newspaper.Article", "line_number": 91, "usage_type": "call"}, {"api_name": "abstract_textrank.AbstarctTextrank", "line_number": 113, "usage_type": "call"}, {"api_name": "keywords_textrank.TextRank", "line_number": 135, "usage_type": "call"}, {"api_name": "config.keyinfo_input_url_path", "line_number": 158, "usage_type": "attribute"}, {"api_name": "config.download_keyinfo_input_url_save_path", "line_number": 165, "usage_type": "attribute"}, {"api_name": "config.keyinfo_input_text_path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "config.download_keyinfo_input_text_save_path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "config.keyinfo_input_file_save_path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "config.download_keyinfo_input_file_save_path", "line_number": 212, "usage_type": "attribute"}, {"api_name": "config.keyinfo_input_url_path", "line_number": 230, "usage_type": "attribute"}, {"api_name": "config.keyinfo_input_text_path", "line_number": 248, "usage_type": "attribute"}, {"api_name": "config.keyinfo_input_file_save_path", "line_number": 254, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 255, "usage_type": "attribute"}, {"api_name": "config.keyinfo_input_file_save_path", "line_number": 269, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path", "line_number": 270, "usage_type": "attribute"}, {"api_name": "config.topic_input_file_save_path", "line_number": 292, "usage_type": "attribute"}, {"api_name": "re.sub", 
"line_number": 296, "usage_type": "call"}, {"api_name": "topic_cluster_lda.lda_model", "line_number": 299, "usage_type": "call"}, {"api_name": "config.download_topic_input_file_save_path", "line_number": 299, "usage_type": "attribute"}, {"api_name": "config.topic_input_file_save_path", "line_number": 315, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 316, "usage_type": "call"}, {"api_name": "os.path", "line_number": 316, "usage_type": "attribute"}]} +{"seq_id": "633230198", "text": "\"\"\"\nAuthor: Wan Zhou\nClass: Information Retrieval\nTerm: Spring 2016\nProject: Who2Vote4\nThis script combines the content of all json file into one txt file called 'dictionary.txt'\n\"\"\"\n\nimport json\nimport codecs\n\nwith codecs.open('tweets.json', encoding='utf-8') as file1:\n tweet_data = json.load(file1)\n\nwith codecs.open('fb_posts.json', encoding='utf-8') as file2:\n post_data = json.load(file2)\n\nwith codecs.open('official_websites.json', encoding='utf-8') as file3:\n website_data = json.load(file3)\n\nwith codecs.open('wiki.json', encoding='utf-8') as file4:\n wiki_data = json.load(file4)\n\ntweets = \"\"\nfor key in tweet_data:\n for i in tweet_data[key]:\n tweets += i['text'] + \" \"\n\nposts = \"\"\nfor i in range(len(post_data)):\n posts += post_data[i]['message'] + \" \"\n\nwebsites = \"\"\nfor i in range(len(website_data)):\n websites = website_data[i]['text'] + \" \"\n\nwikis = \"\"\nfor key in wiki_data:\n wikis += wiki_data[key]['abstract'] + \" \"\n\ndictionary = \"\"\ndictionary = tweets + posts + websites + wikis\nwith codecs.open('dictionary.txt', 'w', encoding='utf-8') as txt:\n txt.write(dictionary)\n\n\n\n\n", "sub_path": "scripts/spelling/combine_corpus.py", "file_name": "combine_corpus.py", "file_ext": "py", "file_size_in_byte": 1123, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "codecs.open", "line_number": 12, "usage_type": "call"}, {"api_name": "json.load", "line_number": 13, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 15, "usage_type": "call"}, {"api_name": "json.load", "line_number": 16, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 18, "usage_type": "call"}, {"api_name": "json.load", "line_number": 19, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 21, "usage_type": "call"}, {"api_name": "json.load", "line_number": 22, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "233818345", "text": "import torch.nn as nn\nimport torch.nn.functional as F\nimport torch as th\nfrom torch.distributions import kl_divergence\nimport torch.distributions as D\nfrom modules.agents import snail_blocks as snail\nimport math\n\nclass LatentOracleRNNAgent(nn.Module):\n def __init__(self, input_shape, args):\n super(LatentOracleRNNAgent, self).__init__()\n self.args = args\n self.input_shape = input_shape\n self.n_agents = args.n_agents\n self.n_actions = args.n_actions\n self.latent_dim = args.latent_dim\n self.hidden_dim = args.rnn_hidden_dim\n self.bs = 0\n\n self.embed_fc_input_size = args.own_feature_size\n self.latent = th.rand(args.n_agents, args.latent_dim * 2) # (n,mu+var)\n\n self.fc1 = nn.Linear(input_shape, args.rnn_hidden_dim)\n self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)\n\n self.fc2_w_nn = nn.Linear(self.embed_fc_input_size, args.rnn_hidden_dim * args.n_actions,bias=False)\n self.fc2_b_nn = nn.Linear(self.embed_fc_input_size, 
args.n_actions,bias=False)\n\n def init_latent(self, bs):\n self.bs = bs\n loss = 0\n # end\n\n return loss, self.latent[:self.n_agents,:].detach()\n\n def forward(self, inputs, hidden_state, t=0, batch=None):\n inputs = inputs.reshape(-1, self.input_shape)\n h_in = hidden_state.reshape(-1, self.hidden_dim)\n\n embed_fc_input = inputs[:, - self.embed_fc_input_size:] #own features(unit_type_bits+shield_bits_ally)+id\n\n fc2_w = self.fc2_w_nn(embed_fc_input)\n fc2_b = self.fc2_b_nn(embed_fc_input)\n fc2_w = fc2_w.reshape(-1, self.args.rnn_hidden_dim, self.args.n_actions)\n fc2_b = fc2_b.reshape((-1, 1, self.args.n_actions))\n\n x = F.relu(self.fc1(inputs)) # (bs*n,(obs+act+id)) at time t\n h = self.rnn(x, h_in)\n h = h.reshape(-1, 1, self.args.rnn_hidden_dim)\n q = th.bmm(h, fc2_w) + fc2_b\n\n return q.view(-1, self.args.n_actions), h.view(-1, self.args.rnn_hidden_dim), 0\n # (bs*n,n_actions), (bs*n,hidden_dim), (bs*n,latent_dim)\n", "sub_path": "src/modules/agents/latent_oracle_rnn_agent.py", "file_name": "latent_oracle_rnn_agent.py", "file_ext": "py", "file_size_in_byte": 2084, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.nn.Module", "line_number": 9, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "name"}, {"api_name": "torch.rand", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.GRUCell", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 47, "usage_type": "name"}, {"api_name": "torch.bmm", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "500413460", "text": "from scipy.cluster import vq\nfrom sklearn.cluster import MiniBatchKMeans\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom MCQ.datasets import SiftLike\nfrom MCQ.metrics import Eval, QuantizationError\n\n\ndef OPQTrain(trainSet, M, K):\n x = trainSet\n\n d = x.shape[-1] // M\n\n R = np.random.randn(x.shape[-1], x.shape[-1])\n\n C = np.random.randn(M, K, d).astype(np.float)\n\n for i in range(M):\n xs = np.split(x, M, -1)\n C[i] = (C[i] * xs[i].std(0)) + xs[i].mean(0)\n for n in range(64):\n y = np.zeros_like(x)\n xProj = x @ R\n xs = np.split(xProj, M, -1)\n\n\n for i in range(M):\n print(\"subspace #: %d\" % i)\n xSub = xs[i]\n kmeans = MiniBatchKMeans(n_clusters=256, max_iter=64, init=C[i], n_init=1).fit(xSub)\n\n # [k, d]\n centers = kmeans.cluster_centers_\n\n C[i] = centers\n\n # [N, k] = [N, d] [k, d]\n dist = ((xSub[:, None, :] - C[i]) ** 2).sum(-1)\n\n # [N, ]\n idx = dist.argmin(axis=1).reshape(-1)\n\n ySub = centers[idx]\n y[:, i*d:(i+1)*d] = ySub\n\n print(\" opq-np: iter: %d\" % n)\n R_opq_np = R\n\n U, _, Vh = np.linalg.svd(x.T @ y)\n R = U.dot(Vh)\n\n\n multiC = np.zeros((M, K, x.shape[-1]))\n for i in range(M):\n multiC[i, :, i*d:(i+1)*d] = C[i]\n return multiC, R_opq_np\n\ndef OPQEncode(C, R, dataLoader):\n M, K, D = C.shape\n 
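    # C is expected to hold M codebooks of K centroids embedded in the full D-dim space, with only the i-th d-dim block of codebook i non-zero (as built by OPQTrain above).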
print(C.shape)\n d = C.shape[-1] // M\n B = list()\n for x in dataLoader:\n x = x.cuda()\n x = x @ R\n xs = torch.split(x, d, -1)\n b = list()\n for i in range(M):\n xSub = xs[i]\n dist = ((xSub[:, None, :] - C[i, :, i*d:(i+1)*d]) ** 2).sum(-1)\n idx = dist.argmin(axis=-1)\n b.append(idx)\n b = torch.stack(b, -1)\n B.append(b)\n B = torch.cat(B, 0)\n return B\n\n\nif __name__ == \"__main__\":\n with torch.no_grad():\n sift = SiftLike(\"labelme\")\n sift.Train(device=\"cpu\")\n C, R = OPQTrain(sift.data.numpy(), 2, 256)\n C = torch.from_numpy(C).cuda()\n R = torch.from_numpy(R).cuda()\n sift.Encode(device=\"cuda\")\n dataLoader = DataLoader(sift, batch_size=10000, shuffle=False, num_workers=0)\n B = OPQEncode(C, R, dataLoader)\n print(QuantizationError(sift.data @ R, C, B).mean())\n\n sift.Query(device=\"cuda\")\n sift.data = sift.data @ R.cuda()\n # dataLoader = DataLoader(sift, batch_size=1, shuffle=False, num_workers=4, pin_memory=True)\n\n results = Eval.Retrieval(sift.data, C.cuda(), B.cuda())\n sift.Gt()\n recalls = Eval.Recall(results, sift.data[:, :1].cuda()) * 100\n print(\"R @ 1: %.2f%%\" % recalls[0])\n print(\"R @ 10: %.2f%%\" % recalls[9])\n print(\"R @ 100: %.2f%%\" % recalls[99])\n", "sub_path": "src/misc/benchmarks/OPQ.py", "file_name": "OPQ.py", "file_ext": "py", "file_size_in_byte": 2873, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.random.randn", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.split", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.cluster.MiniBatchKMeans", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.linalg.svd", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.split", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 75, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 82, "usage_type": "call"}, {"api_name": "MCQ.datasets.SiftLike", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 89, "usage_type": "call"}, {"api_name": "MCQ.metrics.QuantizationError", "line_number": 91, "usage_type": "call"}, {"api_name": "MCQ.metrics.Eval.Retrieval", "line_number": 97, "usage_type": "call"}, {"api_name": "MCQ.metrics.Eval", "line_number": 97, "usage_type": "name"}, {"api_name": "MCQ.metrics.Eval.Recall", "line_number": 99, "usage_type": "call"}, {"api_name": "MCQ.metrics.Eval", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "49727923", "text": "#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nhttp://www.scipy.org/Cookbook/Least_Squares_Circle\n\"\"\"\n\nfrom numpy import *\n\n# Coordinates of the 2D points\n\n# x = r_[ 9, 35, -13, 10, 23, 0]\n# y = r_[ 34, 10, 6, -14, 27, -10]\n\n# x = r_[36, 36, 19, 18, 33, 26]\n# y = r_[14, 10, 28, 31, 18, 26]\nx = r_[68.2533, 74.9322, 82.4288, 104.337, 111.892, 127.953, 142.97, 156.818, 175.432, 184.949]\ny = r_[98.9417, 132.844, 67.0712, 148.646, 50.9481, 159.973, 45.1403, 166.788, 51.6778, 152.553]\n\n# R0 = 25\n# nb_pts = 8\n# dR = 2\n# angle =9*pi/5\n# x = (10 + R0*cos(theta0) + dR*random.normal(size=nb_pts)).round()\n# y = (10 + R0*sin(theta0) + dR*random.normal(size=nb_pts)).round()\n\n\n# == METHOD 1 ==\nmethod_1 = 'algebraic'\n\n# coordinates of the barycenter\nx_m = mean(x)\ny_m = mean(y)\n\n# calculation of the reduced coordinates\nu = x - x_m\nv = y - y_m\n\n# linear system defining the center in reduced coordinates (uc, vc):\n# Suu * uc + Suv * vc = (Suuu + Suvv)/2\n# Suv * uc + Svv * vc = (Suuv + Svvv)/2\n\nSuv = sum(u * v)\nSuu = sum(u ** 2)\nSvv = sum(v ** 2)\nSuuv = sum(u ** 2 * v)\nSuvv = sum(u * v ** 2)\nSuuu = sum(u ** 3)\nSvvv = sum(v ** 3)\n\n# Solving the linear system\nA = array([[Suu, Suv], [Suv, Svv]])\nB = array([Suuu + Suvv, Svvv + Suuv]) / 2.0\nuc, vc = linalg.solve(A, B)\n\nxc_1 = x_m + uc\nyc_1 = y_m + vc\n\n# Calculation of all distances from the center (xc_1, yc_1)\nRi_1 = sqrt((x - xc_1) ** 2 + (y - yc_1) ** 2)\nR_1 = mean(Ri_1)\nresidu_1 = sum((Ri_1 - R_1) ** 2)\nresidu2_1 = sum((Ri_1 ** 2 - R_1 ** 2) ** 2)\n\n# Decorator to count functions calls\nimport functools\n\n\ndef countcalls(fn):\n \"decorator function count function calls \"\n\n @functools.wraps(fn)\n def wrapped(*args):\n wrapped.ncalls += 1\n return fn(*args)\n\n wrapped.ncalls = 0\n return wrapped\n\n\n# == METHOD 2 ==\nfrom scipy import optimize\n\nmethod_2 = \"leastsq\"\n\n\ndef calc_R(c):\n \"\"\" calculate the distance of each 2D points from the center c=(xc, yc) \"\"\"\n return sqrt((x - c[0]) ** 2 + (y - c[1]) ** 2)\n\n\n@countcalls\ndef f_2(c):\n \"\"\" calculate the algebraic distance between the 2D points and the mean circle centered at c=(xc, yc) \"\"\"\n Ri = calc_R(c)\n return Ri - Ri.mean()\n\n\ncenter_estimate = x_m, y_m\ncenter_2, ier = optimize.leastsq(f_2, center_estimate)\n\nxc_2, yc_2 = center_2\nRi_2 = calc_R(center_2)\nR_2 = Ri_2.mean()\nresidu_2 = sum((Ri_2 - R_2) ** 2)\nresidu2_2 = sum((Ri_2 ** 2 - R_2 ** 2) ** 2)\nncalls_2 = f_2.ncalls\n\n# == METHOD 3 ==\nfrom scipy import odr\n\nmethod_3 = \"odr\"\n\n\n@countcalls\ndef f_3(beta, x):\n \"\"\" implicit function of the circle \"\"\"\n xc, yc, r = beta\n return (x[0] - xc) ** 2 + (x[1] - yc) ** 2 - r ** 2\n\n\ndef calc_estimate(data):\n \"\"\" Return a first estimation on the parameter from the data \"\"\"\n xc0, yc0 = data.x.mean(axis=1)\n r0 = sqrt((data.x[0] - xc0) ** 2 + (data.x[1] - yc0) ** 2).mean()\n return xc0, yc0, r0\n\n\n# for implicit function :\n# data.x contains both coordinates of the points\n# data.y is the dimensionality of the response\nlsc_data = odr.Data(row_stack([x, y]), y=1)\nlsc_model = odr.Model(f_3, implicit=True, estimate=calc_estimate)\nlsc_odr = odr.ODR(lsc_data, lsc_model)\nlsc_out = lsc_odr.run()\n\nxc_3, yc_3, R_3 = lsc_out.beta\nRi_3 = calc_R([xc_3, yc_3])\nresidu_3 = sum((Ri_3 - R_3) ** 2)\nresidu2_3 = sum((Ri_3 ** 2 - R_3 ** 2) ** 2)\nncalls_3 = f_3.ncalls\n\nprint('lsc_out.sum_square = ', lsc_out.sum_square)\n\n# == METHOD 4 ==\n\nmethod_4 = \"odr with 
jacobian\"\n\n\n@countcalls\ndef f_4(beta, x):\n \"\"\" implicit function of the circle \"\"\"\n xc, yc, r = beta\n xi, yi = x\n\n return (xi - xc) ** 2 + (yi - yc) ** 2 - r ** 2\n\n\n@countcalls\ndef jacb(beta, x):\n \"\"\" Jacobian function with respect to the parameters beta.\n return df/dbeta\n \"\"\"\n xc, yc, r = beta\n xi, yi = x\n\n df_db = empty((beta.size, x.shape[1]))\n df_db[0] = 2 * (xc - xi) # d_f/dxc\n df_db[1] = 2 * (yc - yi) # d_f/dyc\n df_db[2] = -2 * r # d_f/dr\n\n return df_db\n\n\n@countcalls\ndef jacd(beta, x):\n \"\"\" Jacobian function with respect to the input x.\n return df/dx\n \"\"\"\n xc, yc, r = beta\n xi, yi = x\n\n df_dx = empty_like(x)\n df_dx[0] = 2 * (xi - xc) # d_f/dxi\n df_dx[1] = 2 * (yi - yc) # d_f/dyi\n\n return df_dx\n\n\ndef calc_estimate(data):\n \"\"\" Return a first estimation on the parameter from the data \"\"\"\n xc0, yc0 = data.x.mean(axis=1)\n r0 = sqrt((data.x[0] - xc0) ** 2 + (data.x[1] - yc0) ** 2).mean()\n return xc0, yc0, r0\n\n\n# for implicit function :\n# data.x contains both coordinates of the points\n# data.y is the dimensionality of the response\nlsc_data = odr.Data(row_stack([x, y]), y=1)\nlsc_model = odr.Model(f_4, implicit=True, estimate=calc_estimate, fjacd=jacd, fjacb=jacb)\nlsc_odr = odr.ODR(lsc_data, lsc_model)\nlsc_odr.set_job(deriv=3) # use user derivatives function without checking\nlsc_out = lsc_odr.run()\n\nxc_4, yc_4, R_4 = lsc_out.beta\nRi_4 = calc_R([xc_4, yc_4])\nresidu_4 = sum((Ri_4 - R_4) ** 2)\nresidu2_4 = sum((Ri_4 ** 2 - R_4 ** 2) ** 2)\nncalls_4 = f_4.ncalls\n\nprint(\"Method 4 :\")\nprint(\"Functions calls : f_4=%d jacb=%d jacd=%d\" % (f_4.ncalls, jacb.ncalls, jacd.ncalls))\n\n# Summary\nfmt = '%-18s %10.5f %10.5f %10.5f %10d %10.6f %10.6f %10.2f'\n# print('\\n%-18s' + ' %10s' * 7) % tuple('METHOD Xc Yc Rc nb_calls std(Ri) residu residu2'.split())\nprint('-' * (18 + 7 * (10 + 1)))\nprint(fmt % (method_1, xc_1, yc_1, R_1, 1, Ri_1.std(), residu_1, residu2_1))\nprint(fmt % (method_2, xc_2, yc_2, R_2, ncalls_2, Ri_2.std(), residu_2, residu2_2))\nprint(fmt % (method_3, xc_3, yc_3, R_3, ncalls_3, Ri_3.std(), residu_3, residu2_3))\nprint(fmt % (method_4, xc_4, yc_4, R_4, ncalls_4, Ri_4.std(), residu_4, residu2_4))\n\n# plotting functions\n\nfrom matplotlib import pyplot as p, cm\n\n\ndef plot_all(residu2=False, basename='circle'):\n \"\"\" Draw data points, best fit circles and center for the three methods,\n and adds the iso contours corresponding to the fiel residu or residu2\n \"\"\"\n\n f = p.figure(figsize=(6.5, 4.5), dpi=90, facecolor='white')\n p.axis('equal')\n\n p.plot(x, y, 'ro', label='data', ms=9, mec='b', mew=1)\n\n theta_fit = linspace(-pi, pi, 180)\n\n x_fit1 = xc_1 + R_1 * cos(theta_fit)\n y_fit1 = yc_1 + R_1 * sin(theta_fit)\n p.plot(x_fit1, y_fit1, 'b-', label=method_1, lw=2)\n\n x_fit2 = xc_2 + R_2 * cos(theta_fit)\n y_fit2 = yc_2 + R_2 * sin(theta_fit)\n p.plot(x_fit2, y_fit2, 'k--', label=method_2, lw=2)\n\n x_fit3 = xc_3 + R_3 * cos(theta_fit)\n y_fit3 = yc_3 + R_3 * sin(theta_fit)\n p.plot(x_fit3, y_fit3, 'r-.', label=method_3, lw=2)\n\n p.plot([xc_1], [yc_1], 'bD', mec='y', mew=1)\n p.plot([xc_2], [yc_2], 'gD', mec='r', mew=1)\n p.plot([xc_3], [yc_3], 'kD', mec='w', mew=1)\n\n # draw\n p.xlabel('x')\n p.ylabel('y')\n p.legend(loc='best', labelspacing=0.1)\n\n # plot the residu fields\n nb_pts = 100\n\n p.draw()\n xmin, xmax = p.xlim()\n ymin, ymax = p.ylim()\n\n vmin = min(xmin, ymin)\n vmax = max(xmax, ymax)\n\n xg, yg = ogrid[vmin:vmax:nb_pts * 1j, vmin:vmax:nb_pts * 1j]\n xg = xg[..., 
newaxis]\n yg = yg[..., newaxis]\n\n Rig = sqrt((xg - x) ** 2 + (yg - y) ** 2)\n Rig_m = Rig.mean(axis=2)[..., newaxis]\n\n if residu2:\n residu = sum((Rig ** 2 - Rig_m ** 2) ** 2, axis=2)\n else:\n residu = sum((Rig - Rig_m) ** 2, axis=2)\n\n lvl = exp(linspace(log(residu.min()), log(residu.max()), 15))\n\n p.contourf(xg.flat, yg.flat, residu.T, lvl, alpha=0.75, cmap=cm.Purples_r)\n cbar = p.colorbar(format='%.f')\n\n if residu2:\n cbar.set_label('Residu_2')\n else:\n cbar.set_label('Residu')\n\n p.xlim(xmin=vmin, xmax=vmax)\n p.ylim(ymin=vmin, ymax=vmax)\n\n p.grid()\n p.title('Leasts Squares Circle')\n p.savefig('%s_residu%d.png' % (basename, 2 if residu2 else 1))\n\n\nplot_all(residu2=False, basename='circle')\nplot_all(residu2=True, basename='circle')\n\np.show()\n# vim: set et sts=4 sw=4:\n", "sub_path": "fit_circle2.py", "file_name": "fit_circle2.py", "file_ext": "py", "file_size_in_byte": 7870, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "functools.wraps", "line_number": 72, "usage_type": "call"}, {"api_name": "scipy.optimize.leastsq", "line_number": 100, "usage_type": "call"}, {"api_name": "scipy.optimize", "line_number": 100, "usage_type": "name"}, {"api_name": "scipy.odr.Data", "line_number": 132, "usage_type": "call"}, {"api_name": "scipy.odr", "line_number": 132, "usage_type": "name"}, {"api_name": "scipy.odr.Model", "line_number": 133, "usage_type": "call"}, {"api_name": "scipy.odr", "line_number": 133, "usage_type": "name"}, {"api_name": "scipy.odr.ODR", "line_number": 134, "usage_type": "call"}, {"api_name": "scipy.odr", "line_number": 134, "usage_type": "name"}, {"api_name": "scipy.odr.Data", "line_number": 200, "usage_type": "call"}, {"api_name": "scipy.odr", "line_number": 200, "usage_type": "name"}, {"api_name": "scipy.odr.Model", "line_number": 201, "usage_type": "call"}, {"api_name": "scipy.odr", "line_number": 201, "usage_type": "name"}, {"api_name": "scipy.odr.ODR", "line_number": 202, "usage_type": "call"}, {"api_name": "scipy.odr", "line_number": 202, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 234, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 234, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 235, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 237, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 237, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 243, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 247, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 251, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 251, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 253, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 253, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 254, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 254, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 255, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 255, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.xlabel", "line_number": 258, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 258, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 259, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 259, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 260, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 260, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 265, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 265, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 266, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 267, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 267, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contourf", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.cm.Purples_r", "line_number": 286, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 294, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 294, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 295, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 295, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 297, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 297, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 298, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 298, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 299, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 299, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 305, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 305, "usage_type": "name"}]} +{"seq_id": "338382488", "text": "import pandas as pd\nimport numpy as np\nimport gc\nfrom sklearn.metrics import r2_score\n\ndef get_trend_data(df_t):\n \"\"\" Returns the trend data for a given time series data.\n 1. Avg value\n 2. Std. Dev\n 3. Avg Growth Rate\n 4. 
Polynomial coefficients \"\"\"\n\n # df_t : df_time_series\n\n if df_t is None:\n return \"Not enough last n years data\"\n\n df = pd.DataFrame()\n\n # Recent value\n df['most_recent_value'] = df_t[[df_t.columns.tolist()[-1]]]\n # Mean and std_dev\n df['mean'] = df_t.mean(axis=1)\n df['std'] = df_t.std(axis=1)\n\n # Avg growth rate\n cols = df_t.columns.tolist()\n # t+1 step\n df_t_copy = df_t.copy()\n df_t_1 = df_t.copy()\n df_t_1 = df_t_1.drop([cols[0]],axis=1).as_matrix()\n df_t_copy = df_t_copy.drop([cols[-1]],axis=1).as_matrix()\n\n df_growth = df_t_1 - df_t_copy\n\n df_growth_rate = np.divide(df_growth,df_t_copy)\n\n df['avg_growth_rate'] = np.mean(df_growth_rate, axis=1)\n\n # Linear regression line\n years = df_t.columns.tolist()\n #x = np.asarray([i-years[0] for i in years])\n x = np.asarray(years)\n df_t_T = df_t.transpose()\n y = df_t_T.as_matrix()\n\n # fit polynomial\n slope = {}\n constant = {}\n r_squared = {}\n for i,feature in enumerate(df_t_T.columns.tolist()):\n y = df_t_T[feature].values\n coeff = np.polyfit(x,y,1)\n p = np.poly1d(coeff)\n y_pred = p(x)\n slope[feature] = coeff[0]\n constant[feature] = coeff[1]\n try:\n r_squared[feature] = r2_score(y,y_pred)\n except ValueError:\n r_squared[feature] = float('nan')\n\n df['slope'] = pd.Series(slope)\n df['constant'] = pd.Series(constant)\n df['r2'] = pd.Series(r_squared)\n\n\n del df_t_1\n del df_t_copy\n del df_t\n gc.collect()\n\n return df\n", "sub_path": "data/scripts/simplified_finance_stats/capture_trends.py", "file_name": "capture_trends.py", "file_ext": "py", "file_size_in_byte": 1820, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.DataFrame", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.divide", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.poly1d", "line_number": 54, "usage_type": "call"}, {"api_name": "sklearn.metrics.r2_score", "line_number": 59, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 63, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 65, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "435105235", "text": "import typing\nfrom typing import Any, Callable, List, Tuple, Union\n\nimport IPython.display as display\nimport cv2\nimport numpy as np\nimport os, sys\nfrom PIL import Image\n\nfrom .abc_interpreter import Interpreter\nfrom ..data_processor.readers import preprocess_image, read_image\nfrom ..data_processor.visualizer import visualize_heatmap\n\n\nclass ScoreCAMInterpreter(Interpreter):\n \"\"\"\n Score CAM Interpreter.\n\n More details regarding the Score CAM method can be found in the original paper:\n https://arxiv.org/abs/1910.01279\n \"\"\"\n\n def __init__(self,\n paddle_model,\n trained_model_path,\n use_cuda=True,\n model_input_shape=[3, 224, 224]) -> None:\n \"\"\"\n Initialize the GradCAMInterpreter.\n\n Args:\n paddle_model (callable): A user-defined function that gives access to model predictions.\n It takes the following arguments:\n\n - data: Data inputs.\n and outputs predictions. 
See the example at the end of ``interpret()``.\n trained_model_path (str): The pretrained model directory.\n use_cuda (bool, optional): Whether or not to use cuda. Default: True\n model_input_shape (list, optional): The input shape of the model. Default: [3, 224, 224]\n \"\"\"\n Interpreter.__init__(self)\n self.paddle_model = paddle_model\n self.trained_model_path = trained_model_path\n self.use_cuda = use_cuda\n self.model_input_shape = model_input_shape\n self.paddle_prepared = False\n\n def interpret(self,\n data,\n target_layer_name,\n label=None,\n visual=True,\n save_path=None):\n \"\"\"\n Main function of the interpreter.\n\n Args:\n data (str or numpy.ndarray): The input image filepath or numpy array.\n target_layer_name (str): The target layer to calculate gradients.\n label (int, optional): The target label to analyze. If None, the most likely label will be used. Default: None\n visual (bool, optional): Whether or not to visualize the processed image. Default: True\n save_path (str, optional): The filepath to save the processed image. If None, the image will not be saved. Default: None\n\n :return: interpretations\n :rtype: numpy.ndarray\n\n Example::\n\n import interpretdl as it\n def paddle_model(image_input):\n import paddle.fluid as fluid\n class_num = 1000\n model = ResNet50()\n logits = model.net(input=image_input, class_dim=class_num)\n probs = fluid.layers.softmax(logits, axis=-1)\n return probs\n\n scorecam = it.ScoreCAMInterpreter(paddle_model,\n \"assets/ResNet50_pretrained\", True)\n scorecam.interpret(\n 'assets/catdog.png',\n 'res5c.add.output.5.tmp_0',\n label=None,\n visual=True,\n save_path='assets/scorecam_test.jpg')\n \"\"\"\n\n if isinstance(data, str):\n with open(data, 'rb') as f:\n org = Image.open(f)\n org = org.convert('RGB')\n org = np.array(org)\n img = read_image(data, crop_size=self.model_input_shape[1])\n data = preprocess_image(img)\n else:\n org = data.copy()  # copy the input array for later visualization\n\n b, c, h, w = data.shape\n\n self.target_layer_name = target_layer_name\n self.label = label\n\n if not self.paddle_prepared:\n self._paddle_prepare()\n\n if self.label is None:\n _, probs = self.predict_fn(data)\n self.label = np.argmax(probs, axis=1)\n\n feature_map, _ = self.predict_fn(data)\n attributions = np.zeros((1, 1, h, w))\n\n for i in range(feature_map.shape[1]):\n feature_channel = np.expand_dims(feature_map[:, i, :, :], 1)[0][0]\n feature_channel = cv2.resize(feature_channel, (h, w))\n norm_feature_channel = (\n feature_channel - feature_channel.min()) / (\n feature_channel.max() - feature_channel.min())\n _, probs = self.predict_fn(data * norm_feature_channel)\n score = probs[0][self.label]\n attributions += score * feature_channel\n\n attributions = np.maximum(attributions, 0)\n attributions_min, attributions_max = attributions.min(\n ), attributions.max()\n\n if attributions_min == attributions_max:\n return None\n\n interpretations = (attributions - attributions_min) / (\n attributions_max - attributions_min)\n\n visualize_heatmap(interpretations[0][0], org, visual, save_path)\n\n return interpretations\n\n def _paddle_prepare(self, predict_fn=None):\n if predict_fn is None:\n import paddle.fluid as fluid\n startup_prog = fluid.Program()\n main_program = fluid.Program()\n with fluid.program_guard(main_program, startup_prog):\n with fluid.unique_name.guard():\n data_op = fluid.data(\n name='data',\n shape=[1] + self.model_input_shape,\n dtype='float32')\n probs = self.paddle_model(data_op)\n if isinstance(probs, tuple):\n probs = probs[0]\n trainable_vars = 
list(main_program.list_vars())\n for v in trainable_vars:\n if v.name == self.target_layer_name:\n conv = v\n\n main_program = main_program.clone(for_test=True)\n\n if self.use_cuda:\n gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))\n place = fluid.CUDAPlace(gpu_id)\n else:\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n fluid.io.load_persistables(exe, self.trained_model_path,\n main_program)\n\n def predict_fn(data):\n feature_map, probs_out = exe.run(main_program,\n feed={'data': data},\n fetch_list=[conv, probs])\n return feature_map, probs_out\n\n self.predict_fn = predict_fn\n self.paddle_prepared = True\n", "sub_path": "interpretdl/interpreter/score_cam.py", "file_name": "score_cam.py", "file_ext": "py", "file_size_in_byte": 6550, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "abc_interpreter.Interpreter", "line_number": 15, "usage_type": "name"}, {"api_name": "abc_interpreter.Interpreter.__init__", "line_number": 41, "usage_type": "call"}, {"api_name": "abc_interpreter.Interpreter", "line_number": 41, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 90, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 90, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "data_processor.readers.read_image", "line_number": 93, "usage_type": "call"}, {"api_name": "data_processor.readers.preprocess_image", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 123, "usage_type": "call"}, {"api_name": "data_processor.visualizer.visualize_heatmap", "line_number": 133, "usage_type": "call"}, {"api_name": "paddle.fluid.Program", "line_number": 140, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 140, "usage_type": "name"}, {"api_name": "paddle.fluid.Program", "line_number": 141, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 141, "usage_type": "name"}, {"api_name": "paddle.fluid.program_guard", "line_number": 142, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 142, "usage_type": "name"}, {"api_name": "paddle.fluid.unique_name.guard", "line_number": 143, "usage_type": "call"}, {"api_name": "paddle.fluid.unique_name", "line_number": 143, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 143, "usage_type": "name"}, {"api_name": "paddle.fluid.data", "line_number": 144, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 144, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 159, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 159, "usage_type": "attribute"}, {"api_name": "paddle.fluid.CUDAPlace", "line_number": 160, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 160, "usage_type": "name"}, {"api_name": "paddle.fluid.CPUPlace", "line_number": 162, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 162, "usage_type": "name"}, {"api_name": "paddle.fluid.Executor", "line_number": 163, "usage_type": "call"}, {"api_name": "paddle.fluid", "line_number": 163, "usage_type": "name"}, {"api_name": "paddle.fluid.io.load_persistables", "line_number": 164, 
"usage_type": "call"}, {"api_name": "paddle.fluid.io", "line_number": 164, "usage_type": "attribute"}, {"api_name": "paddle.fluid", "line_number": 164, "usage_type": "name"}]} +{"seq_id": "148598407", "text": "import requests\nimport time\nimport sys\n\n\ndef convertHexData(row_birthmark):\n hex_row = \"\"\n for j in row_birthmark:\n for i in j.split(' '):\n try:\n hex_row += hex(int(i)).replace('0x', '')\n except:\n return row_birthmark\n hex_row += ','\n return hex_row\n\n\nimport os\nos.makedirs('search_result', exist_ok=True)\n\n# python3 postFile length birthmark\npostFile = sys.argv[1]\nlength = sys.argv[2]\nbirthmark = sys.argv[3]\nwith open(postFile, 'r') as f:\n import csv\n for row in csv.reader(f):\n if len(row[3:]) <= int(length):\n continue\n if birthmark == 'uc':\n postData = ','.join(row[3:])\n else:\n postData = convertHexData(row[3:])\n\n if birthmark == 'uc':\n payload = {'query': 'data: ' + postData}\n else:\n payload = {'query': 'encode_data: ' + postData}\n\n start = time.time()\n sumQtime = 0\n\n r = requests.post(\n 'http://localhost:8983/solr/' + birthmark +\n '/query?fl=output,score,place,barthmark,data&rows=1000000&sort=score%20desc&wt=json',\n json=payload)\n # print(r.json())\n maxScore = float(r.json()['response']['maxScore'])\n starts = 1000\n with open('search_result/'+row[0]+birthmark, 'a') as write_file:\n write_file.write(','.join(row) + '\\n')\n try:\n if len(list(r.json()['response']['docs'])) == 0:\n sys.exit(1)\n except:\n sys.exit(1)\n for result in r.json()['response']['docs']:\n write_file.write('{0},{1},{2},{3}\\n'.format(\n result['output'], float(result['score'])/maxScore, result['barthmark'], result['data'].replace('quot;', '')))\n\n # qtime\n sumQtime += r.json()['responseHeader']['QTime']\n elapsed_time = time.time() - start\n\n elapsed_time = time.time() - start\n print(\"elapsed_time:{0}\".format(elapsed_time) + \"[sec]\")\n", "sub_path": "docs/docs_for_research/procudure-of-experimental/FP/row_search.py", "file_name": "row_search.py", "file_ext": "py", "file_size_in_byte": 2045, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.makedirs", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 27, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 43, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "time.time", "line_number": 63, "usage_type": "call"}, {"api_name": "time.time", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "268607709", "text": "from flask import Blueprint, render_template, redirect, url_for, flash, request\nfrom app.models import User, Posts, Comment, Admin\nfrom app.forms import AdminForm\nfrom app.extensions import db\n\nadmin = Blueprint('admin', __name__)\n\n\n@admin.route('/', methods=['GET', 'POST'])\ndef index():\n form = AdminForm()\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n a = Admin.query.filter_by(username=username).first()\n if not a:\n flash(\"管理员账号错误\")\n elif a.verify_password(password=password):\n flash(\"登录成功\")\n return 
redirect(url_for('admin.manage_users', uname=a.username))\n else:\n flash(\"密码错误\")\n return render_template('admin/manage_login.html', form=form)\n\n\n@admin.route('/manage_users/<uname>', methods=['GET', 'POST'])\ndef manage_users(uname):\n ad = Admin.query.filter_by(username=uname).first()\n if not ad:\n flash(\"请先登录\")\n return redirect(url_for('admin.index'))\n page = request.args.get('page', 1, type=int) # page number the user wants to view\n pagination = User.query.filter_by().order_by(User.id.asc()).paginate(page, per_page=10, error_out=False)\n users = pagination.items\n return render_template('admin/manage_users.html', uname=uname, pagination=pagination, users=users)\n\n\n@admin.route('/manage_posts/<uname>', methods=['GET', 'POST'])\ndef manage_posts(uname):\n ad = Admin.query.filter_by(username=uname).first()\n if not ad:\n flash(\"请先登录\")\n return redirect(url_for('admin.index'))\n page = request.args.get('page', 1, type=int) # page number the user wants to view\n pagination = Posts.query.filter_by().order_by(Posts.id.asc()).paginate(page, per_page=10, error_out=False)\n posts = pagination.items\n return render_template('admin/manage_posts.html', uname=uname, pagination=pagination, posts=posts)\n\n\n@admin.route('/manage_comments/<uname>', methods=['GET', 'POST'])\ndef manage_comments(uname):\n ad = Admin.query.filter_by(username=uname).first()\n if not ad:\n flash(\"请先登录\")\n return redirect(url_for('admin.index'))\n page = request.args.get('page', 1, type=int) # page number the user wants to view\n pagination = Comment.query.filter_by().order_by(Comment.id.asc()).paginate(page, per_page=10, error_out=False)\n comments = pagination.items\n return render_template('admin/manage_comments.html', uname=uname, pagination=pagination, comments=comments)\n", "sub_path": "app/views/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 2513, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "flask.Blueprint", "line_number": 6, "usage_type": "call"}, {"api_name": "app.forms.AdminForm", "line_number": 11, "usage_type": "call"}, {"api_name": "app.models.Admin.query.filter_by", "line_number": 15, "usage_type": "call"}, {"api_name": "app.models.Admin.query", "line_number": 15, "usage_type": "attribute"}, {"api_name": "app.models.Admin", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "app.models.Admin.query.filter_by", "line_number": 28, "usage_type": "call"}, {"api_name": "app.models.Admin.query", "line_number": 28, "usage_type": "attribute"}, {"api_name": "app.models.Admin", "line_number": 28, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "app.models.User.query.filter_by", "line_number": 33, "usage_type": "call"}, {"api_name": 
"app.models.User.query", "line_number": 33, "usage_type": "attribute"}, {"api_name": "app.models.User", "line_number": 33, "usage_type": "name"}, {"api_name": "app.models.User.id.asc", "line_number": 33, "usage_type": "call"}, {"api_name": "app.models.User.id", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 35, "usage_type": "call"}, {"api_name": "app.models.Admin.query.filter_by", "line_number": 40, "usage_type": "call"}, {"api_name": "app.models.Admin.query", "line_number": 40, "usage_type": "attribute"}, {"api_name": "app.models.Admin", "line_number": 40, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "app.models.Posts.query.filter_by", "line_number": 45, "usage_type": "call"}, {"api_name": "app.models.Posts.query", "line_number": 45, "usage_type": "attribute"}, {"api_name": "app.models.Posts", "line_number": 45, "usage_type": "name"}, {"api_name": "app.models.Posts.id.asc", "line_number": 45, "usage_type": "call"}, {"api_name": "app.models.Posts.id", "line_number": 45, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 47, "usage_type": "call"}, {"api_name": "app.models.Admin.query.filter_by", "line_number": 52, "usage_type": "call"}, {"api_name": "app.models.Admin.query", "line_number": 52, "usage_type": "attribute"}, {"api_name": "app.models.Admin", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 56, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 56, "usage_type": "name"}, {"api_name": "app.models.Comment.query.filter_by", "line_number": 57, "usage_type": "call"}, {"api_name": "app.models.Comment.query", "line_number": 57, "usage_type": "attribute"}, {"api_name": "app.models.Comment", "line_number": 57, "usage_type": "name"}, {"api_name": "app.models.Comment.id.asc", "line_number": 57, "usage_type": "call"}, {"api_name": "app.models.Comment.id", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "535337902", "text": "\"\"\"Support for Google Assistant SDK broadcast notifications.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Any\n\nfrom homeassistant.components.notify import ATTR_TARGET, BaseNotificationService\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.typing import ConfigType, DiscoveryInfoType\n\nfrom .helpers import async_send_text_commands\n\n\nasync def async_get_service(\n hass: HomeAssistant,\n config: ConfigType,\n discovery_info: DiscoveryInfoType | None = None,\n) -> BaseNotificationService:\n \"\"\"Get the broadcast notification service.\"\"\"\n return BroadcastNotificationService(hass)\n\n\nclass BroadcastNotificationService(BaseNotificationService):\n 
\"\"\"Implement broadcast notification service.\"\"\"\n\n def __init__(self, hass: HomeAssistant) -> None:\n \"\"\"Initialize the service.\"\"\"\n self.hass = hass\n\n async def async_send_message(self, message: str = \"\", **kwargs: Any) -> None:\n \"\"\"Send a message.\"\"\"\n if not message:\n return\n\n commands = []\n targets = kwargs.get(ATTR_TARGET)\n if not targets:\n commands.append(f\"broadcast {message}\")\n else:\n for target in targets:\n commands.append(f\"broadcast to {target} {message}\")\n await async_send_text_commands(commands, self.hass)\n", "sub_path": "homeassistant/components/google_assistant_sdk/notify.py", "file_name": "notify.py", "file_ext": "py", "file_size_in_byte": 1333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "homeassistant.core.HomeAssistant", "line_number": 14, "usage_type": "name"}, {"api_name": "homeassistant.helpers.typing.ConfigType", "line_number": 15, "usage_type": "name"}, {"api_name": "homeassistant.helpers.typing.DiscoveryInfoType", "line_number": 16, "usage_type": "name"}, {"api_name": "homeassistant.components.notify.BaseNotificationService", "line_number": 17, "usage_type": "name"}, {"api_name": "homeassistant.components.notify.BaseNotificationService", "line_number": 22, "usage_type": "name"}, {"api_name": "homeassistant.core.HomeAssistant", "line_number": 25, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 29, "usage_type": "name"}, {"api_name": "homeassistant.components.notify.ATTR_TARGET", "line_number": 35, "usage_type": "argument"}, {"api_name": "helpers.async_send_text_commands", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "74996203", "text": "#!/usr/bin/env python3\n\n\"\"\"\nQUESTION: \"How was working with MongoDB different from working with PostgreSQL? What was easier, and what was harder?\"\nANSWER: \"Data input into MongoDB is easier than into PostgreSQL, as it works with dictionaries. 
Data analysis in MongoDB\nis more complicated, because you can not use SQL commands.\"\n\"\"\"\n\nimport pymongo\nimport sqlite3\n\nclient = pymongo.MongoClient(\n \"mongodb+srv://Okocha:qoH3DyAYDe8KREAC@cluster0-5sv8d.mongodb.net/test?retryWrites=true&w=majority\")\ndb = client.rpg\n\nconn = sqlite3.connect('rpg_db.sqlite3')\ncurs = conn.cursor()\n\nget_characters = 'SELECT * FROM charactercreator_character;'\ncharacters = curs.execute(get_characters).fetchall()\n\ncurs.close()\n\nfor character in characters:\n insert_character = {\n 'character_id': character[0],\n 'name': character[1],\n 'level': character[2],\n 'exp': character[3],\n 'hp': character[4],\n 'strength': character[5],\n 'intelligence': character[6],\n 'dexterity': character[7],\n 'wisdom': character[8]\n }\n db.charactercreator_character.insert_one(insert_character)\n\nprint(db.charactercreator_character.find_one())\n", "sub_path": "module3-nosql-and-document-oriented-databases/rpg2mongo.py", "file_name": "rpg2mongo.py", "file_ext": "py", "file_size_in_byte": 1174, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pymongo.MongoClient", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "280559742", "text": "import inspect\nimport functools\nfrom io import StringIO\nfrom pathlib import Path\nimport pyecore.ecore as Ecore\nfrom pyecore.utils import DynamicEPackage\nfrom pyecore.resources import ResourceSet, Resource, URI\nfrom mako.template import Template\nfrom mako.runtime import Context, Undefined\n\n\ndef in_file(path, append, body):\n p = Path(path)\n p.parent.mkdir(parents=True, exist_ok=True)\n mode = 'w+' if append else 'w'\n with open(path, mode) as f:\n f.write(body)\n return \"\"\n\n\nfile_tag = \"\"\"\n<%namespace name=\"motra\">\n<%def name=\"file(path, append=True)\"><% body = capture(caller.body) %> ${in_file(path, append, body)}\n\n\"\"\"\n\n\nclass Transformation(object):\n def __init__(self, name):\n self.name = name\n self.mains = []\n self.file_tag = file_tag\n self.full_template = self.file_tag\n self.metamodels = set()\n self._polymorphic_calls = {}\n self.helpers = []\n\n def run(self, model, resource_set=None, **kwargs):\n if isinstance(model, Resource):\n model = model.contents[0] # FIXME deal with mutliple root resources\n elif isinstance(model, (str, URI)):\n rset = resource_set if resource_set else ResourceSet()\n for metamodel in self.metamodels:\n rset.metamodel_registry[metamodel.nsURI] = metamodel\n resource = rset.get_resource(model)\n model = resource.contents[0] # FIXME deal with mutliple root resources\n buf = StringIO()\n myprops = {'in_file': in_file}\n myprops.update(**kwargs)\n for f in self.helpers:\n myprops[f.__name__] = f\n for name, templates in self._polymorphic_calls.items():\n myprops[name] = templates[0]\n for metamodel in self.metamodels:\n myprops[metamodel.name] = DynamicEPackage(metamodel)\n\n ctx = Context(buf,**myprops)\n sp = inspect.currentframe()\n sp.f_globals[\"mycontext\"] = ctx\n\n self.template = Template\n for element in (model, *model.eAllContents()):\n for (fun, pname, etype), template in self.mains:\n if isinstance(element, etype):\n params = {pname: element}\n template = Template(self.full_template)\n template.get_def(fun.__name__).render_context(ctx, element)\n del sp.f_globals[\"mycontext\"]\n return buf.getvalue()\n\n def _register_template(self, f):\n def_template = \"\"\"\n<%def 
name=\"{}({})\">{}\n\"\"\".format(f.__name__, (', '.join(x for x in inspect.signature(f).parameters)), f.__doc__)\n self.full_template += def_template\n\n def main(self, f):\n cached_fun = functools.lru_cache()(f)\n if not f.__doc__:\n return cached_fun\n parameter = next(iter(inspect.signature(f).parameters.values()))\n self.mains.append(((f, parameter.name, parameter.annotation), cached_fun))\n ec = parameter.annotation\n if hasattr(ec, '_staticEClass'):\n self.metamodels.add(parameter.annotation.eClass.ePackage)\n elif isinstance(ec, Ecore.EObject):\n self.metamodels.add(eclass.ePackage)\n self._register_template(f)\n return cached_fun\n\n def template(self, f=None, when=None):\n if not f:\n return functools.partial(self.template,\n when=when)\n f.when = when\n\n @functools.wraps(f)\n def inner(*args, **kwargs):\n try:\n var_name = f.__code__.co_varnames[0]\n index = f.__code__.co_varnames.index(var_name)\n self_parameter = args[index]\n except IndexError:\n self_parameter = kwargs[var_name]\n candidates = self._polymorphic_calls[f.__name__]\n for candidate in candidates:\n candidate = candidate.__wrapped__.__wrapped__\n parameter = next(iter(inspect.signature(candidate).parameters.values()))\n if isinstance(self_parameter, parameter.annotation):\n # func = candidate\n # break\n if not candidate.when or candidate.when(*args, **kwargs):\n func = candidate\n break\n else:\n return Undefined\n # Create object for the context\n sp = inspect.currentframe()\n try:\n ctx = sp.f_globals[\"mycontext\"]\n except KeyError:\n raise RuntimeError(\"Template cannot be executed outside of the \"\n \"the transformation.\")\n old_io = ctx._buffer_stack[0]\n buf = StringIO()\n ctx._buffer_stack[0] = buf\n func.template.get_def(func.__name__).render_context(ctx, *args, **kwargs)\n ctx._buffer_stack[0] = old_io\n return buf.getvalue()\n cached_fun = functools.lru_cache()(inner)\n self._polymorphic_calls.setdefault(f.__name__,[]).append(cached_fun)\n if not f.__doc__:\n return cached_fun\n def_template = \"\"\"<%def name=\"{}({})\">{}\"\"\".format(f.__name__, (', '.join(x for x in inspect.signature(f).parameters)), f.__doc__)\n f.template = Template(def_template)\n parameter = next(iter(inspect.signature(f).parameters.values()))\n f.f_parameter = parameter\n self.metamodels.add(parameter.annotation.eClass.ePackage)\n return cached_fun\n\n def helper(self, f):\n self.helpers.append(f)\n", "sub_path": "motra/m2t.py", "file_name": "m2t.py", "file_ext": "py", "file_size_in_byte": 5548, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pathlib.Path", "line_number": 13, "usage_type": "call"}, {"api_name": "pyecore.resources.Resource", "line_number": 39, "usage_type": "argument"}, {"api_name": "pyecore.resources.URI", "line_number": 41, "usage_type": "name"}, {"api_name": "pyecore.resources.ResourceSet", "line_number": 42, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 47, "usage_type": "call"}, {"api_name": "pyecore.utils.DynamicEPackage", "line_number": 55, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 57, "usage_type": "call"}, {"api_name": "inspect.currentframe", "line_number": 58, "usage_type": "call"}, {"api_name": "mako.template.Template", "line_number": 61, "usage_type": "name"}, {"api_name": "mako.template.Template", "line_number": 66, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 74, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 78, 
"usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 81, "usage_type": "call"}, {"api_name": "pyecore.ecore.EObject", "line_number": 86, "usage_type": "attribute"}, {"api_name": "pyecore.ecore", "line_number": 86, "usage_type": "name"}, {"api_name": "functools.partial", "line_number": 93, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 108, "usage_type": "call"}, {"api_name": "mako.runtime.Undefined", "line_number": 116, "usage_type": "name"}, {"api_name": "inspect.currentframe", "line_number": 118, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 125, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 97, "usage_type": "call"}, {"api_name": "functools.lru_cache", "line_number": 130, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 134, "usage_type": "call"}, {"api_name": "mako.template.Template", "line_number": 135, "usage_type": "call"}, {"api_name": "inspect.signature", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "461858962", "text": "# -*- coding: utf-8 -*-\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nfrom sktime.classification.compose import (\r\n ColumnEnsembleClassifier,\r\n TimeSeriesForestClassifier,\r\n)\r\nimport pandas as pd\r\nfrom sklearn import preprocessing\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n# ==============TimeSeriesForest on fNIRS Data========================================\r\nnamesCol = ['Subject Name', 'Event Name', 'Channel Name', 'Start time', 'End time']\r\n\r\nfor q in range(131):\r\n strVar = 'v' + str(q)\r\n namesCol.append(strVar)\r\n\r\ndf = pd.read_csv('211_AXCPT19_TK_axcaxwr_hb_cs539.csv', header = 0, names = namesCol)\r\n\r\ncol_name = list(df.columns)\r\ntrans_df = pd.DataFrame(data = df, columns = col_name)\r\n\r\n\r\n# Get Y-target -df\r\ny = trans_df['Event Name']\r\n\r\n# Drop target variable and get X-feature- df\r\nX = trans_df.drop(['Event Name'], axis = 1)\r\n\r\n# Splitting the dataset: \r\n#random_state simply sets a seed to the random generator, so that your train-test splits are always deterministic. 
If you don't set a seed, it is different each time.\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)#test_size specifies percetage of split between test and train\r\n#print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)\r\n#print(X_train.head())\r\n\r\ncat_cols = ['Subject Name', 'Channel Name']\r\nenc = preprocessing.LabelEncoder()\r\n\r\nfor col in cat_cols:\r\n X_train[col] = X_train[col].astype('str')\r\n X_test[col] = X_test[col].astype('str')\r\n X_train[col] = enc.fit_transform(X_train[col])\r\n X_test[col] = enc.transform(X_test[col])\r\n\r\n\r\n#isolate the time series\r\nX_train_timedata = X_train[X_train.columns[4:136]]\r\nX_test_timedata = X_test[X_test.columns[4:136]]\r\n\r\n\r\n# Conversion to numpy array\r\nX_train_timedata['combine'] = X_train_timedata.values.tolist()\r\nX_test_timedata['combine'] = X_test_timedata.values.tolist()\r\nX_train_timedata = X_train_timedata['combine']\r\nX_test_timedata = X_test_timedata['combine'] \r\n\r\n#convert to dataframe\r\nX_train_timedata = X_train_timedata.to_frame()\r\nX_test_timedata = X_test_timedata.to_frame()\r\n\r\nts_train = pd.Series(X_train_timedata['combine'].values, index=X_train_timedata.index)\r\nX_ts_train = ts_train.to_frame()\r\n\r\nts_test = pd.Series(X_test_timedata['combine'].values, index=X_test_timedata.index)\r\nX_ts_test = ts_test.to_frame()\r\n\r\nfor row_num in range(0,X_ts_train.shape[0]):\r\n series1 = pd.Series(X_ts_train.iat[row_num,0])\r\n X_ts_train.iat[row_num,0] = series1\r\n\r\nfor row_num in range(0,X_ts_test.shape[0]):\r\n series2 = pd.Series(X_ts_test.iat[row_num,0])\r\n X_ts_test.iat[row_num,0] = series2\r\n\r\n# =======================Column ensembling================================ \r\nNum_Estimator_List =[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\r\n\r\nAc =[] #Accuracy\r\nEf =[] #Efficiency\r\nfor n in Num_Estimator_List:\r\n clf = ColumnEnsembleClassifier(\r\n estimators=[\r\n (\"TSF0\", TimeSeriesForestClassifier(n_estimators=n), [0]),\r\n ]\r\n )\r\n\r\n start_time= time.time()\r\n clf.fit(X_ts_train, y_train)\r\n Efficiency= time.time() - start_time\r\n Ef.append(Efficiency)\r\n\r\n Accuracy = clf.score(X_ts_test, y_test)\r\n Ac.append(Accuracy)\r\n \r\nprint(\"Efficiency is:\\n\", Ef)\r\nprint(\"Accuracy is :\\n\",Ac)\r\n\r\n\r\nfig,axes=plt.subplots() \r\nplt.plot(Num_Estimator_List, Ac, color=\"darkgreen\", marker='o',markerfacecolor='mediumvioletred', markersize=6,linewidth=2, alpha=0.9,linestyle='--', label=\"Accuracy\") \r\nplt.title(\"60-40 Train-Test Split\") \r\nplt.xlabel(\"Number of Estimators\")\r\nplt.ylabel(\"Accuracy\")\r\n\r\nfig,axes=plt.subplots() \r\nplt.plot(Num_Estimator_List, Ef, color=\"steelblue\", marker='o',markerfacecolor='mediumvioletred', markersize=6,linewidth=2, alpha=0.9,linestyle='--', label=\"Efficiency\") \r\nplt.title(\"60-40 Train-Test Split\") \r\nplt.xlabel(\"Number of Estimators\")\r\nplt.ylabel(\"Efficiency (Seconds)\")", "sub_path": "Time Series Forest Classifier/TimeSeriesForest_fNIRS.py", "file_name": "TimeSeriesForest_fNIRS.py", "file_ext": "py", "file_size_in_byte": 3878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 38, 
"usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 38, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 65, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 69, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 73, "usage_type": "call"}, {"api_name": "sktime.classification.compose.ColumnEnsembleClassifier", "line_number": 82, "usage_type": "call"}, {"api_name": "sktime.classification.compose.TimeSeriesForestClassifier", "line_number": 84, "usage_type": "call"}, {"api_name": "time.time", "line_number": 88, "usage_type": "call"}, {"api_name": "time.time", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 107, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 107, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}]} +{"seq_id": "271396181", "text": "import numpy as np\nfrom keras.models import Sequential,Model\nfrom keras.layers import Dense, Flatten, Reshape, Input\nfrom keras.layers import LSTM\nfrom keras.initializers import RandomNormal\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nimport json\nimport csv\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\nfile_path = \"D:\\\\邢老师数据集\\\\singlerack_all.csv\"\nserver_list = []\nconditioner_outlet_temp = []\nconditioner_inlet_temp = []\n\n\nclass Server(object):\n def __init__(self, i):\n self.id = i\n self.inlet_temp = []\n self.outlet_temp = []\n self.cpu = []\n self.memory = []\n\n\ndef strided_app(a, L, S): # Window len = L, Stride len/stepsize = S\n nrows = ((a.size - L) // S) + 1\n n = a.strides[0]\n return np.lib.stride_tricks.as_strided(a, shape=(nrows, L), strides=(S * n, n), writeable=False)\n\n\ndef process_data():\n for i in range(15):\n server_list.append(Server(i))\n with open(file_path, \"r\", encoding='utf-8') as datacsv:\n csvr = csv.reader(datacsv)\n for row in csvr:\n i = 1\n for server in server_list:\n server.outlet_temp.append(float(row[i]))\n i = i + 1\n for server in server_list:\n 
server.inlet_temp.append(float(row[46 - i]))\n i = i + 1\n conditioner_outlet_temp.append(float(row[i]))\n i = i + 1\n conditioner_inlet_temp.append(float(row[i]))\n i = i + 6\n for server in server_list:\n # if(server.id<10):\n # server.cpu.append(float(row[i])/10)\n # else:\n # server.cpu.append(float(row[i]))\n server.cpu.append(float(row[i]) / 100)\n i = i + 6\n\n\ndef format_dataset():\n predict_server_inlet_model_x = []\n predict_server_inlet_model_y = []\n predict_conditioner_inlet_model_x = []\n predict_conditioner_inlet_model_y = conditioner_inlet_temp\n for i in range(len(conditioner_outlet_temp)):\n x_row = []\n y_row = []\n x_row.append(conditioner_outlet_temp[i])\n for server in server_list:\n x_row.append(server.cpu[i])\n y_row.append(server.inlet_temp[i])\n predict_server_inlet_model_x.append(x_row)\n predict_server_inlet_model_y.append(y_row)\n predict_conditioner_inlet_model_x.append(y_row)\n return np.array(predict_server_inlet_model_x), np.array(predict_server_inlet_model_y), np.array(\n predict_conditioner_inlet_model_x), np.array(predict_conditioner_inlet_model_y)\n\n\ndef train_server_model(predict_server_inlet_model_x, predict_server_inlet_model_y):\n server_model = Sequential()\n # server_model.add(LSTM(10, activation=\"relu\", input_shape=(train_x.shape[1], train_x.shape[2]), return_sequences=True,\n # kernel_initializer=RandomNormal()))\n # server_model.add(Flatten())\n server_model.add(Dense(16, activation=\"relu\"))\n server_model.add(Dense(100, activation=\"relu\"))\n # server_model.add(Dense(500, activation=\"relu\"))\n # server_model.add(Dense(100, activation=\"relu\"))\n server_model.add(Dense(15))\n server_model.compile(loss='mean_absolute_error', optimizer='Adadelta')\n checkpoint1 = ModelCheckpoint(\n \"./model/predict_server_inlet_1ConditionerOutletTemp+15ServerCpuUsage_15out_{val_loss:.2f}.hdf5\",\n monitor='val_loss', verbose=1, save_best_only=True,\n mode='min')\n callbacks_list1 = [checkpoint1]\n history1 = server_model.fit(predict_server_inlet_model_x, predict_server_inlet_model_y, epochs=10000,\n batch_size=256,\n validation_split=0.2, verbose=2, callbacks=callbacks_list1)\n\n\ndef train_cpu_usage_model():\n timestep = 60\n predict_horizon=60\n train_x = []\n train_y = []\n for server in server_list:\n cpu = np.array(server.cpu)\n data = strided_app(cpu, timestep + predict_horizon + 1, 1)  # window = history + horizon gap + 1 target\n x = data[:, :-1-predict_horizon]\n y = data[:, -1]\n if isinstance(train_x,list):\n train_x = x\n train_y = y\n else:\n train_x = np.concatenate((train_x, x), axis=0)\n train_y = np.concatenate((train_y, y), axis=0)\n input=Input(shape=(timestep,))\n re=Reshape(target_shape=(timestep, 1))(input)\n lstm=LSTM(120, return_sequences=True)(re)\n lstm1 = LSTM(120)(lstm)\n flatten=lstm1\n dense=Dense(10,activation='relu')(flatten)\n output=Dense(1)(dense)\n model=Model(inputs=[input],outputs=[output])\n model.summary()\n model.compile(loss='mean_absolute_error', optimizer='Adam')\n checkpoint = ModelCheckpoint(\"./model/predict_cpu_usage_ts60_ph60_{val_loss:.2f}.hdf5\",\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n mode='min')\n early_stopping = EarlyStopping(monitor='val_loss', mode='min', min_delta=0.002, patience=3, verbose=1)\n callbacks_list = [checkpoint, early_stopping]\n history = model.fit(train_x, train_y, epochs=10000,\n batch_size=128,\n validation_split=0.02, verbose=1, callbacks=callbacks_list)\n\n\ndef train_conditioner_model(predict_conditioner_inlet_model_x, predict_conditioner_inlet_model_y):\n conditioner_model = Sequential()\n 
conditioner_model.add(Dense(15, activation=\"relu\"))\n conditioner_model.add(Dense(100, activation=\"relu\"))\n conditioner_model.add(Dense(1))\n conditioner_model.compile(loss='mse', optimizer='Adadelta')\n checkpoint2 = ModelCheckpoint(\"./model/predict_condition_inlet_15ServerInletTempin_1out_{val_loss:.2f}.hdf5\",\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n mode='min')\n early_stopping = EarlyStopping(monitor='val_loss', mode='min', min_delta=0.002, patience=10, verbose=1)\n callbacks_list2 = [checkpoint2]\n history2 = conditioner_model.fit(predict_conditioner_inlet_model_x, predict_conditioner_inlet_model_y, epochs=10000,\n batch_size=256,\n validation_split=0.2, verbose=2, callbacks=callbacks_list2)\n\n\nif __name__ == \"__main__\":\n process_data()\n # predict_server_inlet_model_x,\\\n # predict_server_inlet_model_y, \\\n # predict_conditioner_inlet_model_x,\\\n # predict_conditioner_inlet_model_y = format_dataset()\n #\n # train_server_model(predict_server_inlet_model_x, predict_server_inlet_model_y)\n\n # train_conditioner_model(predict_conditioner_inlet_model_x,predict_conditioner_inlet_model_y)\n # res_y=server_model.predict(predict_server_inlet_model_x)\n # while(True):\n # pass\n\n train_cpu_usage_model()\n", "sub_path": "core/prediction_model/train_model.py", "file_name": "train_model.py", "file_ext": "py", "file_size_in_byte": 6903, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.lib.stride_tricks.as_strided", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.lib", "line_number": 29, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 82, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 83, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 86, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.layers.Reshape", "line_number": 115, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 117, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 119, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 120, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 121, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 124, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 129, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 137, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 138, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 139, "usage_type": "call"}, {"api_name": 
"keras.layers.Dense", "line_number": 140, "usage_type": "call"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 142, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 147, "usage_type": "call"}]} +{"seq_id": "598732951", "text": "#!/usr/bin/env python\nimport argparse\nimport re\nimport sys\nimport textwrap\nimport matplotlib\nimport numpy as np\nfrom os import path\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.patches import Patch\n\nfrom cyvcf2 import VCF, Writer\n\nplt.rcParams[\"figure.figsize\"] = (18, 14)\nmatplotlib.use(\"Agg\")\n\nCHROMOSOME_19_ANNOTATION = {\"ucsc\": \"chr19\", \"ensembl\": \"19\", \"genbank\": \"CM000681.2\"}\n\n# Required headers for panel input files, either kirimp (kirimp.uk1.snp.info.csv) or custom with the required fields\nKIRIMP_HEADER = [\"id\", \"position\", \"allele0\", \"allele1\", \"allele1_frequency\"]\nCUSTOM_HEADER = [\"chrom\", \"pos\", \"a0\", \"a1\", \"freq\"]\n\nCOMPLEMENT = {\"A\": \"T\", \"T\": \"A\", \"G\": \"C\", \"C\": \"G\", \".\": \".\"}\n\n\"\"\" CUSTOM HEADERS \"\"\"\nUPDATED = {\n \"ID\": \"UPD\",\n \"Description\": \"REF and ALT updated based on reference panel frequency\",\n \"Type\": \"Flag\",\n \"Number\": \"A\",\n}\n\nPANEL_FREQ_DIF = {\n \"ID\": \"PFD\",\n \"Description\": \"Alternate frequency difference to reference panel frequency\",\n \"Type\": \"Float\",\n \"Number\": \"A\",\n}\n\nPANEL_FREQ_DIFF = {\n \"ID\": \"PFD\",\n \"Description\": \"Alternate frequency difference to reference panel frequency\",\n \"Type\": \"Float\",\n \"Number\": \"A\",\n}\n\nMISSINGNES = {\n \"ID\": \"MISS\",\n \"Description\": \"Missing Genotype Frequency\",\n \"Type\": \"Float\",\n \"Number\": \"A\",\n}\n\nMAF = {\n \"ID\": \"MAF\",\n \"Description\": \"Minor Allele Frequency\",\n \"Type\": \"Float\",\n \"Number\": \"A\",\n}\n\ntoml_header = \"\"\"\n[encoder_settings]\noutlier_threshold=%s\nminimum_ambigious_threshold=%f\nmaximum_ambigious_threshold=%f\nreference_panel_type=%s\nchromosome_annotation=%s\nambigious_dropped=%r\ncorrect_complement=%r\n\n\n[variant]\n\n\n\"\"\"\n\nvariant_toml = \"\"\"\n [variant.%s]\n status = %s\n reason = %s\n frequency = %f\n panel_frequency=%r\n updated_frequency=%r\n minor_allele_frequency=%r\n missing_genotype_frequency=%r\n panel_frequency_difference=%r\n\n\n\"\"\"\n\n\"\"\" Class to represent genotypes in 0/1 format might not be necessary as I can flip from there\"\"\"\n\n\nclass Genotype(object):\n __slots__ = (\"alleles\", \"phased\")\n FLIP_DICT = {0: 1, 1: 0, -1: -1}\n\n def __init__(self, li):\n self.alleles = li[:-1]\n self.phased = li[-1]\n\n def __str__(self):\n sep = \"/|\"[int(self.phased)]\n return sep.join(\"0123.\"[a] for a in self.alleles)\n\n def flip(self):\n self.alleles = [Genotype.FLIP_DICT[allele] for allele in self.alleles]\n\n def genotype(self):\n return self.alleles + [self.phased]\n\n __repr__ = __str__\n\n\n\"\"\" Class to keep track of VCF file summaries for variants \"\"\"\n\n\nclass VCFSummary(object):\n __slots__ = (\n \"ambigious\",\n \"unknown_alt\",\n \"updated\",\n \"flipped\",\n \"n_in_panel\",\n \"kept\",\n \"__freqs\",\n \"VARIANTS\",\n )\n\n def __init__(self):\n self.VARIANTS = {}\n self.ambigious = 0\n self.unknown_alt = 0\n self.updated = 0\n self.flipped = 0\n self.n_in_panel = 0\n self.kept = 0\n self.__freqs = None\n\n def __str__(self):\n summary_toml = \"\"\"\n [summary]\n n_ambigious_variants=%d\n n_unknown_alt_or_monomorphic=%d\n n_updated=%d\n 
n_flipped_strand=%d\n vcf_variants_in_panel=%d\n vcf_variants_in_panel_after_encoding_snps=%d\n \"\"\".rstrip()\n return textwrap.dedent(summary_toml) % (\n self.ambigious,\n self.unknown_alt,\n self.updated,\n self.flipped,\n self.n_in_panel,\n self.kept,\n )\n\n def add_variant(self, v_id):\n self.VARIANTS[v_id] = {\"freq\": None, \"updated_freq\": None, \"panel_freq\": None}\n\n def add_variant_dict(self, vdict):\n self.VARIANTS.update(vdict)\n\n def freqs(self):\n if not self.__freqs:\n self.__freqs = np.array(\n [\n [\n v[\"freq\"],\n v[\"updated_freq\"],\n v.get(\"MAF\", None),\n v.get(\"MISS\", None),\n v.get(\"PFD\", None),\n v[\"panel_freq\"],\n ]\n for k, v in sorted(self.VARIANTS.items())\n ]\n )\n return self.__freqs\n\n def v_ids(self, original=True):\n if original:\n vids = np.array([v[\"v_id\"] for k, v in sorted(self.VARIANTS.items())])\n else:\n vids = np.array(sorted(self.VARIANTS.keys()))\n return vids\n\n def updates(self):\n return np.array([v[\"updated\"] == 1 for k, v in sorted(self.VARIANTS.items())])\n\n __repr__ = __str__\n\n\ndef main(arguments=None):\n args = parse_arguments()\n vcf = VCF(args[\"vcf_file\"], threads=args[\"threads\"])\n vcf.add_info_to_header(UPDATED)\n vcf.add_info_to_header(PANEL_FREQ_DIFF)\n vcf.add_info_to_header(MISSINGNES)\n vcf.add_info_to_header(MAF)\n\n w = Writer(args[\"output\"], vcf)\n panel = generate_panel_data(\n panel_file=args[\"reference_panel\"],\n chr=args[\"chromosomes\"],\n annotation=args[\"chromosome_annotation\"],\n panel_type=args[\"reference_panel_type\"],\n separator=args[\"separator\"],\n )\n\n vcf_summary = VCFSummary()\n\n print(\n toml_header\n % (\n args[\"outlier_threshold\"],\n args[\"min_ambigious_threshold\"],\n args[\"max_ambigious_threshold\"],\n args[\"reference_panel_type\"],\n args[\"chromosome_annotation\"],\n args[\"ambigious\"],\n args[\"fix_complement_ref_alt\"],\n ),\n file=sys.stderr,\n )\n for variant in vcf:\n status = \"unchanged\"\n reason = \"None\"\n panel_variant_freq = None\n variant_id_end = str(variant.CHROM) + \"_\" + str(variant.end)\n if variant_id_end in panel:\n variant.INFO[\"UPD\"] = 0\n panel_variant = panel[variant_id_end]\n panel_variant_freq = panel_variant[\"freq\"]\n vcf_summary.n_in_panel += 1\n if not variant.ALT:\n print_variant_toml(\n variant, panel_variant[\"freq\"], \"removed\", \"unknown_alt/monomorphic\"\n )\n vcf_summary.unknown_alt += 1\n continue\n if (\n args[\"ambigious\"]\n and variant.aaf > args[\"min_ambigious_threshold\"]\n and variant.aaf < args[\"max_ambigious_threshold\"]\n ):\n vcf_summary.ambigious += 1\n print_variant_toml(\n variant, panel_variant[\"freq\"], \"removed\", \"ambigious_frequency\"\n )\n continue\n if should_recode(variant, panel_variant):\n swap_ref_alt(variant)\n variant.INFO[\"UPD\"] = 1\n vcf_summary.updated += 1\n status = \"updated\"\n reason = \"frequency_unsynced\"\n if (\n should_flipstrand(variant, panel_variant)\n and args[\"fix_complement_ref_alt\"]\n ):\n flipstrand(variant)\n variant.INFO[\"UPD\"] = 1\n vcf_summary.flipped += 1\n status = \"strand_flipped\"\n reason = \"ref/alt_not_in_panel_nucleotides\"\n\n vcf_summary.add_variant(variant_id_end)\n v_freq = variant.INFO.get(\"AF\")\n\n variant.INFO[\"PFD\"] = abs(variant.INFO.get(\"AF\") - panel_variant[\"freq\"])\n variant.INFO[\"MISS\"] = np.sum(variant.gt_types == 2) / len(variant.gt_types)\n variant.INFO[\"MAF\"] = v_freq if v_freq < 0.5 else 1 - v_freq\n\n vcf_summary.VARIANTS[variant_id_end].update(\n {\n \"freq\": variant.aaf,\n \"panel_freq\": 
panel_variant[\"freq\"],\n \"updated_freq\": v_freq,\n \"MAF\": variant.INFO.get(\"MAF\"),\n \"MISS\": variant.INFO.get(\"MISS\"),\n \"PFD\": variant.INFO.get(\"PFD\"),\n \"v_id\": variant.ID,\n \"updated\": variant.INFO.get(\"UPD\"),\n }\n )\n print_variant_toml(variant, panel_variant_freq, status, reason)\n vcf_summary.kept += 1\n w.write_record(variant)\n w.close()\n vcf.close()\n plot_file = re.sub(r\"(vcf|bcf)(\\.gz)*$\", \"png\", args[\"output\"])\n if not vcf_summary.n_in_panel == 0:\n create_summary_plot(\n vcf_summary, outfile=plot_file, threshold=args[\"outlier_threshold\"]\n )\n print(vcf_summary, file=sys.stderr)\n print(\"n_reference_panel_size=%d\" % len(panel.keys()), file=sys.stderr)\n\n\ndef print_variant_toml(\n variant, panel_variant_freq, status, reason, variant_toml=variant_toml\n):\n\n variant_tup = (\n variant.INFO.get(\"AF\", None),\n variant.INFO.get(\"MAF\", None),\n variant.INFO.get(\"MISS\", None),\n variant.INFO.get(\"PFD\", None),\n )\n\n print(\n variant_toml\n % ((variant.ID, status, reason, variant.aaf, panel_variant_freq) + variant_tup),\n file=sys.stderr,\n )\n\n\ndef swap_ref_alt(variant):\n gts = variant.genotypes\n gts = [Genotype(li) for li in gts]\n for gt in gts:\n gt.flip()\n variant.genotypes = [gt.genotype() for gt in gts]\n updated_frequency = sum([gt.alleles.count(1) for gt in gts]) / (\n 2 * len([gt for gt in gts if -1 not in gt.alleles])\n )\n temp_nuc = variant.REF\n variant.REF = variant.ALT[0]\n variant.ALT = [temp_nuc]\n variant.INFO[\"AF\"] = updated_frequency\n\n\ndef flipstrand(variant, COMPLEMENT=COMPLEMENT):\n variant.REF = COMPLEMENT[variant.REF]\n variant.ALT = COMPLEMENT[variant.ALT]\n swap_ref_alt(variant)\n\n\ndef should_recode(variant, panel_variant):\n panel_nucleotides = [panel_variant[\"A0\"], panel_variant[\"A1\"]]\n variant_nucleotides = variant.ALT[:]\n variant_nucleotides.extend(variant.REF)\n frequency_synced = (\n panel_variant[\"freq\"] > 0.5 and variant.INFO.get(\"AF\") > 0.5\n ) or (panel_variant[\"freq\"] < 0.5 and variant.INFO.get(\"AF\") < 0.5)\n nucleotides_synced = all(nuc in variant_nucleotides for nuc in panel_nucleotides)\n return not (frequency_synced and nucleotides_synced)\n\n\ndef should_flipstrand(variant, panel_variant, COMPLEMENT=COMPLEMENT):\n unsynced = should_recode(variant, panel_variant)\n is_alt_complement = COMPLEMENT[variant.REF] == variant.ALT\n return unsynced and is_alt_complement\n\n\ndef create_summary_plot(v_summary, outfile, threshold=None):\n freqs = v_summary.freqs()\n default_color = plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"][0]\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(321)\n ax2 = fig.add_subplot(323, sharex=ax1)\n ax3 = fig.add_subplot(325)\n ax4 = fig.add_subplot(222)\n ax5 = fig.add_subplot(224, sharex=ax4)\n\n titles = (\n \"Original VCF Frequencies Compared to Panel Frequencies\",\n \"Updated VCF Frequencies Compared to Panel Frequencies\",\n \"Minor Allele Frequency Compared to Difference in Frequency Between Panel and VCF\",\n \"Genotype Missingness Compared to Difference in Frequency Between Panel and VCF\",\n )\n\n x_labs = (\n \"VCF Alternate Frequency\",\n \"VCF Alternate Frequency\",\n \"Minor Allele Frequency\",\n \"Missing Genotype Frequency\",\n )\n\n y_labs = (\n \"Panel Allele Frequency\",\n \"Panel Allele Frequency\",\n \"Panel vs VCF Frequency Difference\",\n \"Panel vs VCF Frequency Difference\",\n )\n\n coefs = np.corrcoef(freqs.T)[:, [4, 5]]\n\n for i, ax in enumerate([ax1, ax2, ax4, ax5]):\n coef, comparison_freq = (\n (coefs[i, 0], freqs[:, 
4]) if i > 1 else (coefs[i, 1], freqs[:, 5])\n        )\n        ax.set_title(titles[i], fontsize=9)\n        ax.scatter(freqs[:, i], comparison_freq, s=10, alpha=0.7)\n        ax.annotate(\n            \"corr = %.2f\" % coef,\n            (\n                max(freqs[:, i]) - max(freqs[:, i]) / 20,\n                max(comparison_freq) - max(comparison_freq) / 20,\n            ),\n            ha=\"center\",\n            fontsize=10,\n        )\n        ax.set_ylabel(y_labs[i], fontsize=9)\n        ax.set_xlabel(x_labs[i], fontsize=9)\n        ax.plot(ax.get_xlim(), ax.get_ylim(), ls=\"--\", c=\".3\")\n\n        if threshold:\n            idxs = freqs[:, 4] > threshold\n            for f, cf, vid in zip(\n                freqs[idxs, i], comparison_freq[idxs], v_summary.v_ids()[idxs]\n            ):\n                ax.annotate(vid, (f, cf), ha=\"center\", fontsize=8)\n\n    v_types = [\"REF --> ALT\", \"Strand Flipped\", \"Ambiguous Variants\", \"ALT Missing\"]\n    counts = [\n        v_summary.updated,\n        v_summary.flipped,\n        v_summary.ambigious,\n        v_summary.unknown_alt,\n    ]\n    bar_width = 0.75\n    idx = np.arange(len(counts))\n    barlist = ax3.bar(idx, counts, width=bar_width, align=\"center\")\n    ax3.set_xticks(idx)\n    ax3.set_xticklabels(v_types, rotation=45, minor=False, fontsize=8)\n    [bar.set_color(\"r\") for bar in barlist[2:]]\n    for i, count in enumerate(counts):\n        col = \"r\" if i > 1 else \"black\"\n        ax3.text(i, count, \" \" + str(count), ha=\"center\", color=col)\n    ax3.set_ylabel(\"Counts\", fontsize=9)\n    ax3.set_title(\"Variant Modification Type and Excluded Counts\", fontsize=9)\n\n    leg_elements = [\n        Patch(facecolor=default_color, label=\"Updated\"),\n        Patch(facecolor=\"red\", label=\"Removed\"),\n    ]\n    ax3.legend(handles=leg_elements, loc=\"upper right\")\n\n    plt.savefig(outfile)\n\n\ndef generate_panel_data(\n    panel_file, chr=None, annotation=\"ensembl\", panel_type=\"kirimp\", separator=None\n):\n    f = open(panel_file, \"r\")\n    header_line = next(f).strip()\n    sep = get_separator(header_line, separator)\n    header_line = [cell.replace('\"', \"\") for cell in header_line.split(sep)]\n    if panel_type == \"kirimp\":\n        chromosome = CHROMOSOME_19_ANNOTATION[annotation]\n        if header_line != KIRIMP_HEADER:\n            raise TypeError(\n                \"If input panel type is kirimp, the panel needs to contain a comma-separated header:\\n%s\"\n                % \",\".join(KIRIMP_HEADER)\n            )\n    else:\n        if header_line != CUSTOM_HEADER:\n            raise TypeError(\n                \"If input panel type is custom, the panel needs to contain a header with the following columns:\\n%s\"\n                % \",\".join(CUSTOM_HEADER)\n            )\n    snp_dict = {\n        chromosome + \"_\" + cells[1]\n        if panel_type == \"kirimp\"\n        else cells[0]\n        + \"_\"\n        + cells[1]: {\"A0\": cells[2], \"A1\": cells[3], \"freq\": float(cells[4].strip())}\n        if panel_type == \"kirimp\"\n        else {\"A0\": cells[2], \"A1\": cells[3], \"freq\": float(cells[4])}\n        for cells in [line.strip().replace('\"', \"\").split(sep) for line in f]\n    }\n    f.close()\n    return snp_dict\n\n\ndef get_separator(line, passed_separator=None):\n    # count actual tab and comma characters in the header line\n    tabs = line.count(\"\\t\")\n    commas = line.count(\",\")\n    if passed_separator:\n        sep = passed_separator\n    elif tabs == 4 and commas != 4:\n        sep = \"\\t\"\n    elif tabs != 4 and commas == 4:\n        sep = \",\"\n    else:\n        raise TypeError(\n            \"Cannot determine separator from file, please specify the separator directly as an argument [--separator]\"\n        )\n    return sep\n\n\ndef parse_arguments(arguments=None):\n    parser = argparse.ArgumentParser(\n        description=\"This script encodes SNPs in a VCF to a reference panel based on allele frequencies\",\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n    )\n    parser._action_groups.pop()\n    required = parser.add_argument_group(\"required arguments\")\n    optional = 
parser.add_argument_group(\"optional arguments\")\n required.add_argument(\n \"-v\",\n \"--vcf-file\",\n help=\"VCF/BCF file to re-encode (can be compressed with bgzip)\",\n required=True,\n type=str,\n )\n required.add_argument(\n \"-r\",\n \"--reference-panel\",\n help=\"Reference panel file either in format of KIR*IMP reference panel or a custom data format [chrom pos ref alt freq]\",\n required=True,\n type=str,\n )\n optional.add_argument(\n \"-rt\",\n \"--reference-panel-type\",\n help=\"Reference panel file type\",\n choices=[\"kirimp\", \"custom\"],\n default=\"kirimp\",\n type=str,\n )\n optional.add_argument(\n \"--separator\", help=\"Custom reference panel column separator\", type=str\n )\n optional.add_argument(\n \"-o\", \"--output\", help=\"Output vcf file\", required=True, type=str\n )\n optional.add_argument(\n \"-chr\",\n \"--chromosomes\",\n help=\"Chromosome over which to encode SNPs \",\n required=False,\n nargs=\"?\",\n type=str,\n )\n optional.add_argument(\n \"--chromosome-annotation\",\n help=\"Chromosome annotation type in the VCF\",\n choices=[\"ucsc\", \"ensembl\", \"genbank\"],\n default=\"ensembl\",\n type=str,\n )\n optional.add_argument(\n \"-a\",\n \"--ambigious\",\n help=\"Determines whether ambigious alternate alleles should be dropped\",\n action=\"store_false\",\n )\n optional.add_argument(\n \"-c\",\n \"--fix-complement-ref-alt\",\n help=\"Should ref/alt that are complements be fixed with respect to frequency\",\n action=\"store_false\",\n )\n optional.add_argument(\n \"-min\",\n \"--min-ambigious-threshold\",\n help=\"Alternate alleles above this frequency and below the max ambigious frequency will be flagged as ambigious\",\n default=0.495,\n type=float,\n )\n optional.add_argument(\n \"-max\",\n \"--max-ambigious-threshold\",\n help=\"Alternate alleles above this frequency and below the max ambigious frequency will be flagged as ambigious\",\n default=0.505,\n type=float,\n )\n optional.add_argument(\n \"--outlier-threshold\",\n help=\"Threshold to use to label variant frequency differences between alternate and panel frequencis that are significant\",\n default=None,\n type=float,\n )\n optional.add_argument(\n \"-t\", \"--threads\", help=\"Number of threads to use for compression\", type=int\n )\n args = vars(parser.parse_args())\n if (\n args[\"reference_panel_type\"] == \"custom\"\n and args[\"reference_panel_format\"] is None\n ):\n parser.error(\n \"custom --reference-panel-type requires --reference-panel-format to be set\"\n )\n return args\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": ".test/scripts/frequency_encode_snps.py", "file_name": "frequency_encode_snps.py", "file_ext": "py", "file_size_in_byte": 18504, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 16, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.use", "line_number": 17, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": "cyvcf2.VCF", "line_number": 199, "usage_type": "call"}, {"api_name": "cyvcf2.Writer", "line_number": 205, 
"usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 227, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 275, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 295, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 300, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 301, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 318, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 362, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 362, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 364, "usage_type": "name"}, {"api_name": "numpy.corrcoef", "line_number": 393, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 429, "usage_type": "call"}, {"api_name": "matplotlib.patches.Patch", "line_number": 441, "usage_type": "call"}, {"api_name": "matplotlib.patches.Patch", "line_number": 442, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 446, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 446, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 500, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 502, "usage_type": "attribute"}]} +{"seq_id": "254991845", "text": "#!/usr/bin/env python3\n\nimport connexion\n\nfrom openapi_server import encoder\nfrom os import environ\nfrom openapi_server.config import DevelopmentConfig, ProductionConfig\nfrom openapi_server.db import db\n\n\ndef main():\n app = connexion.App(__name__, specification_dir='./openapi/')\n\n # Load the config\n if environ.get(\"FLASK_ENV\") == \"production\":\n app.app.config.from_object(ProductionConfig)\n else:\n app.app.config.from_object(DevelopmentConfig)\n\n # Initialize database\n db.init_app(app.app)\n\n with app.app.app_context():\n db.create_all()\n\n app.app.json_encoder = encoder.JSONEncoder\n\n app.add_api('openapi.yaml',\n arguments={'title': 'TODO API'},\n pythonic_params=True)\n app.run(port=8080)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "api/openapi_server/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 812, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "connexion.App", "line_number": 12, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "name"}, {"api_name": "openapi_server.config.ProductionConfig", "line_number": 16, "usage_type": "argument"}, {"api_name": "openapi_server.config.DevelopmentConfig", "line_number": 18, "usage_type": "argument"}, {"api_name": "openapi_server.db.db.init_app", "line_number": 21, "usage_type": "call"}, {"api_name": "openapi_server.db.db", "line_number": 21, "usage_type": "name"}, {"api_name": "openapi_server.db.db.create_all", "line_number": 24, "usage_type": "call"}, {"api_name": "openapi_server.db.db", "line_number": 24, "usage_type": "name"}, {"api_name": "openapi_server.encoder.JSONEncoder", "line_number": 26, "usage_type": "attribute"}, {"api_name": "openapi_server.encoder", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "569517424", "text": "from opengever.base.source import RepositoryPathSourceBinder\nfrom opengever.document import _\nfrom 
plone.autoform import directives as form\nfrom plone.autoform.interfaces import IFormFieldProvider\nfrom plone.supermodel import model\nfrom z3c.relationfield.schema import RelationChoice\nfrom z3c.relationfield.schema import RelationList\nfrom zope.interface import alsoProvides\n\n\nclass IRelatedDocuments(model.Schema):\n \"\"\"The 'Related documents' behvavior is an opengever.document\n specific 'Related items' behavior. Only allows references to\n opengever.documents.\n \"\"\"\n\n form.order_after(relatedItems='IDocumentMetadata.preserved_as_paper')\n relatedItems = RelationList(\n title=_(u'label_related_documents', default=u'Related Documents'),\n default=[],\n missing_value=[],\n value_type=RelationChoice(\n title=u\"Related\",\n source=RepositoryPathSourceBinder(\n portal_type=(\"opengever.document.document\", \"ftw.mail.mail\"),\n navigation_tree_query={\n 'review_state': {'not': 'document-state-shadow'},\n 'object_provides':\n ['opengever.repository.repositoryroot.IRepositoryRoot',\n 'opengever.repository.repositoryfolder.IRepositoryFolderSchema',\n 'opengever.dossier.behaviors.dossier.IDossierMarker',\n 'opengever.document.document.IDocumentSchema',\n 'ftw.mail.mail.IMail', ]\n }),\n ),\n required=False,\n )\n\n model.fieldset(\n u'common',\n label=_(u'fieldset_common', default=u'Common'),\n fields=[\n u'relatedItems',\n ],\n )\n\n\nalsoProvides(IRelatedDocuments, IFormFieldProvider)\n", "sub_path": "opengever/document/behaviors/related_docs.py", "file_name": "related_docs.py", "file_ext": "py", "file_size_in_byte": 1792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "plone.supermodel.model.Schema", "line_number": 11, "usage_type": "attribute"}, {"api_name": "plone.supermodel.model", "line_number": 11, "usage_type": "name"}, {"api_name": "plone.autoform.directives.order_after", "line_number": 17, "usage_type": "call"}, {"api_name": "plone.autoform.directives", "line_number": 17, "usage_type": "name"}, {"api_name": "z3c.relationfield.schema.RelationList", "line_number": 18, "usage_type": "call"}, {"api_name": "opengever.document._", "line_number": 19, "usage_type": "call"}, {"api_name": "z3c.relationfield.schema.RelationChoice", "line_number": 22, "usage_type": "call"}, {"api_name": "opengever.base.source.RepositoryPathSourceBinder", "line_number": 24, "usage_type": "call"}, {"api_name": "plone.supermodel.model.fieldset", "line_number": 39, "usage_type": "call"}, {"api_name": "plone.supermodel.model", "line_number": 39, "usage_type": "name"}, {"api_name": "opengever.document._", "line_number": 41, "usage_type": "call"}, {"api_name": "zope.interface.alsoProvides", "line_number": 48, "usage_type": "call"}, {"api_name": "plone.autoform.interfaces.IFormFieldProvider", "line_number": 48, "usage_type": "argument"}]} +{"seq_id": "131471781", "text": "# thanks to matthuisman for the example at https://github.com/matthuisman/proxy.plugin.example\n\nimport threading\n\nimport xbmc\nimport requests\n\ntry:\n # Python3\n from http.server import BaseHTTPRequestHandler, HTTPServer\n from socketserver import ThreadingMixIn\n from urllib.parse import urlparse, parse_qs\nexcept:\n # Python2\n from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\n from SocketServer import ThreadingMixIn\n from urlparse import urlparse, parse_qs\n\nHOST = '127.0.0.1'\nPORT = 48201\nREMOVE_IN_HEADERS = ['upgrade', 'host']\nREMOVE_OUT_HEADERS = ['date', 'server', 'transfer-encoding', 'keep-alive', 'connection', 'content-length', 
'content-encoding']\n\nclass RequestHandler(BaseHTTPRequestHandler):\n    def log_message(self, format, *args):\n        return\n\n    def do_POST(self):\n        self.send_error(404)\n\n    def do_HEAD(self):\n        self.send_error(404)\n\n    def do_GET(self):\n        url = urlparse(self.path)\n        path = url.path\n        query = parse_qs(url.query)\n        if not path == \"/dplus_proxy.m3u8\":\n            self.send_error(404)\n            return  # stop handling the request once the error response has been sent\n\n        headers = {}\n        for key in self.headers:\n            if key.lower() not in REMOVE_IN_HEADERS:\n                headers[key] = self.headers[key]\n        \n        origin_url = query[\"hls_origin_url\"][0]\n\n        response = requests.get(origin_url, headers=headers, timeout=5)\n\n        self.send_response(response.status_code)\n\n        for key in response.headers:\n            if key.lower() not in REMOVE_OUT_HEADERS:\n                self.send_header(key, response.headers[key])\n\n        self.end_headers()\n\n        ## Edit the content\n        content = response.content.decode('utf8')\n        if \"old_framerate\" in query:\n            content = content.replace(query[\"old_framerate\"][0], query[\"new_framerate\"][0])\n        # Output the content\n        self.wfile.write(content.encode('utf8'))\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n    daemon_threads = True\n\ndef main():\n    server = ThreadedHTTPServer((HOST, PORT), RequestHandler)\n    server.allow_reuse_address = True\n    httpd_thread = threading.Thread(target=server.serve_forever)\n    httpd_thread.start()\n\n    xbmc.Monitor().waitForAbort()\n\n    server.shutdown()\n    server.server_close()\n    server.socket.close()\n    httpd_thread.join()\n", "sub_path": "plugin.video.discoveryplus/resources/service/proxy.py", "file_name": "proxy.py", "file_ext": "py", "file_size_in_byte": 2319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "BaseHTTPServer.BaseHTTPRequestHandler", "line_number": 24, "usage_type": "name"}, {"api_name": "urlparse.urlparse", "line_number": 35, "usage_type": "call"}, {"api_name": "urlparse.parse_qs", "line_number": 37, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 48, "usage_type": "call"}, {"api_name": "SocketServer.ThreadingMixIn", "line_number": 65, "usage_type": "name"}, {"api_name": "BaseHTTPServer.HTTPServer", "line_number": 65, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 71, "usage_type": "call"}, {"api_name": "xbmc.Monitor", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "549480687", "text": "from flask import current_app, jsonify, render_template\nimport requests\nfrom HTMLParser import HTMLParser\n\nfrom app.comms.encryption import encrypt\nfrom app.models import EVENT\nfrom app.dao.events_dao import dao_get_event_by_id\n\nh = HTMLParser()\n\n\ndef get_nice_event_dates(event_dates):\n    event_dates.sort(key=lambda k: k.event_datetime)\n\n    event_date_str = ''\n    _event_month = ''\n    _event_dates = ''\n    for event_date in event_dates:\n        m = event_date.event_datetime.strftime(\"%B\")\n        d = event_date.event_datetime.strftime(\"%a %-d, \")\n\n        if not _event_month:\n            _event_month = event_date.event_datetime.strftime(\"%B\")\n\n        if m == _event_month:\n            _event_dates += d\n        elif _event_dates:\n            event_date_str += _event_dates[:-2] + ' of ' + _event_month + ', '\n            _event_dates = d\n            _event_month = m\n\n    event_date_str = (event_date_str if len(event_date_str) > 2 else '') + _event_dates[:-2] + ' of ' + _event_month\n    event_datetime = event_dates[0].event_datetime\n    event_date_str += ' - ' + event_datetime.strftime(\n        \"%-I:%M %p\" if event_datetime.strftime(\"%M\") != '00' else \"%-I %p\")\n\n    return event_date_str\n\n\ndef get_email_html(email_type, 
**kwargs):\n    if email_type == EVENT:\n        event = dao_get_event_by_id(kwargs.get('event_id'))\n        member_id = kwargs.get('member_id')\n        if not member_id:\n            member_id = '0' # for preview of emails\n        current_app.logger.debug('Email Tokens %s', current_app.config['EMAIL_TOKENS'])\n        unsubcode = encrypt(\n            \"{}={}\".format(current_app.config['EMAIL_TOKENS']['member_id'], member_id),\n            current_app.config['EMAIL_UNSUB_SALT']\n        )\n        return render_template(\n            'emails/events.html',\n            event=event,\n            event_dates=get_nice_event_dates(event.event_dates),\n            description=h.unescape(event.description),\n            details=kwargs.get('details'),\n            extra_txt=kwargs.get('extra_txt'),\n            unsubcode=unsubcode\n        )\n\n\ndef send_email(to, subject, message, _from=None):\n    if not _from:\n        _from = 'noreply@{}'.format(current_app.config['EMAIL_DOMAIN'])\n\n    email_provider_url = current_app.config['EMAIL_PROVIDER_URL']\n    email_provider_apikey = current_app.config['EMAIL_PROVIDER_APIKEY']\n\n    if current_app.config['ENVIRONMENT'] != 'live':\n        message = message.replace('<body>', '<body><p>Test email, intended for {}</p>
'.format(to))\n to = current_app.config['TEST_EMAIL']\n\n data = {\n \"from\": _from,\n \"to\": to,\n \"subject\": subject,\n \"html\": message\n }\n\n if email_provider_url and email_provider_apikey:\n response = requests.post(\n email_provider_url,\n auth=('api', email_provider_apikey),\n data=data,\n )\n\n response.raise_for_status()\n current_app.logger.info('Sent email: {}, response: {}'.format(subject, response.text))\n if current_app.config['ENVIRONMENT'] != 'live': # pragma: no cover\n current_app.logger.info('Email to: {}'.format(to))\n current_app.logger.info('Email provider: {}'.format(email_provider_url))\n current_app.logger.info('Email key: {}'.format(email_provider_apikey[:5]))\n\n return response.status_code\n else:\n current_app.logger.info('Email not configured, email would have sent: {}'.format(data))\n", "sub_path": "app/comms/email.py", "file_name": "email.py", "file_ext": "py", "file_size_in_byte": 3454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "HTMLParser.HTMLParser", "line_number": 9, "usage_type": "call"}, {"api_name": "app.models.EVENT", "line_number": 41, "usage_type": "name"}, {"api_name": "app.dao.events_dao.dao_get_event_by_id", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.current_app.logger.debug", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 46, "usage_type": "attribute"}, {"api_name": "app.comms.encryption.encrypt", "line_number": 47, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 48, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 48, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 49, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 64, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 71, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.current_app.logger.info", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 88, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 88, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 90, "usage_type": "call"}, {"api_name": 
"flask.current_app.logger", "line_number": 90, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 90, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 91, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 91, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 92, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.current_app.logger.info", "line_number": 96, "usage_type": "call"}, {"api_name": "flask.current_app.logger", "line_number": 96, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 96, "usage_type": "name"}]} +{"seq_id": "339341794", "text": "#!/usr/bin/env python\n\n#####################################################################\n# This script presents labels buffer that shows only visible game objects\n# (enemies, pickups, exploding barrels etc.), each with unique label.\n# OpenCV is used here to display images, install it or remove any\n# references to cv2\n# Configuration is loaded from \"../../examples/config/basic.cfg\" file.\n# number of episodes are played.\n# Random combination of buttons is chosen for every action.\n# Game variables from state and last reward are printed.\n#\n# To see the scenario description go to \"../../scenarios/README.md\"\n#####################################################################\n\nfrom __future__ import print_function\n\nfrom random import choice\nfrom vizdoom import *\n\nimport cv2\n\ngame = DoomGame()\n\n# Use other config file if you wish.\ngame.load_config(\"../../examples/config/deadly_corridor.cfg\")\ngame.set_render_hud(False)\n\ngame.set_screen_resolution(ScreenResolution.RES_640X480)\n\n# Enables labeling of the in game objects.\ngame.set_labels_buffer_enabled(True)\n\ngame.init()\n\nactions = [[True, False, False], [False, True, False], [False, False, True]]\n\nepisodes = 10\n\n# sleep time in ms\nsleep_time = 28\n\nfor i in range(episodes):\n print(\"Episode #\" + str(i + 1))\n\n # Not needed for the first episode but the loop is nicer.\n game.new_episode()\n while not game.is_episode_finished():\n # Gets the state and possibly to something with it\n state = game.get_state()\n\n labels = state.labels_buffer\n if labels is not None:\n cv2.imshow('ViZDoom Labels Buffer', labels)\n\n cv2.waitKey(sleep_time)\n\n game.make_action(choice(actions))\n\n print(\"State #\" + str(state.number))\n print(\"Labels:\")\n\n # Print information about objects visible on the screen.\n # object_id identifies specific in game object.\n # object_name contains name of object.\n # value tells which value represents object in labels_buffer.\n for l in state.labels:\n print(\"Object id: \" + str(l.object_id) + \" object name: \" + l.object_name + \" label: \" + str(l.value))\n\n print(\"=====================\")\n\n print(\"Episode finished!\")\n print(\"************************\")\n\ncv2.destroyAllWindows()", "sub_path": "examples/python/labels.py", "file_name": "labels.py", "file_ext": "py", "file_size_in_byte": 2296, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "cv2.imshow", "line_number": 54, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 56, "usage_type": "call"}, 
{"api_name": "random.choice", "line_number": 58, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 75, "usage_type": "call"}]} +{"seq_id": "596229799", "text": "import csv\nimport re\nimport django\ndjango.setup()\nfrom sefaria.model import *\nfrom sefaria.utils.hebrew import gematria\nfrom sefaria.utils.talmud import section_to_daf\nfrom sources.functions import post_link, post_text, post_index, add_term\n\nname = 'Ohr HaChamah'\nhname = 'אור החמה'\ntitle = f'{name} on Zohar'\ntext = []\nlinks = []\nparashot = []\nremain = ''\nwith open('ohr for post.csv') as fp:\n for r, row in enumerate(csv.DictReader(fp)):\n if r in [0, 7084, 15092]:\n text.append([])\n if row['stream original']:\n row['text'] = f\"{row['stream original']}
{row['text']}\"\n if row['page']:\n page = gematria(row['page'].split()[0]) * 2 + gematria(row['page'].split()[1]) - 72\n if page < len(text[-1]):\n row['text'] = f\"{row['page']}
{row['text']}\"\n else:\n text[-1] += [[] for _ in range(page - len(text[-1]))]\n rowtext = ' '.join(row['text'].split())\n rowtext = re.sub('@\\d\\d( *)(.*?)( *)@\\d\\d', r'\\1\\2\\3', rowtext)\n rowtext = re.sub('[@\\d]', '', rowtext)\n rowtext = re.sub('\\{ *[\\[\\(]?[א-ת\"\\*] *[\\)\\]] *(.*?)\\}', r'*\\1', rowtext)\n if re.search('[\\{\\}]', rowtext):\n print(rowtext)\n if row['text'].startswith('@88') and len(row['text']) < 23:\n remain = f'{rowtext}
'\n            continue\n        else:\n            rowtext = remain + rowtext\n            remain = ''\n        text[-1][-1].append(rowtext)\n        ref = f'{title} {len(text)}:{section_to_daf(len(text[-1]))}:{len(text[-1][-1])}'\n        if row['base ref']:\n            links.append({\n                'refs': [row['base ref'], ref],\n                'auto': True,\n                'type': 'commentary',\n                'generated_by': 'ohr hachamah parser'\n            })\n\nserver = 'https://new-shmuel.cauldron.sefaria.org'\nserver = 'http://localhost:9000'\nadd_term(name, hname, server=server)\n\nsubtitles = [(\"Author's Introduction\", 'הקדמת המחבר'), ('Preface', 'הקדמה לספר')]\n\n\nschema = SchemaNode()\nschema.add_primary_titles(title, f'{hname} על ספר הזהר')\nfor subtitle in subtitles:\n    node = JaggedArrayNode()\n    node.add_primary_titles(*subtitle)\n    node.sectionNames = ['Paragraph']\n    node.addressTypes = ['Integer']\n    node.depth = 1\n    schema.append(node)\nnode = JaggedArrayNode()\nnode.depth = 3\nnode.key = 'default'\nnode.default = True\nnode.sectionNames = ['Volume', 'Daf', 'Paragraph']\nnode.addressTypes = ['Integer', 'Talmud', 'Integer']\nschema.append(node)\nschema.validate()\nindex = {\n    'title': title,\n    'categories': [\"Kabbalah\", 'Zohar'],\n    'dependence': \"Commentary\",\n    'base_text_titles': ['Zohar'],\n    'collective_title': name,\n    'schema': schema.serialize(),\n}\npost_index(index, server=server)\n\nfor i in range(3):\n    temp = [[] for _ in range(3)]\n    temp[i] = text[i]\n    text_version = {\n        'versionTitle': f'Ohr Hachama, Peremyshl, 1896-1898, Vol. {i+1}',\n        'versionSource': 'https://www.nli.org.il/he/books/NNL_ALEPH001148825/NLI',\n        'language': 'he',\n        'text': temp}\n    post_text(title, text_version, server=server)\n\npost_link(links, server=server, skip_lang_check=False, VERBOSE=False)\n", "sub_path": "sources/new_zohar/ohr_post.py", "file_name": "ohr_post.py", "file_ext": "py", "file_size_in_byte": 3320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.setup", "line_number": 4, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 18, "usage_type": "call"}, {"api_name": "sefaria.utils.hebrew.gematria", "line_number": 24, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 30, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 31, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 32, "usage_type": "call"}, {"api_name": "re.search", "line_number": 33, "usage_type": "call"}, {"api_name": "sefaria.utils.talmud.section_to_daf", "line_number": 42, "usage_type": "call"}, {"api_name": "sources.functions.add_term", "line_number": 53, "usage_type": "call"}, {"api_name": "sources.functions.post_index", "line_number": 83, "usage_type": "call"}, {"api_name": "sources.functions.post_text", "line_number": 93, "usage_type": "call"}, {"api_name": "sources.functions.post_link", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "267544350", "text": "from enum import Enum, auto\n\n\n# the Animal class is an enumeration\nclass Animal(Enum):\n    # enumeration members are constants (they do not change)\n    # members have names and values\n    # the auto() function automatically assigns each member a value, starting at 1 and increasing by 1\n    cat = auto() # 1\n    dog = auto() # 1 + 1 = 2\n    frog = auto() # 2 + 1 = 3\n    duck = auto() # 3 + 1 = 4\n\n\n# an equivalent, shorter notation\nAnimalShort = Enum('Animal', \"cat dog frog duck\")\n\nif __name__ == '__main__':\n    print(Animal)\n    for ae in Animal:\n        print(ae, ae.value)\n\n    print(\"-\"*50)\n    print(AnimalShort)\n    for ase in AnimalShort:\n        print(ase, ase.value)\n", "sub_path": 
"topic_09_enum/examples/4_enum_short.py", "file_name": "4_enum_short.py", "file_ext": "py", "file_size_in_byte": 841, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "enum.Enum", "line_number": 5, "usage_type": "name"}, {"api_name": "enum.auto", "line_number": 9, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 10, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 11, "usage_type": "call"}, {"api_name": "enum.auto", "line_number": 12, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "194126061", "text": "#-------------------------------------------------------------------------------\n#\n# WPS process fetching information about the provided product collections.\n#\n# Authors: Martin Paces \n#-------------------------------------------------------------------------------\n# Copyright (C) 2019 EOX IT Services GmbH\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies of this Software or works derived from this Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#-------------------------------------------------------------------------------\n# pylint: disable=unused-argument\n\nimport re\nfrom io import StringIO\nfrom django.db.models import Count, Min, Max\nfrom eoxserver.services.ows.wps.parameters import (\n LiteralData, ComplexData, FormatText, FormatJSON, CDFileWrapper, CDObject,\n RequestParameter,\n)\nfrom eoxserver.services.ows.wps.exceptions import InvalidOutputDefError\nfrom vires.models import ProductCollection\nfrom vires.util import unique\nfrom vires.time_util import format_datetime\nfrom vires.access_util import get_vires_permissions\nfrom vires.processes.base import WPSProcess\n\n\nRE_CSL_DEMIMITER = re.compile(r\"\\s*,\\s*\")\n\n\nclass GetCollectionInfo(WPSProcess):\n \"\"\" Process information about the available products collections.\n \"\"\"\n identifier = \"vires:get_collection_info\"\n title = \"Get get information about the available product collections.\"\n metadata = {}\n profiles = [\"vires\"]\n\n inputs = WPSProcess.inputs + [\n (\"permissions\", RequestParameter(get_vires_permissions)),\n (\"collection_ids\", LiteralData(\n \"collection\", str, optional=True, default=None,\n title=\"Optional comma separated list of collection identifiers.\"\n )),\n ]\n\n outputs = [\n (\"output\", ComplexData(\n 'output', title=\"Output data\", formats=(\n FormatText('text/csv'),\n FormatJSON('application/json'),\n )\n )),\n ]\n\n def execute(self, permissions, collection_ids, output, **kwargs):\n \"\"\" Execute process \"\"\"\n access_logger = self.get_access_logger(**kwargs)\n\n collection_ids = self._parse_collection_ids(collection_ids)\n collections = self._get_collections(collection_ids, permissions)\n\n access_logger.info(\n \"request: collection_ids: %s, \",\n \"\" if collection_ids is None else\n \"(%s)\" % \", \".join(collection_ids)\n )\n\n if output['mime_type'] == \"text/csv\":\n return self._csv_output(collections, output)\n if output['mime_type'] == \"application/json\":\n return self._json_output(collections, output)\n\n raise InvalidOutputDefError(\n 'output',\n \"Unexpected output format %r requested!\" % output['mime_type']\n )\n\n @staticmethod\n def _parse_collection_ids(collection_ids):\n if collection_ids is None:\n return None\n collection_ids = collection_ids.strip()\n if not collection_ids:\n return []\n return list(unique(RE_CSL_DEMIMITER.split(collection_ids)))\n\n @staticmethod\n def _get_collections(collection_ids, permisisons):\n\n collections = ProductCollection.select_permitted(permisisons).values(\n 'identifier', 'type__identifier', 'metadata'\n ).annotate(\n product_count=Count('products'),\n begin_time=Min('products__begin_time'),\n end_time=Max('products__end_time'),\n ).order_by('identifier')\n\n if collection_ids is not None:\n collections = collections.filter(identifier__in=collection_ids)\n\n return collections\n\n @classmethod\n def _csv_output(cls, collections, output):\n output_fobj = StringIO(newline=\"\\r\\n\")\n print(\n \"collectionId,productType,productCount,startTime,endTime\",\n file=output_fobj\n )\n for collection in collections:\n print(\"%s,%s,%d,%s,%s\" % (\n collection['identifier'],\n collection['type__identifier'],\n collection['product_count'],\n format_datetime(collection['begin_time']) or \"\",\n 
format_datetime(collection['end_time']) or \"\",\n ), file=output_fobj)\n return CDFileWrapper(output_fobj, **output)\n\n @classmethod\n def _json_output(cls, collections, output):\n\n def _get_collection_info(collection):\n time_extent = {} if collection['product_count'] == 0 else {\n 'timeExtent': {\n 'start': format_datetime(collection['begin_time']),\n 'end': format_datetime(collection['end_time']),\n },\n }\n return {\n 'name': collection['identifier'],\n 'productType': collection['type__identifier'],\n 'productCount': collection['product_count'],\n **time_extent,\n }\n\n return CDObject([\n _get_collection_info(collection) for collection in collections\n ], format=FormatJSON(), **output)\n", "sub_path": "vires/vires/processes/get_collection_info.py", "file_name": "get_collection_info.py", "file_ext": "py", "file_size_in_byte": 6005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "re.compile", "line_number": 44, "usage_type": "call"}, {"api_name": "vires.processes.base.WPSProcess", "line_number": 47, "usage_type": "name"}, {"api_name": "vires.processes.base.WPSProcess.inputs", "line_number": 55, "usage_type": "attribute"}, {"api_name": "vires.processes.base.WPSProcess", "line_number": 55, "usage_type": "name"}, {"api_name": "eoxserver.services.ows.wps.parameters.RequestParameter", "line_number": 56, "usage_type": "call"}, {"api_name": "vires.access_util.get_vires_permissions", "line_number": 56, "usage_type": "argument"}, {"api_name": "eoxserver.services.ows.wps.parameters.LiteralData", "line_number": 57, "usage_type": "call"}, {"api_name": "eoxserver.services.ows.wps.parameters.ComplexData", "line_number": 64, "usage_type": "call"}, {"api_name": "eoxserver.services.ows.wps.parameters.FormatText", "line_number": 66, "usage_type": "call"}, {"api_name": "eoxserver.services.ows.wps.parameters.FormatJSON", "line_number": 67, "usage_type": "call"}, {"api_name": "eoxserver.services.ows.wps.exceptions.InvalidOutputDefError", "line_number": 90, "usage_type": "call"}, {"api_name": "vires.util.unique", "line_number": 102, "usage_type": "call"}, {"api_name": "vires.models.ProductCollection.select_permitted", "line_number": 107, "usage_type": "call"}, {"api_name": "vires.models.ProductCollection", "line_number": 107, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 110, "usage_type": "call"}, {"api_name": "django.db.models.Min", "line_number": 111, "usage_type": "call"}, {"api_name": "django.db.models.Max", "line_number": 112, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 122, "usage_type": "call"}, {"api_name": "vires.time_util.format_datetime", "line_number": 132, "usage_type": "call"}, {"api_name": "vires.time_util.format_datetime", "line_number": 133, "usage_type": "call"}, {"api_name": "eoxserver.services.ows.wps.parameters.CDFileWrapper", "line_number": 135, "usage_type": "call"}, {"api_name": "vires.time_util.format_datetime", "line_number": 143, "usage_type": "call"}, {"api_name": "vires.time_util.format_datetime", "line_number": 144, "usage_type": "call"}, {"api_name": "eoxserver.services.ows.wps.parameters.CDObject", "line_number": 154, "usage_type": "call"}, {"api_name": "eoxserver.services.ows.wps.parameters.FormatJSON", "line_number": 156, "usage_type": "call"}]} +{"seq_id": "582816646", "text": "import requests\nimport configparser\n\nfrom app import get_all_rates\nfrom os import path\n\nimport os\n\nenvironment = path.join(os.getcwd(), 'environment', 
'config.ini')\nconfig = configparser.ConfigParser()\nconfig.read(environment)\n\nchat_id = config['TelegramBot']['chat_id']\nTOKEN = config['TelegramBot']['token']\nPROXY = {'https': f\"socks5h://{config['Proxy']['user']}:{config['Proxy']['password']}@{config['Proxy']['url']}:1080\"}\nURL = f'https://api.telegram.org/bot{TOKEN}/'\n\nmessage_id_list = []\n\n\ndef send_message(text):\n params = {'chat_id': chat_id, 'text': text}\n return requests.post(URL + 'sendMessage', data=params, proxies=PROXY)\n\n\ndef ping_pong():\n response = requests.post(URL + 'getUpdates', proxies=PROXY)\n result_json = response.json()['result']\n message_id = result_json[-1]['message'].get('message_id')\n if result_json[-1]['message']['text'] == '/list' and message_id not in message_id_list:\n send_message(get_all_rates())\n message_id_list.append(result_json[-1]['message'].get('message_id'))\n elif message_id not in message_id_list:\n send_message(result_json[-1]['message']['text'])\n message_id_list.append(result_json[-1]['message'].get('message_id'))\n\n\nif __name__ == '__main__':\n send_message(f'Watch Dog обнаружил цель')\n while True:\n ping_pong()\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 9, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 27, "usage_type": "call"}, {"api_name": "app.get_all_rates", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "343429522", "text": "from kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.button import Button\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.core.window import Window\n\nclass BuyInfoView(BoxLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n high = max(Window.size)\n low = min(Window.size)\n self.orientation = 'vertical'\n self.width = high-low\n self.height = self.width*2\n self.pos = (low,0)\n\n self.base_grid = GridLayout(cols = 2)\n self.for_sale_grid = GridLayout(cols = 2)\n self.owned_grid = GridLayout(cols = 2)\n \n type_lbl = Label(text='Building type:')\n self.type_lbl = Label(text='')\n cost_lbl = Label(text='Building cost:')\n self.cost_lbl = Label(text='')\n self.upkeep_lbl = Label(text='')\n self.upkeep_lbl1 = Label(text='Daily upkeep:')\n \n \n self.base_grid.add_widget(type_lbl)\n self.base_grid.add_widget(self.type_lbl)\n self.base_grid.add_widget(self.upkeep_lbl1)\n self.base_grid.add_widget(self.upkeep_lbl)\n \n self.for_sale_grid.add_widget(cost_lbl)\n self.for_sale_grid.add_widget(self.cost_lbl)\n\n self.add_widget(self.base_grid)\n\n def update(self):\n building = self.parent.zone.building\n self.type_lbl.text = building.name\n self.cost_lbl.text = str(building.cost)\n if building.upkeep > 0:\n self.upkeep_lbl1.text = 'Daily upkeep:'\n self.upkeep_lbl.text = str(building.upkeep)\n if building.upkeep < 0:\n self.upkeep_lbl1.text = 'Daily revenue:'\n self.upkeep_lbl.text = str(abs(building.upkeep))\n if building.owner is None:\n self.add_widget(self.for_sale_grid)\n self.buy_btn = Button(text='Buy', disabled = True)\n self.buy_btn.bind(on_press = self.buy)\n if 
self.parent.player.clean_money >= building.cost:\r\n                self.buy_btn.disabled = False\r\n            self.for_sale_grid.add_widget(self.buy_btn)\r\n\r\n        if building.owner is self.parent.player:\r\n            pass\r\n\r\n    def buy(self, btn):\r\n        building = self.parent.zone.building\r\n        player = self.parent.player\r\n        building.owner = player\r\n        player.clean_money -= building.cost\r\n        self.remove_widget(self.for_sale_grid)\r\n        self.update()\r\n", "sub_path": "UI/buyinfoview.py", "file_name": "buyinfoview.py", "file_ext": "py", "file_size_in_byte": 2395, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "kivy.uix.boxlayout.BoxLayout", "line_number": 7, "usage_type": "name"}, {"api_name": "kivy.core.window.Window.size", "line_number": 10, "usage_type": "attribute"}, {"api_name": "kivy.core.window.Window", "line_number": 10, "usage_type": "name"}, {"api_name": "kivy.core.window.Window.size", "line_number": 11, "usage_type": "attribute"}, {"api_name": "kivy.core.window.Window", "line_number": 11, "usage_type": "name"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 17, "usage_type": "call"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 18, "usage_type": "call"}, {"api_name": "kivy.uix.gridlayout.GridLayout", "line_number": 19, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 21, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 22, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 23, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 24, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 25, "usage_type": "call"}, {"api_name": "kivy.uix.label.Label", "line_number": 26, "usage_type": "call"}, {"api_name": "kivy.uix.button.Button", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "166234063", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nnetwork\r\nfun:\r\n\r\nenv:\r\n\tLinux ubuntu 4.4.0-31-generic x86_64 GNU;python 2.7;tensorflow1.10.1;Keras2.2.4\r\n\tpip2,matplotlib2.2.3\r\n\"\"\"\r\nfrom __future__ import print_function\r\nimport os\r\nimport datetime\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nimport keras\r\nfrom keras import models,layers,optimizers\r\nfrom keras import backend as K  # used by user_loss below\r\nfrom keras.callbacks import ModelCheckpoint,EarlyStopping,ReduceLROnPlateau,CSVLogger,LambdaCallback\r\nfrom keras.models import load_model\r\nfrom keras.layers import Dense,Dropout,Conv2D,MaxPooling2D,LSTM,Activation\r\nfrom keras.layers.normalization import BatchNormalization\r\nimport matplotlib.pyplot as plt\r\n\r\ndef helloworld():\r\n    print('hello world')\r\n\r\n\r\n##########################define the model################################\r\ndef build_network():\r\n    # build the model\r\n    model = models.Sequential()\r\n    model.add(Dense(units=36, input_shape=(12,), activation='relu', kernel_initializer=keras.initializers.glorot_uniform(seed=0),bias_initializer=keras.initializers.Zeros(),name=\"hidden01\")) #yasong\r\n    #model.add(BatchNormalization())\r\n    #model.add(Dense(units=12, activation='relu',kernel_initializer=keras.initializers.glorot_uniform(seed=0),bias_initializer=keras.initializers.Zeros(),name=\"hidden02\"))\r\n    #model.add(BatchNormalization())\r\n    #model.add(Dropout(0.2))\r\n    model.add(Dense(units=4, activation='relu',kernel_initializer=keras.initializers.glorot_uniform(seed=0),bias_initializer=keras.initializers.Zeros(), name=\"output\"))\r\n    #model.add(BatchNormalization())\r\n    model.summary()\r\n    return model\r\n\r\ndef 
user_loss(y_true, y_pred):\r\n    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))\r\n##########################model training################################\r\ndef train_network(trainX, trainY, validX, validY,\r\n                  model, model_dir='model', model_name=\"model_Epoch{epoch:04d}-val_mean_squared_error{val_mean_squared_error:.2f}.h5\",\r\n                  log_name=\"train_log.csv\",\r\n                  lr=0.01, epochs=1000, batch_size=8,loss_fun=\"mse\"):\r\n    # model_name = \"model_best.h5\"#\"model_Epoch{epoch:04d}-val_mean_absolute_error{val_mean_absolute_error:.2f}.h5\"\r\n    print(\"###############model train###############\")\r\n    if not os.path.isdir(model_dir):\r\n        os.makedirs(model_dir)\r\n    model_file = os.path.join(model_dir, model_name)\r\n    optimizer = optimizers.Adam(lr=lr) # Adam RMSprop SGD\r\n    model.compile(optimizer=optimizer, loss=loss_fun, metrics=['mse'])\r\n    print('model.metrics_names = ', model.metrics_names) # mean_squared_error mean_absolute_error\r\n    saveBestModel = ModelCheckpoint(model_file, monitor='val_mean_squared_error', verbose=1,\r\n                                    save_best_only=False, period=10)\r\n    earlyStopping = EarlyStopping(monitor='val_mean_squared_error', patience=epochs / 10, verbose=1, mode='auto')\r\n\r\n    reduce_lr = ReduceLROnPlateau(monitor='val_mean_squared_error', factor=0.1, verbose=1, patience=epochs / 100,\r\n                                  min_lr=lr / 1000.0)\r\n    csv_logger = CSVLogger(os.path.join(model_dir, log_name), separator=',',\r\n                           append=True) # CSVLogger: saves each epoch's training results to a csv file\r\n    epoch_end_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:\r\n                                        print(\"epoch_end_callback epoch\", epoch, \"lr\", logs[\"lr\"])\r\n                                        ) # user-defined callback function\r\n    callback_lists = [earlyStopping, saveBestModel, reduce_lr, csv_logger, epoch_end_callback]\r\n    # train the model\r\n    history = model.fit(trainX, trainY,\r\n                        batch_size=batch_size,\r\n                        epochs=epochs, verbose=2,\r\n                        shuffle=False,\r\n                        #callbacks=callback_lists,\r\n                        validation_data=(validX, validY)\r\n                        )\r\n    # save the model\r\n    now = datetime.datetime.now()\r\n    print(\"now date time:\",now)\r\n    #print(history.history['mean_squared_error'][-1])\r\n    model_name = \"%04d\"%(now.year)+\"%02d\"%(now.month)+\"%02d\"%(now.day)+\"%02d\"%(now.hour)+\"%02d\"%(now.minute) + \\\r\n                 \"_epoch{epoch:04d}-val_mean_squared_error{val_mean_squared_error:.2f}.h5\".format(epoch=history.epoch[-1],val_mean_squared_error=history.history['val_mean_squared_error'][-1])\r\n    model_file = os.path.join(model_dir, model_name)\r\n    model.save(model_file) # elesun\r\n    print(model_file,'Model Saved.') # elesun\r\n    return history\r\n\r\n##########################plot the training results################################\r\ndef plt_result(history,output_dir = \"output\",result_name = \"history.png\"):\r\n    if not os.path.isdir(output_dir):\r\n        os.makedirs(output_dir)\r\n    print(\"###############show result###############\")\r\n    #print('history.history.keys = ',history.history.keys())\r\n    #print('history.history = ',history.history)\r\n    #print('history.epoch = ',history.epoch)\r\n    # plot history\r\n    plt.title(\"model performance\")\r\n    plt.plot(history.epoch,history.history['loss'], label='train_loss')\r\n    #plt.plot(history.epoch,history.history['val_loss'], label='val_loss')\r\n    #plt.plot(history.epoch,history.history['mean_squared_error'], label='mean_squared_error')\r\n    plt.plot(history.epoch,history.history['val_mean_squared_error'], label='val_mean_squared_error')\r\n\r\n    plt.ylabel(\"loss or metric\")\r\n    plt.xlabel(\"epochs\")\r\n    plt.legend()\r\n    plt.savefig(os.path.join(output_dir, result_name))\r\n    print(result_name,\"has been saved!\")\r\n    plt.show()\r\n", "sub_path": 
"20191107_Keras乘用车销量预测神经网络训练对比实验/code/network.py", "file_name": "network.py", "file_ext": "py", "file_size_in_byte": 5504, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "keras.models.Sequential", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 31, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.initializers.glorot_uniform", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.initializers", "line_number": 32, "usage_type": "attribute"}, {"api_name": "keras.initializers.Zeros", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.initializers.glorot_uniform", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.initializers", "line_number": 37, "usage_type": "attribute"}, {"api_name": "keras.initializers.Zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "keras.optimizers.Adam", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 54, "usage_type": "name"}, {"api_name": "keras.callbacks.ModelCheckpoint", "line_number": 57, "usage_type": "call"}, {"api_name": "keras.callbacks.EarlyStopping", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.callbacks.ReduceLROnPlateau", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.callbacks.CSVLogger", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "keras.callbacks.LambdaCallback", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 105, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.show", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}]} +{"seq_id": "380148423", "text": "\"\"\"after drop\n\nRevision ID: b027f6085aa9\nRevises: 3ec707ae96ff\nCreate Date: 2021-01-22 12:12:43.792952\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b027f6085aa9'\ndown_revision = '3ec707ae96ff'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=80), nullable=False),\n sa.Column('email', sa.String(length=256), nullable=False),\n sa.Column('password', sa.String(length=128), nullable=False),\n sa.Column('is_admin', sa.Boolean(), nullable=True),\n sa.Column('fname', sa.String(length=256), nullable=True),\n sa.Column('lname', sa.String(length=256), nullable=True),\n sa.Column('phone', sa.String(length=25), nullable=True),\n sa.Column('img', sa.String(length=128), nullable=True),\n sa.Column('signup_date', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n sa.UniqueConstraint('username')\n )\n op.create_table('project',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=80), nullable=False),\n sa.Column('description', sa.Text(), nullable=True),\n sa.Column('creation_date', sa.String(length=20), nullable=False),\n sa.Column('limit_date', sa.String(length=20), nullable=True),\n sa.Column('author_id', sa.Integer(), nullable=False),\n sa.Column('color', sa.String(length=10), nullable=True),\n sa.Column('leader_id', sa.Integer(), nullable=True),\n sa.Column('public', sa.Boolean(), nullable=False),\n sa.ForeignKeyConstraint(['author_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['leader_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('title')\n )\n op.create_table('author_project_assc',\n sa.Column('author_id', sa.Integer(), nullable=False),\n sa.Column('project_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['author_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),\n sa.PrimaryKeyConstraint('author_id', 'project_id')\n )\n op.create_table('leader_project_assc',\n sa.Column('leader_id', sa.Integer(), nullable=False),\n sa.Column('project_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['leader_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['project_id'], ['project.id'], ),\n sa.PrimaryKeyConstraint('leader_id', 'project_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('leader_project_assc')\n op.drop_table('author_project_assc')\n op.drop_table('project')\n op.drop_table('user')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/b027f6085aa9_after_drop.py", "file_name": "b027f6085aa9_after_drop.py", "file_ext": "py", "file_size_in_byte": 2867, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 34, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 36, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 36, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 37, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 38, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.Text", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 
43, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.Boolean", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 47, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 48, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 49, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 51, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 51, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 52, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 53, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 54, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 55, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 56, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 58, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 58, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 59, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 61, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 62, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 63, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 70, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 70, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 71, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 71, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 72, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 72, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 73, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 73, "usage_type": "name"}]} +{"seq_id": "330147627", "text": "\nimport os\nos.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n#from utils import load, save\n#from layers import Deconv2D\nfrom keras import backend as K\nfrom keras.layers import Input, Dense, Reshape, Activation, Conv2D, LeakyReLU, Flatten, BatchNormalization as BN\nfrom keras.models import Sequential, Model\n#from keras import initializations\n\nlearning_rate = 0.0003\nbeta1 = .5\nz_dim = 128\n#df_dim = 64\ndef encoder(num_filters, ch, rows, cols):\n\n model = Sequential()\n X = Input(shape=(rows[-1], cols[-1], ch))\n\n model = Conv2D(num_filters, kernel_size=(5,5), strides=(2,2), padding='same', name='enc_conv2D_01', input_shape=(rows, cols, ch))(X)\n model = BN(axis=3, name=\"enc_bn_01\", 
epsilon=1e-5)(model)\n model = LeakyReLU(.2)(model)\n\n model = Conv2D(num_filters*2,kernel_size=(5,5), strides=(2,2), padding='same', name='enc_conv2D_02')(model)\n model = BN(axis=3, name=\"enc_bn_02\", epsilon=1e-5)(model)\n model = LeakyReLU(.2)(model)\n\n model = Conv2D(num_filters*4,kernel_size=(5,5), strides=(2,2), padding='same', name='enc_conv2D_03')(model)\n model = BN(axis=3, name=\"enc_bn_03\", epsilon=1e-5)(model)\n model = LeakyReLU(.2)(model)\n\n #model = Reshape((8,8,256))(model)\n model = Flatten()(model)\n model = Dense(2048, name=\"enc_dense_01\")(model)\n model = BN(name=\"enc_bn_04\", epsilon=1e-5)(model)\n encoded_model = LeakyReLU(.2)(model)\n\n mean = Dense(z_dim, name=\"e_h3_lin\")(encoded_model)\n logsigma = Dense(z_dim, name=\"e_h4_lin\", activation=\"tanh\")(encoded_model)\n meansigma = Model([X], [mean, logsigma])\n\n\n #X_decode = Input(shape=(8,8,256))\n #model = Dense(256, name=\"dec_dense_01\")(encoded_model)\n\n# enc_model = Model(X, encoded_model)\n# dec_model = Model(X, model)\n return meansigma\n\n\ndf_dim = 64\nbatch_size = 64\nchannels = 3\nheight = np.array([64])\nwidth = np.array([64])\n\nvae_encoder = encoder(num_filters=df_dim, ch=channels, rows=height, cols=width)\nvae_encoder.compile(optimizer='RMSProp', loss='binary_crossentropy')\nvae_encoder.summary()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2072, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.environ", "line_number": 3, "usage_type": "attribute"}, {"api_name": "keras.models.Sequential", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 28, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.Conv2D", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 33, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.layers.LeakyReLU", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "570560748", "text": "import os\nfrom collections import OrderedDict\n\n# compatibility with python 2/3\ntry:\n basestring\nexcept NameError:\n basestring = str\n\nclass EnvModifierError(Exception):\n \"\"\"Env modifier error.\"\"\"\n\nclass EnvModifierInvalidVarError(EnvModifierError):\n \"\"\"Env modifier invalid var 
error.\"\"\"\n\nclass EnvModifierInvalidVarValueError(EnvModifierError):\n \"\"\"Env modifier invalid var value error.\"\"\"\n\nclass EnvModifier(object):\n \"\"\"\n Creates a new modified environment.\n \"\"\"\n\n def __init__(self, baseEnv={}):\n \"\"\"\n Create an env modifier object.\n \"\"\"\n self.__env = {\n 'prepend': OrderedDict(),\n 'append': OrderedDict(),\n 'override': OrderedDict(),\n 'unset': set()\n }\n\n self.__setBaseEnv(baseEnv)\n\n def baseEnv(self):\n \"\"\"\n Return a dict containing the base environment used for modification.\n \"\"\"\n return self.__baseEnv\n\n def addFromEnvModifier(self, envModifier):\n \"\"\"\n Add the contents from another envModifier object.\n \"\"\"\n assert isinstance(envModifier, EnvModifier), \\\n \"Invalid EnvModifier type!\"\n\n # prepend\n for varName in envModifier.prependVarNames():\n self.addPrependVar(varName, envModifier.prependVar(varName))\n\n # append\n for varName in envModifier.appendVarNames():\n self.addAppendVar(varName, envModifier.appendVar(varName))\n\n # override\n for varName in envModifier.overrideVarNames():\n self.setOverrideVar(varName, envModifier.overrideVar(varName))\n\n # unset\n for varName in envModifier.unsetVarNames():\n self.addUnsetVar(varName)\n\n def addFromDict(self, inputDict):\n \"\"\"\n Add the contents from a dict containing the vars inside of the operation type.\n\n The dictionary must follow the specification where the operation types\n should contain another dict with the environment variables, except from\n unset that expects a list of variable names. The operation types are\n (append, prepend, override and unset) and they can be optional:\n {\n \"append\": [\n \"VAR_NAME\": [\n \"VAR_VALUE\"\n ]\n ],\n \"prepend\": [\n \"VAR_NAME\": [\n \"VAR_VALUE\"\n ]\n ],\n \"override\": [\n \"VAR_NAME\": [\n \"VAR_VALUE\"\n ]\n ],\n \"unset\": [\n \"VAR_NAME\"\n ]\n }\n \"\"\"\n assert isinstance(inputDict, dict), \\\n \"Invalid dictionary!\"\n\n # prepend\n if 'prepend' in inputDict:\n for envName, envValue in inputDict[\"prepend\"].items():\n self.addPrependVar(envName, envValue)\n\n # append\n if 'append' in inputDict:\n for envName, envValue in inputDict[\"append\"].items():\n self.addAppendVar(envName, envValue)\n\n # override\n if 'override' in inputDict:\n for envName, envValue in inputDict[\"override\"].items():\n self.setOverrideVar(envName, envValue)\n\n # unset\n if 'unset' in inputDict:\n for envName in inputDict[\"unset\"]:\n self.addUnsetVar(envName)\n\n def addPrependVar(self, name, value):\n \"\"\"\n Add a value that is going to be prepended to the env.\n \"\"\"\n if name not in self.__env['prepend']:\n self.__env['prepend'][name] = []\n\n if isinstance(value, list):\n self.__env['prepend'][name] = value + self.__env['prepend'][name]\n else:\n self.__env['prepend'][name].insert(0, value)\n\n def prependVar(self, name):\n \"\"\"\n Return a list of values used by the prepend var.\n \"\"\"\n if name not in self.__env['prepend']:\n raise EnvModifierInvalidVarError(\n 'Invalid Variable \"{0}\"'.format(name)\n )\n\n return self.__env['prepend'][name]\n\n def prependVarNames(self):\n \"\"\"\n Return a list of prepend var names.\n \"\"\"\n return self.__env['prepend'].keys()\n\n def addAppendVar(self, name, value):\n \"\"\"\n Add a value that is going to be appended to the env.\n \"\"\"\n if name not in self.__env['append']:\n self.__env['append'][name] = []\n\n if isinstance(value, list):\n self.__env['append'][name] += value\n else:\n self.__env['append'][name].append(value)\n\n def 
appendVar(self, name):\n \"\"\"\n Return a list of values used by the append var.\n \"\"\"\n if name not in self.__env['append']:\n raise EnvModifierInvalidVarError(\n 'Invalid Variable \"{0}\"'.format(name)\n )\n\n return self.__env['append'][name]\n\n def appendVarNames(self):\n \"\"\"\n Return a list of append var names.\n \"\"\"\n return self.__env['append'].keys()\n\n def setOverrideVar(self, name, value):\n \"\"\"\n Set a variable value; use it when you want to override a variable.\n \"\"\"\n self.__env['override'][name] = value\n\n def overrideVar(self, name):\n \"\"\"\n Return the value that is going to be used to override the original value.\n \"\"\"\n if name not in self.__env['override']:\n raise EnvModifierInvalidVarError(\n 'Invalid Variable \"{0}\"'.format(name)\n )\n\n return self.__env['override'][name]\n\n def overrideVarNames(self):\n \"\"\"\n Return a list of override var names.\n \"\"\"\n return self.__env['override'].keys()\n\n def addUnsetVar(self, name):\n \"\"\"\n Add a variable name that is going to be unset.\n \"\"\"\n self.__env['unset'].add(name)\n\n def unsetVarNames(self):\n \"\"\"\n Return a list of variables that are going to be unset.\n \"\"\"\n return list(self.__env['unset'])\n\n def generate(self):\n \"\"\"\n Return a brand new environment based on the current configuration.\n \"\"\"\n result = dict(self.baseEnv())\n\n self.__modifyPrependVars(result)\n self.__modifyAppendVars(result)\n self.__modifyOverrideVars(result)\n self.__modifyUnsetVars(result)\n\n return result\n\n def __modifyUnsetVars(self, env):\n \"\"\"\n Modify in place the env by unsetting the variables.\n \"\"\"\n for varName in self.unsetVarNames():\n if varName in env:\n del env[varName]\n\n def __modifyOverrideVars(self, env):\n \"\"\"\n Modify in place the env by overriding variables.\n \"\"\"\n for varName in self.overrideVarNames():\n env[varName] = self.__convertEnvValue(\n self.overrideVar(varName)\n )\n\n def __modifyPrependVars(self, env):\n \"\"\"\n Modify in place the env by prepending variables.\n \"\"\"\n for varName in self.prependVarNames():\n convertedValue = self.__convertEnvValue(self.prependVar(varName))\n\n if varName in env and len(env[varName]):\n convertedValue = '{0}{1}{2}'.format(\n convertedValue,\n os.pathsep,\n env[varName]\n )\n\n env[varName] = convertedValue\n\n def __modifyAppendVars(self, env):\n \"\"\"\n Modify in place the env by appending variables.\n \"\"\"\n for varName in self.appendVarNames():\n convertedValue = self.__convertEnvValue(self.appendVar(varName))\n\n if varName in env and len(env[varName]):\n convertedValue = '{2}{1}{0}'.format(\n convertedValue,\n os.pathsep,\n env[varName]\n )\n\n env[varName] = convertedValue\n\n def __convertEnvValue(self, value):\n \"\"\"\n Convert a value to an environment convention.\n \"\"\"\n result = []\n\n if isinstance(value, basestring):\n result.append(value)\n elif isinstance(value, list):\n result = value\n else:\n raise EnvModifierInvalidVarValueError(\n 'Could not convert value: \"{0}\"'.format(str(value))\n )\n\n # map(str, ...) below stringifies all values in one go\n return os.pathsep.join(map(str, result))\n\n def __setBaseEnv(self, env):\n \"\"\"\n Set a dict with the base environment that should be used for modification.\n \"\"\"\n self.__baseEnv = dict(env)\n", "sub_path": "src/lib/kombi/EnvModifier.py", "file_name": "EnvModifier.py", "file_ext": "py", "file_size_in_byte": 8775, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "collections.OrderedDict", "line_number": 29, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 30, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 31, "usage_type": "call"}, {"api_name": "os.pathsep", "line_number": 251, "usage_type": "attribute"}, {"api_name": "os.pathsep", "line_number": 267, "usage_type": "attribute"}, {"api_name": "os.pathsep.join", "line_number": 293, "usage_type": "call"}, {"api_name": "os.pathsep", "line_number": 293, "usage_type": "attribute"}]} +{"seq_id": "256582113", "text": "\n\n\nimport gzip\nimport pickle\nimport math\nfrom random import shuffle\nimport numpy\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation\nfrom keras.layers import Convolution1D, MaxPooling1D\nfrom keras.layers import Dropout, Flatten\nfrom keras.initializations import normal, identity\nfrom keras.optimizers import Adadelta, RMSprop\nfrom keras.regularizers import l1,l2\nimport matplotlib.pyplot as plt\n\nfrom sklearn.svm import LinearSVC\nfrom sklearn.feature_selection import SelectFromModel\n\n\n\n\n\n\n# separate as train and test\ntrain_percentage = 0.8\nvalid_percentage = 0.1\ntest_percentage = 0.1\n\nGroups = 2\nhd_notes = 20\nBATCH_SIZE = 30\nnb_epoch = 250\n\n\n\nclass _Subject_with_data:\n def __init__(self, SubjectID, DX_Group):\n self.DX_Group = DX_Group\n self.SubjectID = SubjectID\n # baseline \n self.MRI_baseline = dict()\n self.fMRI_baseline = dict()\n # otherdata after baseline \n self.MRI_other = list()\n self.fMRI_other = list()\n\n\ndef read_data(index_list, total_data):\n _data = list()\n label = list()\n for index in index_list:\n subject = total_data[index]\n if subject.fMRI_baseline:\n imageID = list(subject.fMRI_baseline.keys())[0]\n imageData = subject.fMRI_baseline[imageID]\n _data.append(imageData)\n if subject.DX_Group == 'AD':\n label.append('1')\n else:\n label.append('0')\n if subject.fMRI_other:\n for subj in subject.fMRI_other:\n imageID = list(subj.keys())[0]\n imageData = subj[imageID]\n _data.append(imageData)\n if subject.DX_Group == 'AD':\n label.append('1')\n else:\n label.append('0')\n\n row_of_imageData = _data[0].shape[0]\n col_of_imageData = _data[0].shape[1]\n\n _array = numpy.empty([2,2])\n for index, content in enumerate(_data):\n if index == 0:\n _array = content;\n else:\n _array = numpy.concatenate((_array, content))\n\n _array = _array.reshape((len(_data), row_of_imageData, col_of_imageData))\n return _array, label\n\ndef normalize(_data):\n ## the shape is (ImageNo, featureNo, timesteps)\n ## the output should be (ImageNo, timesteps, featureNo)\n output = numpy.zeros((_data.shape[0], _data.shape[2], _data.shape[1]))\n for iNo in range(_data.shape[0]):\n for tNo in range(_data.shape[2]):\n _tmp = _data[iNo, :, tNo].astype(numpy.float)\n #_tmp_mean = numpy.mean(_tmp)\n #_tmp_std = numpy.std(_tmp)\n #_tmp = (_tmp - _tmp_mean)/_tmp_std\n _tmp = _tmp/(numpy.linalg.norm(_tmp))\n output[iNo, tNo, :] = _tmp\n\n return output\n\ndef balance(_data, _label):\n rate = numpy.mean(_label)\n label_list = list(_label)\n print(_data.shape)\n if rate < 0.5:\n # normal samples more than AD\n augment_rate = math.floor(1/rate)-1\n for label_no, label in enumerate(label_list):\n if label == 1:\n for i in range(augment_rate):\n # between -0.2 to 0.2\n _tmp = numpy.random.rand(1, _data.shape[1], _data.shape[2])*0.4-0.2\n _new_sample 
= _data[label_no,:,:]+_tmp\n _data = numpy.concatenate((_data, _new_sample))\n label_list.append('1')\n\n else:\n raise ValueError('No implementation for this situation')\n\n label_list = numpy.asarray(label_list).astype(numpy.float)\n return _data, label_list\n\n\ndef feature_selction(_train_data, _valid_data, _test_data, _train_label, _valid_label, _test_label):\n train_imageNo = _train_data.shape[0]\n valid_imageNo = _valid_data.shape[0]\n whole_data = numpy.concatenate((_train_data, _valid_data, _test_data))\n whole_data = whole_data.reshape((-1, 120))\n\n whole_label = numpy.concatenate((_train_label, _valid_label, _test_label))\n whole_label = list(whole_label)\n\n new_label_list = list()\n for i in whole_label:\n for j in range(100):\n new_label_list.append(i)\n\n assert len(new_label_list) == whole_data.shape[0]\n\n lsvc = LinearSVC(C=0.1, penalty=\"l1\", dual=False).fit(whole_data, new_label_list)\n model = SelectFromModel(lsvc, prefit=True)\n data_new = model.transform(whole_data)\n print ('After feature selection we have', data_new.shape[1], 'features.')\n\n data_new = data_new.reshape((-1, 100, data_new.shape[1]))\n _train_data = data_new[:train_imageNo,:,:]\n _valid_data = data_new[train_imageNo:train_imageNo+valid_imageNo,:,:]\n _test_data = data_new[train_imageNo+valid_imageNo:,:,:]\n\n return _train_data, _valid_data, _test_data\n\ndef section_ofData(_data, _data_label):\n _data_label_newList = list()\n _data = _data.reshape((-1, 20, _data.shape[2]))\n for i in _data_label:\n for j in range(5):\n _data_label_newList.append(i)\n\n _data_label_newList = numpy.asarray(_data_label_newList).astype(numpy.float)\n\n return _data, _data_label_newList\n\n# read data\nwith gzip.open('Clean_imageID_with_Data_Bandpass.gz', 'rb') as input_file:\n subjects_list = pickle.load(input_file)\n\n\n\n\ntotal_number = len(subjects_list)\ntrain_number = math.ceil(total_number*train_percentage)\nvalid_number = math.ceil(total_number*valid_percentage)\ntest_number = total_number-train_number-valid_number\n\nindex_of_subjects = [i for i in range(total_number)]\nshuffle(index_of_subjects)\n\ntrain_index = index_of_subjects[:train_number]\nvalid_index = index_of_subjects[train_number:train_number+valid_number]\ntest_index = index_of_subjects[train_number+valid_number:]\n\n# combine data\ntrain_data, train_label = read_data(train_index, subjects_list)\nvalid_data, valid_label = read_data(valid_index, subjects_list)\ntest_data, test_label = read_data(test_index, subjects_list)\n\ntrain_label = numpy.asarray(train_label).astype(numpy.float)\nvalid_label = numpy.asarray(valid_label).astype(numpy.float)\ntest_label = numpy.asarray(test_label).astype(numpy.float)\n\n\n# normalize data\ntrain_data = normalize(train_data)\nvalid_data = normalize(valid_data)\ntest_data = normalize(test_data)\n\n# feature selection\n\n#train_data, valid_data, test_data = feature_selction(train_data, valid_data, test_data, \\\n# train_label, valid_label, test_label)\n\n\n# data balance\ntrain_data, train_label = balance(train_data, train_label)\n\n# data inverse\ntrain_data_inverse = train_data[:,::-1,:]\n\ntrain_data = numpy.concatenate((train_data, train_data_inverse))\ntrain_label = numpy.concatenate((train_label, train_label))\n\n# separate as sections\n\n#train_data, train_label = section_ofData(train_data, train_label)\n#valid_data, valid_label = section_ofData(valid_data, valid_label)\n#test_data, test_label = section_ofData(test_data, test_label)\n\n\nprint ('*'*40)\nprint ('We have training 
subjects:',train_number)\nprint ('We have validation subjects:',valid_number)\nprint ('We have test subjects:',test_number)\nprint ('We have training images:', train_data.shape[0])\nprint ('We have validation images:', valid_data.shape[0])\nprint ('We have test images:', test_data.shape[0])\nprint ('*'*40)\n\n\n\nY_train = np_utils.to_categorical(train_label, Groups)\nY_test = np_utils.to_categorical(test_label, Groups)\nY_valid = np_utils.to_categorical(valid_label, Groups)\n\ntimesteps = train_data.shape[1]\nfeatureNo = train_data.shape[2]\n\nprint (\"Building model...\")\nmodel = Sequential()\nmodel.add(Convolution1D(200, 10, border_mode='same', input_shape=(timesteps, featureNo)))\nmodel.add(Activation('relu'))\nmodel.add(Convolution1D(100, 5, border_mode='same'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling1D(pool_length=4, stride=None, border_mode='same'))\nmodel.add(Convolution1D(50, 5, border_mode='same'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution1D(10, 3, border_mode='same'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling1D(pool_length=2, stride=None, border_mode='same'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution1D(1, 3, border_mode='same'))\n# CNN, convolution on several brain images of timesteps\nmodel.add(Flatten())\nmodel.add(Activation('relu'))\nmodel.add(Dense(Groups))\nmodel.add(Activation('softmax'))\nadad = Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0)\nmodel.compile(loss='categorical_crossentropy', optimizer=adad, \\\n metrics=[\"accuracy\"])\n\nprint (\"Training model...\")\nhistory = model.fit(train_data, Y_train, \\\n batch_size = BATCH_SIZE, nb_epoch=nb_epoch, \\\n verbose=1, validation_data=(valid_data, Y_valid))\n\nscores = model.evaluate(test_data, Y_test, verbose=1)\nprint ('RNN test score:', scores[0])\nprint ('RNN test accuracy:', scores[1])\nprint ('True Labels:', test_label)\nprint (model.predict_classes(test_data))\nprint ('Baseline of training is:',numpy.mean(train_label))\nprint ('Baseline of validation is:', numpy.mean(valid_label))\n\n# summarize history for accuracy\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'validation'], loc='upper left')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n", "sub_path": "fMRI_CSV_Analysis/SIEMENS/RNN/CNN.py", "file_name": "CNN.py", "file_ext": "py", "file_size_in_byte": 9468, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.empty", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 93, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 97, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 103, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 113, "usage_type": "attribute"}, {"api_name": 
"numpy.concatenate", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 131, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 141, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectFromModel", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 160, "usage_type": "attribute"}, {"api_name": "gzip.open", "line_number": 165, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 166, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 172, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 173, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 188, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 189, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 190, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 211, "usage_type": "call"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 231, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 231, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 232, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 232, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 233, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 233, "usage_type": "name"}, {"api_name": "keras.models.Sequential", "line_number": 239, "usage_type": "call"}, {"api_name": "keras.layers.Convolution1D", "line_number": 240, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 241, "usage_type": "call"}, {"api_name": "keras.layers.Convolution1D", "line_number": 242, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 243, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 244, "usage_type": "call"}, {"api_name": "keras.layers.Convolution1D", "line_number": 245, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 246, "usage_type": "call"}, {"api_name": "keras.layers.Convolution1D", "line_number": 247, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 248, "usage_type": "call"}, {"api_name": "keras.layers.MaxPooling1D", "line_number": 249, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 250, "usage_type": "call"}, {"api_name": "keras.layers.Convolution1D", "line_number": 251, "usage_type": "call"}, {"api_name": "keras.layers.Flatten", "line_number": 253, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 254, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 255, "usage_type": "call"}, 
{"api_name": "keras.layers.core.Activation", "line_number": 256, "usage_type": "call"}, {"api_name": "keras.optimizers.Adadelta", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 278, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 278, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 287, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 287, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 288, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 289, "usage_type": "name"}]} +{"seq_id": "599016414", "text": "#\n# pylint: disable=no-member,comparison-with-callable,undefined-variable,singleton-comparison\nimport logging\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nfrom sqlalchemy.orm import aliased\nfrom sqlalchemy import or_\n\nfrom core import config\nfrom core.db.models import (\n db,\n SEARCH_ACTION,\n STARTED_STATE,\n SUBSCRIPTION_ACTIVE_STATE,\n ORDER_SHIPPED_STATE,\n COMPLETED_STATE,\n EXCEPTION_STATE_SET,\n)\nfrom core.db.models.order import Order\nfrom core.db.models.order_history import OrderHistory\nfrom core.db.models.pipeline_sequence import PipelineSequence\nfrom core.db.models.user import User\nfrom core.db.models.user_address import UserAddress\nfrom core.db.models.device_token import DeviceToken\nfrom core.db.models.shipping_rate import ShippingRate\nfrom core.db.models.source_location import SourceLocation\nfrom core.db.models.salestax_rate import SalestaxRate\nfrom core.db.models.user_subscription import UserSubscription\nfrom 
core.db.models.user_subscription_snapshot import UserSubscriptionSnapshot\n\nUSA_STATES_POSTCODE = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY',\n 'American Samoa': 'AS',\n 'District of Columbia': 'DC',\n 'Federated States of Micronesia': 'FM',\n 'Guam': 'GU',\n 'Marshall Islands': 'MH',\n 'Northern Mariana Islands': 'MP',\n 'Palau': 'PW',\n 'Puerto Rico': 'PR',\n 'Virgin Islands': 'VI',\n}\n\nMONTH = [\n 'January',\n 'February',\n 'March',\n 'April',\n 'May',\n 'June',\n 'July',\n 'August',\n 'September',\n 'October',\n 'November',\n 'December',\n]\n\n\ndef get_order_creation_month(order):\n return MONTH[order.order_history[0].created_at.month - 1]\n\n\ndef state_to_postcode(state_name):\n return USA_STATES_POSTCODE.get(state_name)\n\n\ndef get_orders_to_run():\n orders = db.session.query(\n Order\n ).join(\n Order.subscription\n ).filter(\n Order.scheduled_for <= datetime.utcnow(),\n Order.action != None,\n or_(\n UserSubscription.state == SUBSCRIPTION_ACTIVE_STATE,\n Order.state != STARTED_STATE\n ),\n ).all()\n db.session.close()\n return orders\n\n\ndef get_timed_out_orders():\n orders = db.session.query(\n Order\n ).join(\n Order.subscription\n ).filter(\n UserSubscription.state == SUBSCRIPTION_ACTIVE_STATE,\n Order.state_changed_at < datetime.utcnow() - timedelta(days=config.ORDER_TIMEOUT_DAYS),\n Order.timed_out == False,\n Order.state.notin_(\n {STARTED_STATE, ORDER_SHIPPED_STATE, COMPLETED_STATE} | EXCEPTION_STATE_SET\n )\n ).all()\n return orders\n\n\ndef get_order(order_id):\n return db.session.query(Order).filter_by(id=order_id).first()\n\n\ndef get_current_order(user_id):\n order = db.session.query(Order).filter_by(\n user_id=user_id,\n ).order_by(\n Order.created_at.desc()\n ).first()\n return order\n\n\ndef get_order_tracking_url(order):\n try:\n return order.shipping_method.carrier.tracking_url_template.format(\n order.shipping_tracking_num\n )\n except Exception as e:\n logging.warning(\"Cannot get tracking url for order: %s: %s\", order.id, e)\n\n\ndef get_default_wine_expert():\n expert = db.session.query(\n User\n ).filter(User.wine_expert_id == None).first()\n return expert\n\n\ndef create_order(user_id, subscription_id, scheduled_for=None):\n address, user = db.session.query(\n UserAddress, User\n ).join(\n User, User.primary_user_address_id == UserAddress.id\n ).filter(\n User.id == user_id\n ).first()\n\n subscription = db.session.query(\n UserSubscription\n ).filter_by(id=subscription_id).first()\n\n order = Order(\n subscription_id=subscription_id,\n user_id=user_id,\n state=STARTED_STATE,\n 
action=SEARCH_ACTION,\n shipping_name=user.first_name,\n shipping_street1=address.street1,\n shipping_street2=address.street2,\n shipping_state_region=address.state_region,\n shipping_country=address.country,\n shipping_city=address.city,\n shipping_postcode=address.postcode,\n shipping_phone=user.phone\n )\n if scheduled_for is not None:\n order.scheduled_for = scheduled_for\n db.session.add(order)\n db.session.commit()\n\n order.order_number = f'{order.id:07}'\n\n order_history = OrderHistory(\n order_id=order.id,\n state=order.state\n )\n subscription_snapshot = UserSubscriptionSnapshot(\n order_id=order.id,\n type=subscription.type,\n bottle_qty=subscription.bottle_qty,\n budget=subscription.budget\n )\n db.session.add(order_history)\n db.session.add(subscription_snapshot)\n db.session.commit()\n return order\n\n\ndef move_order(order, action, state, exception_msg=None):\n order.action = action\n order.state = state\n order.scheduled_for = None\n order.exception_message = exception_msg\n\n parent_order_history = db.session.query(OrderHistory).filter_by(\n order_id=order.id\n ).order_by(OrderHistory.created_at.desc()).first()\n\n order_history = OrderHistory(\n order_id=order.id,\n state=order.state,\n parent_id=parent_order_history.id,\n exception_message=exception_msg\n )\n\n db.session.add(order_history)\n db.session.flush()\n order.state_changed_at = order_history.created_at\n db.session.commit()\n\n return order\n\n\ndef get_wine_expert_for_order(order_id):\n expert = aliased(User)\n\n return db.session.query(\n expert\n ).join(\n User, User.wine_expert_id == expert.id\n ).join(\n Order, Order.user_id == User.id\n ).filter(User.id == Order.user_id).first()\n\n\ndef save_device_token(user_id, token):\n exists = db.session.query(\n DeviceToken\n ).filter_by(token=token).all()\n\n if exists:\n return\n\n device_token = DeviceToken(\n user_id=user_id,\n token=token\n )\n db.session.add(device_token)\n db.session.commit()\n\n\ndef get_shipping_cost(source_id, bottle_qty, postcode):\n try:\n shipping_cost = db.session.query(ShippingRate.shipping_cost).join(SourceLocation).filter(\n SourceLocation.source_id == source_id,\n ShippingRate.bottle_qty == bottle_qty,\n ShippingRate.from_postcode <= postcode,\n ShippingRate.to_postcode >= postcode\n ).first()\n except Exception as e:\n logging.exception(\n 'Error when getting shipping cost for source: %s, postcode: %s, bottle_qty: %s, %s' %\n (source_id, postcode, bottle_qty, e)\n )\n raise\n\n if shipping_cost is None:\n raise Exception(\n 'Shipping cost not found for source: %s, postcode: %s, bottle_qty: %s' %\n (source_id, postcode, bottle_qty)\n )\n\n return shipping_cost[0]\n\n\ndef get_tax_rate(source_id, postcode):\n try:\n tax_rate = db.session.query(SalestaxRate.taxrate).join(SourceLocation).filter(\n SourceLocation.source_id == source_id,\n SalestaxRate.from_postcode <= postcode,\n SalestaxRate.to_postcode >= postcode\n ).first()\n except Exception as e:\n logging.exception(\n 'Error when getting tax rate for source: %s, postcode: %s, %s' %\n (source_id, postcode, e)\n )\n raise\n\n if tax_rate is None:\n raise Exception(\n 'Tax rate not found for source: %s, postcode: %s' %\n (source_id, postcode)\n )\n\n return tax_rate[0]\n\n\ndef get_sources_budget(sources, budget, bottle_qty, postcode):\n res = {}\n for source_id in sources:\n try:\n shipping_cost = get_shipping_cost(source_id, bottle_qty, postcode)\n tax_rate = get_tax_rate(source_id, postcode)\n\n res[source_id] = int((budget - shipping_cost) / (1 + tax_rate))\n except 
Exception:\n logging.exception(\n 'Error when getting shipping or tax rates for source: %s' % source_id\n )\n\n return res\n\n\ndef fetch_schedules_by_ids(schedule_ids):\n return PipelineSequence.query.filter(\n PipelineSequence.id.in_(schedule_ids)\n )\n", "sub_path": "core/dbmethods/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 9212, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "core.db.models.db.session.query", "line_number": 121, "usage_type": "call"}, {"api_name": "core.db.models.order.Order", "line_number": 122, "usage_type": "argument"}, {"api_name": "core.db.models.db.session", "line_number": 121, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 121, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.subscription", "line_number": 124, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 124, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.scheduled_for", "line_number": 126, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 126, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 126, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 126, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.action", "line_number": 127, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 127, "usage_type": "name"}, {"api_name": "sqlalchemy.or_", "line_number": 128, "usage_type": "call"}, {"api_name": "core.db.models.user_subscription.UserSubscription.state", "line_number": 129, "usage_type": "attribute"}, {"api_name": "core.db.models.user_subscription.UserSubscription", "line_number": 129, "usage_type": "name"}, {"api_name": "core.db.models.SUBSCRIPTION_ACTIVE_STATE", "line_number": 129, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.state", "line_number": 130, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 130, "usage_type": "name"}, {"api_name": "core.db.models.STARTED_STATE", "line_number": 130, "usage_type": "name"}, {"api_name": "core.db.models.db.session.close", "line_number": 133, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 133, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 133, "usage_type": "name"}, {"api_name": "core.db.models.db.session.query", "line_number": 138, "usage_type": "call"}, {"api_name": "core.db.models.order.Order", "line_number": 139, "usage_type": "argument"}, {"api_name": "core.db.models.db.session", "line_number": 138, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 138, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.subscription", "line_number": 141, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 141, "usage_type": "name"}, {"api_name": "core.db.models.user_subscription.UserSubscription.state", "line_number": 143, "usage_type": "attribute"}, {"api_name": "core.db.models.user_subscription.UserSubscription", "line_number": 143, "usage_type": "name"}, {"api_name": "core.db.models.SUBSCRIPTION_ACTIVE_STATE", "line_number": 143, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.state_changed_at", "line_number": 144, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 144, "usage_type": "name"}, 
{"api_name": "datetime.datetime.utcnow", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 144, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 144, "usage_type": "call"}, {"api_name": "core.config.ORDER_TIMEOUT_DAYS", "line_number": 144, "usage_type": "attribute"}, {"api_name": "core.config", "line_number": 144, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.timed_out", "line_number": 145, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 145, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.state.notin_", "line_number": 146, "usage_type": "call"}, {"api_name": "core.db.models.order.Order.state", "line_number": 146, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 146, "usage_type": "name"}, {"api_name": "core.db.models.STARTED_STATE", "line_number": 147, "usage_type": "name"}, {"api_name": "core.db.models.ORDER_SHIPPED_STATE", "line_number": 147, "usage_type": "name"}, {"api_name": "core.db.models.COMPLETED_STATE", "line_number": 147, "usage_type": "name"}, {"api_name": "core.db.models.EXCEPTION_STATE_SET", "line_number": 147, "usage_type": "name"}, {"api_name": "core.db.models.db.session.query", "line_number": 154, "usage_type": "call"}, {"api_name": "core.db.models.order.Order", "line_number": 154, "usage_type": "argument"}, {"api_name": "core.db.models.db.session", "line_number": 154, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 154, "usage_type": "name"}, {"api_name": "core.db.models.db.session.query", "line_number": 158, "usage_type": "call"}, {"api_name": "core.db.models.order.Order", "line_number": 158, "usage_type": "argument"}, {"api_name": "core.db.models.db.session", "line_number": 158, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 158, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.created_at.desc", "line_number": 161, "usage_type": "call"}, {"api_name": "core.db.models.order.Order.created_at", "line_number": 161, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 161, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 172, "usage_type": "call"}, {"api_name": "core.db.models.db.session.query", "line_number": 176, "usage_type": "call"}, {"api_name": "core.db.models.user.User", "line_number": 177, "usage_type": "argument"}, {"api_name": "core.db.models.db.session", "line_number": 176, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 176, "usage_type": "name"}, {"api_name": "core.db.models.user.User.wine_expert_id", "line_number": 178, "usage_type": "attribute"}, {"api_name": "core.db.models.user.User", "line_number": 178, "usage_type": "name"}, {"api_name": "core.db.models.user.User", "line_number": 186, "usage_type": "argument"}, {"api_name": "core.db.models.db.session.query", "line_number": 183, "usage_type": "call"}, {"api_name": "core.db.models.user_address.UserAddress", "line_number": 184, "usage_type": "argument"}, {"api_name": "core.db.models.user.User", "line_number": 184, "usage_type": "argument"}, {"api_name": "core.db.models.db.session", "line_number": 183, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 183, "usage_type": "name"}, {"api_name": "core.db.models.user.User.primary_user_address_id", "line_number": 186, "usage_type": "attribute"}, {"api_name": "core.db.models.user_address.UserAddress.id", 
"line_number": 186, "usage_type": "attribute"}, {"api_name": "core.db.models.user_address.UserAddress", "line_number": 186, "usage_type": "name"}, {"api_name": "core.db.models.user.User.id", "line_number": 188, "usage_type": "attribute"}, {"api_name": "core.db.models.user.User", "line_number": 188, "usage_type": "name"}, {"api_name": "core.db.models.db.session.query", "line_number": 191, "usage_type": "call"}, {"api_name": "core.db.models.user_subscription.UserSubscription", "line_number": 192, "usage_type": "argument"}, {"api_name": "core.db.models.db.session", "line_number": 191, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 191, "usage_type": "name"}, {"api_name": "core.db.models.order.Order", "line_number": 195, "usage_type": "call"}, {"api_name": "core.db.models.STARTED_STATE", "line_number": 198, "usage_type": "name"}, {"api_name": "core.db.models.SEARCH_ACTION", "line_number": 199, "usage_type": "name"}, {"api_name": "core.db.models.db.session.add", "line_number": 211, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 211, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 211, "usage_type": "name"}, {"api_name": "core.db.models.db.session.commit", "line_number": 212, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 212, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 212, "usage_type": "name"}, {"api_name": "core.db.models.order_history.OrderHistory", "line_number": 216, "usage_type": "call"}, {"api_name": "core.db.models.user_subscription_snapshot.UserSubscriptionSnapshot", "line_number": 220, "usage_type": "call"}, {"api_name": "core.db.models.db.session.add", "line_number": 226, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 226, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 226, "usage_type": "name"}, {"api_name": "core.db.models.db.session.add", "line_number": 227, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 227, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 227, "usage_type": "name"}, {"api_name": "core.db.models.db.session.commit", "line_number": 228, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 228, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 228, "usage_type": "name"}, {"api_name": "core.db.models.db.session.query", "line_number": 238, "usage_type": "call"}, {"api_name": "core.db.models.order_history.OrderHistory", "line_number": 238, "usage_type": "argument"}, {"api_name": "core.db.models.db.session", "line_number": 238, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 238, "usage_type": "name"}, {"api_name": "core.db.models.order_history.OrderHistory.created_at.desc", "line_number": 240, "usage_type": "call"}, {"api_name": "core.db.models.order_history.OrderHistory.created_at", "line_number": 240, "usage_type": "attribute"}, {"api_name": "core.db.models.order_history.OrderHistory", "line_number": 240, "usage_type": "name"}, {"api_name": "core.db.models.order_history.OrderHistory", "line_number": 242, "usage_type": "call"}, {"api_name": "core.db.models.db.session.add", "line_number": 249, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 249, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 249, "usage_type": "name"}, {"api_name": 
"core.db.models.db.session.flush", "line_number": 250, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 250, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 250, "usage_type": "name"}, {"api_name": "core.db.models.db.session.commit", "line_number": 252, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 252, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 252, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.aliased", "line_number": 258, "usage_type": "call"}, {"api_name": "core.db.models.user.User", "line_number": 258, "usage_type": "argument"}, {"api_name": "core.db.models.order.Order", "line_number": 265, "usage_type": "argument"}, {"api_name": "core.db.models.user.User", "line_number": 263, "usage_type": "argument"}, {"api_name": "core.db.models.db.session.query", "line_number": 260, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 260, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 260, "usage_type": "name"}, {"api_name": "core.db.models.user.User.wine_expert_id", "line_number": 263, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order.user_id", "line_number": 265, "usage_type": "attribute"}, {"api_name": "core.db.models.user.User.id", "line_number": 265, "usage_type": "attribute"}, {"api_name": "core.db.models.user.User", "line_number": 265, "usage_type": "name"}, {"api_name": "core.db.models.user.User.id", "line_number": 266, "usage_type": "attribute"}, {"api_name": "core.db.models.user.User", "line_number": 266, "usage_type": "name"}, {"api_name": "core.db.models.order.Order.user_id", "line_number": 266, "usage_type": "attribute"}, {"api_name": "core.db.models.order.Order", "line_number": 266, "usage_type": "name"}, {"api_name": "core.db.models.db.session.query", "line_number": 270, "usage_type": "call"}, {"api_name": "core.db.models.device_token.DeviceToken", "line_number": 271, "usage_type": "argument"}, {"api_name": "core.db.models.db.session", "line_number": 270, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 270, "usage_type": "name"}, {"api_name": "core.db.models.device_token.DeviceToken", "line_number": 277, "usage_type": "call"}, {"api_name": "core.db.models.db.session.add", "line_number": 281, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 281, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 281, "usage_type": "name"}, {"api_name": "core.db.models.db.session.commit", "line_number": 282, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 282, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 282, "usage_type": "name"}, {"api_name": "core.db.models.source_location.SourceLocation", "line_number": 287, "usage_type": "argument"}, {"api_name": "core.db.models.db.session.query", "line_number": 287, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 287, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 287, "usage_type": "name"}, {"api_name": "core.db.models.shipping_rate.ShippingRate.shipping_cost", "line_number": 287, "usage_type": "attribute"}, {"api_name": "core.db.models.shipping_rate.ShippingRate", "line_number": 287, "usage_type": "name"}, {"api_name": "core.db.models.source_location.SourceLocation.source_id", "line_number": 288, "usage_type": "attribute"}, {"api_name": 
"core.db.models.source_location.SourceLocation", "line_number": 288, "usage_type": "name"}, {"api_name": "core.db.models.shipping_rate.ShippingRate.bottle_qty", "line_number": 289, "usage_type": "attribute"}, {"api_name": "core.db.models.shipping_rate.ShippingRate", "line_number": 289, "usage_type": "name"}, {"api_name": "core.db.models.shipping_rate.ShippingRate.from_postcode", "line_number": 290, "usage_type": "attribute"}, {"api_name": "core.db.models.shipping_rate.ShippingRate", "line_number": 290, "usage_type": "name"}, {"api_name": "core.db.models.shipping_rate.ShippingRate.to_postcode", "line_number": 291, "usage_type": "attribute"}, {"api_name": "core.db.models.shipping_rate.ShippingRate", "line_number": 291, "usage_type": "name"}, {"api_name": "logging.exception", "line_number": 294, "usage_type": "call"}, {"api_name": "core.db.models.source_location.SourceLocation", "line_number": 311, "usage_type": "argument"}, {"api_name": "core.db.models.db.session.query", "line_number": 311, "usage_type": "call"}, {"api_name": "core.db.models.db.session", "line_number": 311, "usage_type": "attribute"}, {"api_name": "core.db.models.db", "line_number": 311, "usage_type": "name"}, {"api_name": "core.db.models.salestax_rate.SalestaxRate.taxrate", "line_number": 311, "usage_type": "attribute"}, {"api_name": "core.db.models.salestax_rate.SalestaxRate", "line_number": 311, "usage_type": "name"}, {"api_name": "core.db.models.source_location.SourceLocation.source_id", "line_number": 312, "usage_type": "attribute"}, {"api_name": "core.db.models.source_location.SourceLocation", "line_number": 312, "usage_type": "name"}, {"api_name": "core.db.models.salestax_rate.SalestaxRate.from_postcode", "line_number": 313, "usage_type": "attribute"}, {"api_name": "core.db.models.salestax_rate.SalestaxRate", "line_number": 313, "usage_type": "name"}, {"api_name": "core.db.models.salestax_rate.SalestaxRate.to_postcode", "line_number": 314, "usage_type": "attribute"}, {"api_name": "core.db.models.salestax_rate.SalestaxRate", "line_number": 314, "usage_type": "name"}, {"api_name": "logging.exception", "line_number": 317, "usage_type": "call"}, {"api_name": "logging.exception", "line_number": 341, "usage_type": "call"}, {"api_name": "core.db.models.pipeline_sequence.PipelineSequence.query.filter", "line_number": 349, "usage_type": "call"}, {"api_name": "core.db.models.pipeline_sequence.PipelineSequence.query", "line_number": 349, "usage_type": "attribute"}, {"api_name": "core.db.models.pipeline_sequence.PipelineSequence", "line_number": 349, "usage_type": "name"}, {"api_name": "core.db.models.pipeline_sequence.PipelineSequence.id.in_", "line_number": 350, "usage_type": "call"}, {"api_name": "core.db.models.pipeline_sequence.PipelineSequence.id", "line_number": 350, "usage_type": "attribute"}, {"api_name": "core.db.models.pipeline_sequence.PipelineSequence", "line_number": 350, "usage_type": "name"}]} +{"seq_id": "210988550", "text": "\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import url\nfrom . 
import views\n\n# Create your urls here.\n\nurlpatterns = [\n\turl(r'^login$',views.login,name='Main Page'),\n    url(r'^manager/(?P[^/]+)/$',views.manager,name='Main Page'),\n    url(r'^delivery/(?P[^/]+)/$',views.delivery,name='Main Page'),\n    url(r'^api/getnexttask',views.getNextTask,name='api'),\n    url(r'^api/deleteTask',views.deleteTask,name='api'),\n    url(r'^api/login',views.loginApi,name='api'),\n    url(r'^api/getmytasksquota/(?P[^/]+)/$',views.getmytasksquota,name='api'),\n    url(r'^api/getmytasks/(?P[^/]+)/$',views.getMyTaks,name='api'),\n    url(r'^$', views.login, name='index'),\n    url(r'^chat/(?P[^/]+)/$', views.room, name='room'),\n    ]\n", "sub_path": "app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 786, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "207547481", "text": "\nimport plugin\n\nfrom twisted.python import log\n\nclass Invitejoiner(plugin.Plugin):\n\n    def __init__(self):\n        plugin.Plugin.__init__(self, \"Invitejoiner\")\n\n    def invited(self, server_id, channel):\n        log.msg(\"Invited to: \", channel)\n        self.join(server_id, channel)\n\nif __name__ == \"__main__\":\n    import sys\n    sys.exit(Invitejoiner.run())\n\n", "sub_path": "plugins/invitejoiner/invitejoiner.py", "file_name": "invitejoiner.py", "file_ext": "py", "file_size_in_byte": 346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "plugin.Plugin", "line_number": 6, "usage_type": "attribute"}, {"api_name": "plugin.Plugin.__init__", "line_number": 9, "usage_type": "call"}, {"api_name": "plugin.Plugin", "line_number": 9, "usage_type": "attribute"}, {"api_name": "twisted.python.log.msg", "line_number": 12, "usage_type": "call"}, {"api_name": "twisted.python.log", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "446926690", "text": "from fundfunctions import *\nfrom scipy.special import gamma as gfunc\n\n# Phase\n# @jit(nopython = True)\ndef phi(uvec, rF2, lc, ax, ay):\n    \"\"\" Returns the phase at a stationary point. \"\"\"\n    ux, uy = uvec\n    return 0.5*rF2*lc**2*((gauss10(ux, uy)/ax)**2 + (gauss01(ux, uy)/ay)**2) + lc*gauss(ux, uy)\n\n# Amplitude\n# @jit(nopython=True)\ndef GOAmplitude(uvec, rF2, lc, ax, ay):\n    \"\"\" Returns the geometrical optics amplitude. \"\"\"\n    ux, uy = uvec\n    alp = rF2*lc\n    phi20 = ax**2/rF2 + lc*gauss20(ux, uy)\n    phi02 = ay**2/rF2 + lc*gauss02(ux, uy)\n    phi11 = lc*gauss11(ux, uy)\n    H = phi20*phi02 - phi11**2\n    ans = (ax*ay/rF2)*np.abs(H)**-0.5\n    return ans\n\n# Field\n# @jit(nopython=True)\ndef GOfieldA(uvec, rF2, lc, ax, ay):\n    \"\"\" Returns the geometrical optics field, ie. the geometrical optics amplitude multiplied by the phase factor. 
Includes only the first order term. \"\"\"\n ux, uy = uvec\n alp = rF2*lc\n phi20 = ax**2/rF2 + lc*gauss20(ux, uy)\n phi02 = ay**2/rF2 + lc*gauss02(ux, uy)\n phi11 = lc*gauss11(ux, uy)\n sigma = np.sign(phi02)\n # print(sigma)\n H = phi20*phi02 - phi11**2\n delta = np.sign(H)\n ans = (ax*ay/rF2)*np.abs(H)**-0.5*exp(1j*(phi(uvec, rF2, lc, ax, ay) + pi*(delta + 1)*sigma*0.25)) # Cooke 1982\n return ans\n\ndef GOfieldB(uvec, rF2, lc, ax, ay):\n \"\"\" Returns the geometrical optics field, including the second order term. \"\"\"\n # Stamnes 1986, Dingle 1973\n ux, uy = uvec\n alp = rF2*lc\n phi20 = ax**2/rF2 + lc*gauss20(ux, uy)\n phi02 = ay**2/rF2 + lc*gauss02(ux, uy)\n phi11 = lc*gauss11(ux, uy)\n H = phi20*phi02 - phi11**2\n phi21, phi12, phi30, phi03, phi22, phi31, phi13, phi40, phi04 = lc*np.array([gauss21(ux, uy), gauss12(ux, uy), gauss30(ux, uy), gauss03(ux, uy), gauss22(ux, uy), gauss31(ux, uy), gauss13(ux, uy), gauss40(ux, uy), gauss04(ux, uy)])\n A = phi02**3*(5*phi30**2 - 3*phi20*phi40)\n B = phi20**3*(5*phi03**2 - 3*phi02*phi04)\n C = 3*phi20*phi02*(phi20*(2*phi21*phi03 + 3*phi12**2) + phi02*(2*phi12*phi30 + 3*phi21**2) - 2*phi20*phi02*phi22)\n D = -6*phi11*(phi20**2*(5*phi12*phi03 - 2*phi02*phi13) + phi02**2*(5*phi21*phi30 - 2*phi20*phi31) + phi20*phi02*(9*phi12*phi21 + phi30*phi03))\n E = 3*phi11**2*(phi20*(8*phi21*phi03 + 12*phi12**2 + phi20*phi04) + phi02*(8*phi12*phi30 + 12*phi21**2 + phi02*phi40) - 2*phi20*phi02*phi22)\n F = -4*phi11**3*(3*phi20*phi13 + 3*phi02*phi31 + 9*phi12*phi21 + phi30*phi03)\n G = 12*phi11**4*phi22\n q2 = (A + B + C + D + E + F + G)/(24.*H**3)\n sigma = np.sign(phi02)\n delta = np.sign(H)\n return ax*ay/(rF2*np.abs(H)**0.5) * exp(1j*(phi(uvec, rF2, lc, ax, ay) + pi*(delta + 1)*sigma*0.25)) * (1. + 1j*q2)\n\n@jit(nopython=True)\ndef physField(uvec, rF2, lc, ax, ay):\n \"\"\" Returns an approximation of the field at the caustic, using the formula from Cooke 1982. \"\"\"\n ux, uy = uvec\n alp = rF2*lc\n phi20, phi02 = ax**2/rF2 + lc*gauss20(ux, uy), ay**2/rF2 + lc*gauss02(ux, uy)\n phi11 = lc*gauss11(ux, uy)\n phi30 = lc*gauss30(ux, uy)\n phi21 = lc*gauss21(ux, uy)\n phi12 = lc*gauss12(ux, uy)\n phi03 = lc*gauss03(ux, uy)\n B = phi20**3*phi03 - 3*phi20**2*phi11*phi12 + 3*phi20*phi11**2*phi21 - phi11**3*phi30\n U = ax*ay/(2*pi*rF2) * exp(1j*(phi(uvec, rF2, lc, ax, ay) + 0.25*pi*np.sign(phi20))) * 2.**(5./6.) * pi**(1./3.) * gfunc(1./3.) * np.abs(phi20)**0.5/(3.**(1./6.) * np.abs(B)**(1./3.))\n return U\n\n\n# TOA perturbation\ndef deltatA(uvec, tg0, tdm0, alp, ax, ay):\n \"\"\" Returns TOA perturbation in ms as a function of root of lens equation uvec, lens parameters, and coefficients tg0 = dso/(2*c*dsl*dlo), tdm0 = c*re*dm/(2*pi*f**2). \"\"\"\n return 1e6*(tg0*alp**2*((gauss10(*uvec)/ax)**2 + (gauss01(*uvec)/ay)**2) + tdm0*gauss(*uvec))\n\ndef deltatB(uvec, upvec, tg0, tdm0, ax, ay):\n \"\"\" Returns TOA perturbation as a function of root of lens equation uvec, observation point upvec, lens parameters, and coefficients tg0 = dso/(2*c*dsl*dlo), tdm0 = c*re*dm/(2*pi*f**2). \"\"\"\n ux, uy = uvec\n upx, upy = upvec\n return 1e6*(tg0*((ax*(ux-upx))**2 + (ay*(uy-upy))**2) + tdm0*gauss(ux, uy))\n\n# DM perturbation\ndef deltaDMA(uvec, tg0, tdm0, alp, ax, ay, f, sgnG):\n \"\"\" Returns DM perturbation as a function of root of lens equation uvec, lens parameters, coefficients tg0 = dso/(2*c*dsl*dlo), tdm0 = c*re*dm/(2*pi*f**2), alpma = -c**2*re*dsl*dlo*dm/(2*pi*f**2*dso), and signed value of G. 
\"\"\"\n g = gauss(*uvec)\n gx, gy = gauss10(*uvec), gauss01(*uvec)\n gxx, gyy = gauss20(*uvec), gauss02(*uvec)\n gxy = gauss11(*uvec)\n coeff = -2*alp*sgnG/((ax*ay)**2*f)\n dxdf = coeff*(alp*gy*gxy - gx*(ay**2 + alp*gyy))\n dydf = coeff*(alp*gx*gxy - gy*(ax**2 + alp*gxx))\n return -pi*f**3*((tdm0 - 2*tg0*alp)*(dxdf*gx + dydf*gy) - 2*tdm0*g/f)/(c*re*pctocm)\n\ndef deltaDMB(uvec, upvec, tg0, tdm0, alp, ax, ay, f, sgnG):\n \"\"\" Returns DM perturbation as a function of root of lens equation uvec, observation point upvec, lens parameters, coefficients tg0 = dso/(2*c*dsl*dlo), tdm0 = c*re*dm/(2*pi*f**2), alpma = -c**2*re*dsl*dlo*dm/(2*pi*f**2*dso), and signed value of G. \"\"\"\n ux, uy = uvec\n upx, upy = upvec\n psix, psiy = gauss10(*uvec), gauss01(*uvec)\n psixx, psiyy, psixy = gauss20(*uvec), gauss02(*uvec), gauss11(*uvec)\n duxdf = -2*alp*sgnG*(alp*psiy*psixy - psix*(ay**2 + alp*psiyy))/((ax*ay)**2*f)\n duydf = -2*alp*sgnG*(alp*psix*psixy - psiy*(ax**2 + alp*psixx))/((ax*ay)**2*f)\n deltadm = -pi*f**3*(tdm0*(duydf*psiy + duxdf*psix - 2*gauss(*uvec)/f) + 2*tg0*(ax**2*duxdf*(ux-upx) + ay**2*duydf*(uy-upy)))/(c*re*pctocm)\n return deltadm\n", "sub_path": "observables.py", "file_name": "observables.py", "file_ext": "py", "file_size_in_byte": 5571, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "scipy.special.gamma", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "238061330", "text": "from __future__ import print_function\n\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Dropout, Activation\nfrom sklearn.preprocessing import MinMaxScaler\nnp.random.seed(42)\n\n\ndef prepare_data(data, seq_len=7):\n\n y = data[1:]\n x = data[:-1]\n\n new_x = []\n new_y = []\n for i in range(len(x) - seq_len + 1):\n new_x.append(x[i: i + seq_len])\n new_y.append(y[i + seq_len-1])\n new_x = np.array(new_x)\n new_x = new_x.reshape(new_x.shape[0], new_x.shape[1], 1)\n new_y = np.array(new_y)\n\n return new_x, new_y\n\ndef build_model(layers):\n model = Sequential()\n\n model.add(LSTM(\n input_dim=layers[0],\n output_dim=layers[1],\n return_sequences=True))\n model.add(Dropout(0.2))\n\n model.add(LSTM(\n layers[2],\n return_sequences=False))\n model.add(Dropout(0.2))\n\n model.add(Dense(\n output_dim=layers[3]))\n model.add(Activation(\"linear\"))\n\n start = time.time()\n model.compile(loss=\"mse\", optimizer=\"rmsprop\")\n print(\"Compilation Time : \", time.time() - start)\n return model\n\ndef predict_point_by_point(model, data):\n #Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time\n predicted = model.predict(data)\n predicted = np.reshape(predicted, (predicted.size,))\n return predicted\n\ndef predict_sequence_full(model, data, window_size):\n #Shift the window by 1 new prediction each time, re-run predictions on new window\n curr_frame = data[0]\n predicted = []\n for i in range(len(data)):\n predicted.append(model.predict(curr_frame[np.newaxis,:,:])[0,0])\n curr_frame = curr_frame[1:]\n curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0)\n return predicted\n\ndef plot_results(predicted_data, true_data):\n fig = plt.figure(facecolor='white')\n ax = fig.add_subplot(111)\n ax.plot(true_data, label='True Data')\n plt.plot(predicted_data, label='Prediction')\n plt.legend()\n plt.show()\n\ndef plot_results_multiple(predicted_data, 
true_data, prediction_len):\n    fig = plt.figure(facecolor='white')\n    ax = fig.add_subplot(111)\n    ax.plot(true_data, label='True Data')\n    #Pad the list of predictions to shift it in the graph to its correct start\n    for i, data in enumerate(predicted_data):\n        padding = [None for p in range(i * prediction_len)]\n        plt.plot(np.concatenate((padding,data)), label='Prediction')\n    plt.legend()\n    plt.show()\n\ndef predict_sequences_multiple(model, data, window_size, prediction_len=7):\n    #Predict sequence of 50 steps before shifting prediction run forward by 50 steps\n    prediction_seqs = []\n    for i in range(int(len(data)/prediction_len)):\n        curr_frame = np.array(data[i*prediction_len])\n        predicted = []\n        for j in range(prediction_len):\n            m = model.predict(curr_frame[np.newaxis,:,:])\n            predicted.append(m[0,0])\n            curr_frame = curr_frame[1:]\n            curr_frame = np.insert(curr_frame, [window_size-1], predicted[-1], axis=0)\n        prediction_seqs.append(predicted)\n    return prediction_seqs\n\ndef normalise_windows(window_data):\n    normalised_data = []\n    norms = []\n    for window in window_data:\n        norms.append(float(window[0]))\n        normalised_window = [[((float(p) / norms[-1]) - 1)] for p in window]\n        normalised_data.append(normalised_window)\n    return normalised_data, norms\n\ndef de_normalise_pred(pred, norms):\n    pred = np.array(pred)\n    return [norms[i]*(pred[i] + 1) for i in range(len(pred))]\n\n\npath = '/home/user/PycharmProjects/Jupyter_notebooks/portugal_datatable.csv'\n\n# read the data file\ndatatable = pd.read_csv(path, index_col=0)\n\nsum_data_type = datatable.groupby('data type').sum().ix[:'clicks'].transpose()\nsum_data_type['rate'] = sum_data_type['bookings'] / sum_data_type['clicks']\nsum_data_type['rate'] = sum_data_type['rate'].fillna(0.0)\n\nfiltered_table = sum_data_type[(sum_data_type.clicks >= 5) & (sum_data_type.bookings >= 1)]\n\ndata_from_feb1 = sum_data_type['01.02.2016':]\nrate = data_from_feb1['rate'].values\n# rate = 5*np.sin(np.arange(0,2/10*500,2/10)) + np.arange(0,2/10*500,2/10)\nepochs = 1\nseq_len = 7\n\nprint('Loading data... ')\n\nrate = np.array(rate) + 1\n\nsplit = int(len(rate) * 0.67)\ndata = rate[:split]\ntest = rate[split:]\n\nX_train, y_train = prepare_data(data, seq_len)\nX_test, y_test = prepare_data(test, seq_len)\n\nX_train, train_norm = normalise_windows(X_train)\nX_test, test_norm = normalise_windows(X_test)\nprint(np.array(X_test).shape, np.array(test_norm).shape)\n\nmodel = build_model([1, 50, 100, 1])\nmodel.fit(\n    X_train,\n    y_train,\n    batch_size=1,\n    nb_epoch=epochs,\n    shuffle=False)\n\npredictions = predict_sequences_multiple(model, X_test, seq_len, 7)\n\npredictions = de_normalise_pred(predictions, test_norm)\n\nplot_results_multiple(predictions, y_test, 7)\n\n# split = int(len(rate) * 0.67)\n# data = rate[:split]\n# test = rate[split:]\n#\n# data = np.concatenate(([data[0]], np.diff(data, 1)))\n# data_scaler = MinMaxScaler()\n# data = data_scaler.fit_transform(data)\n#\n# X_train, y_train = prepare_data(data, seq_len)\n#\n# print('\\nData Loaded. 
Compiling...\\n')\n#\n# model = build_model([1, 50, 100, 1])\n#\n# model.fit(\n# X_train,\n# y_train,\n# batch_size=1,\n# nb_epoch=epochs, shuffle=False)\n#\n# print('\\nPrediction...\\n')\n#\n# X_test, y_test = prepare_data(test, seq_len)\n#\n# # test_scalers = []\n# last_el = []\n# for i in range(X_test.shape[0]):\n# l = X_test[i].reshape(seq_len)\n# last_el.append(l[-1])\n# l = np.concatenate((l[:1], np.diff(l, 1)))\n# # scaler = MinMaxScaler()\n# # l = scaler.fit_transform(l)\n# # test_scalers.append(scaler)\n# l = data_scaler.transform(l)\n# X_test[i] = l.reshape((seq_len, 1))\n#\n# predicted = predict_point_by_point(model, X_test)\n#\n# # for i in range(len(predicted)):\n# # predicted[i] = test_scalers[i].inverse_transform([predicted[i]])[0] + last_el[i]\n# for i in range(len(predicted)):\n# predicted[i] = data_scaler.inverse_transform([predicted[i]])[0] + last_el[i]\n# # for i in range(len(predicted)):\n# # predicted[i] = predicted[i] + last_el[i]\n#\n# print(sum(abs(y_test - predicted)))\n#\n# fig = plt.figure(facecolor='gray')\n# graph_1 = fig.add_subplot(111)\n#\n# graph_1.plot(y_test, 'k-')\n#\n# graph_1.plot(predicted, 'r-')\n#\n# plt.show()\n\n\n", "sub_path": "lstm2_pred.py", "file_name": "lstm2_pred.py", "file_ext": "py", "file_size_in_byte": 6471, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.random.seed", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 10, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers.LSTM", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.layers.Dropout", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 45, "usage_type": "call"}, {"api_name": "time.time", "line_number": 47, "usage_type": "call"}, {"api_name": "time.time", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.insert", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": 
"numpy.concatenate", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.insert", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "541161277", "text": "import sys\nimport logging\n\n\ndef get_logger(logger_name='default'):\n \"\"\"\n Get logging and format\n All logs will be saved into logs/log-DATE (default)\n Default size of log file = 15m\n :param logger_name:\n :return:\n \"\"\"\n log = logging.getLogger(logger_name)\n log.setLevel(logging.DEBUG)\n log_format = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(log_format)\n if log.hasHandlers():\n log.handlers.clear()\n log.addHandler(ch)\n\n return log\n\n\ndef ignore_runtime_error(func, _logger=None, *args, **kwargs):\n if _logger is None:\n logger = get_logger('JFA Issuer')\n else:\n logger = get_logger(_logger)\n\n def wrapper(*args, **kwargs):\n try:\n rv = func(*args, **kwargs)\n except Exception as err:\n logger.info(f'{func.__repr__()} {err}')\n rv = None\n finally:\n return rv\n \n return wrapper\n", "sub_path": "utils/logger.py", "file_name": "logger.py", "file_ext": "py", "file_size_in_byte": 1007, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.StreamHandler", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "558028470", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n\n__author__ = '090h'\n__license__ = 'GPL'\n\n\nfrom libnmap.plugins.sql import NmapSqlPlugin\nfrom recon2db.helper import get_report\nimport logging\n\n\ndef nmap2sql(xmlscan, database):\n # Try to parse nmap report\n nmap_report = get_report(xmlscan)\n if nmap_report is None:\n logging.error('Error parsing nmap XML report')\n return None\n\n # Insert report to database\n try:\n ns = NmapSqlPlugin(url=database)\n return ns.insert(nmap_report)\n except:\n logging.error('Error saving to database: %s' % database)\n return None\n\n\ndef main():\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n parser = ArgumentParser('nmap2sql', formatter_class=ArgumentDefaultsHelpFormatter)\n parser.add_argument('nmap', help='nmap report XML file')\n parser.add_argument('database', help='SQLAlchemy database url')\n parser.add_argument('-D', '--debug', action='store_true', help='enable debug output')\n args = parser.parse_args()\n\n if args.debug:\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n\n if 
nmap2sql(args.nmap, args.database) is not None:\n        print('Export to SQL finished successfully.')\n    else:\n        print('Export to SQL failed.')\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "recon2db/nmap/sql.py", "file_name": "sql.py", "file_ext": "py", "file_size_in_byte": 1345, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "recon2db.helper.get_report", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 18, "usage_type": "call"}, {"api_name": "libnmap.plugins.sql.NmapSqlPlugin", "line_number": 23, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 26, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 32, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 32, "usage_type": "name"}, {"api_name": "logging.basicConfig", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 40, "usage_type": "attribute"}]} +{"seq_id": "453224324", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/8/23 22:21\n# @Author : zhoujun\n\nimport os\nos.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'\nimport numpy as np\nimport mxnet as mx\nfrom mxnet import image, nd\nfrom mxnet.gluon.data.vision import transforms\n\n\ndef try_gpu(gpu):\n    \"\"\"If GPU is available, return mx.gpu(0); else return mx.cpu()\"\"\"\n    try:\n        ctx = mx.gpu(gpu)\n        _ = nd.array([0], ctx=ctx)\n    except:\n        ctx = mx.cpu()\n    return ctx\n\n\ndef decode(preds, alphabet, raw=False):\n    if len(preds.shape) > 2:\n        preds_idx = preds.argmax(axis=2)\n        preds_prob = preds.max(axis=2)\n    else:\n        preds_idx = preds\n        preds_prob = np.ones_like(preds)\n    result_list = []\n    alphabet_size = len(alphabet)\n    for word, prob in zip(preds_idx, preds_prob):\n        if raw:\n            result_list.append((''.join([alphabet[int(i)] for i in word]), prob))\n        else:\n            result = []\n            conf = []\n            for i, index in enumerate(word):\n                if i < len(word) - 1 and word[i] == word[i + 1] and word[-1] != -1:  # Hack to decode label as well\n                    continue\n                if index == -1 or index == alphabet_size - 1:\n                    continue\n                else:\n                    result.append(alphabet[int(index)])\n                    conf.append(prob[i])\n            result_list.append((''.join(result), conf))\n    return result_list\n\n\nclass GluonNet:\n    def __init__(self, model_path, gpu_id=None):\n        \"\"\"\n        Initialize the Gluon model\n        :param model_path: path to the model file\n        :param gpu_id: which GPU to run on\n        \"\"\"\n        config = pickle.load(open(model_path.replace('.params', '.info'), 'rb'))['config']\n        alphabet = config['data_loader']['args']['dataset']['alphabet']\n        net = get_model(len(alphabet), config['arch']['args'])\n\n        self.gpu_id = gpu_id\n        self.img_w = config['data_loader']['args']['dataset']['img_w']\n        self.img_h = config['data_loader']['args']['dataset']['img_h']\n        self.img_channel = config['data_loader']['args']['dataset']['img_channel']\n        self.alphabet = alphabet\n        self.ctx = try_gpu(gpu_id)\n        self.net = net\n        self.net.load_parameters(model_path, self.ctx)\n        self.net.hybridize()\n\n    def predict(self, img_path):\n        \"\"\"\n        Run prediction on the given image; supports an image path or a numpy array\n        :param img_path: path to the image\n        :return:\n        \"\"\"\n        assert self.img_channel in [1, 3], 'img_channel must in [1.3]'\n        assert os.path.exists(img_path), 'file is not exists'\n        img = self.pre_processing(img_path)\n        img1 = transforms.ToTensor()(img)\n        img1 = img1.expand_dims(axis=0)\n\n        img1 = img1.as_in_context(self.ctx)\n        preds = self.net(img1)\n\n        preds = 
preds.softmax().asnumpy()\n        # result = decode(preds, self.alphabet, raw=True)\n        # print(result)\n        result = decode(preds, self.alphabet)\n        print(result)\n        return result, img\n\n    def pre_processing(self, img_path):\n        \"\"\"\n        Preprocess the image: resize by height first; if the resulting width is below the target width, pad with black pixels, otherwise force-scale to the target width\n        :param img_path: path to the image\n        :return:\n        \"\"\"\n        img = image.imdecode(open(img_path, 'rb').read(), 1 if self.img_channel == 3 else 0)\n        h, w = img.shape[:2]\n        ratio_h = float(self.img_h) / h\n        new_w = int(w * ratio_h)\n        img = image.imresize(img, w=new_w, h=self.img_h)\n        # if new_w < self.img_w:\n        #     step = nd.zeros((self.img_h, self.img_w - new_w, self.img_channel), dtype=img.dtype)\n        #     img = nd.concat(img, step, dim=1)\n        return img\n\n\nif __name__ == '__main__':\n    from models import get_model\n    import pickle\n    import time\n    from matplotlib import pyplot as plt\n    from matplotlib.font_manager import FontProperties\n\n    font = FontProperties(fname=r\"msyh.ttc\", size=14)\n\n    img_path = 'E:/zj/dataset/train/0_song5_0_3_w.jpg'\n    model_path = 'output/crnn_DenseNet_RNN_CTC/checkpoint/model_best.params'\n\n    gluon_net = GluonNet(model_path=model_path, gpu_id=None)\n    start = time.time()\n    result, img = gluon_net.predict(img_path)\n    print(time.time() - start)\n\n    # Export the model for deployment\n    # gluon_net.net.export('./output/txt4')\n\n    label = result[0][0]\n    plt.title(label, fontproperties=font)\n    plt.imshow(img.asnumpy().squeeze(), cmap='gray')\n    plt.show()\n", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 4525, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.environ", "line_number": 6, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 16, "usage_type": "call"}, {"api_name": "mxnet.nd.array", "line_number": 17, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 17, "usage_type": "name"}, {"api_name": "mxnet.cpu", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "mxnet.gluon.data.vision.transforms.ToTensor", "line_number": 80, "usage_type": "call"}, {"api_name": "mxnet.gluon.data.vision.transforms", "line_number": 80, "usage_type": "name"}, {"api_name": "mxnet.image.imdecode", "line_number": 99, "usage_type": "call"}, {"api_name": "mxnet.image", "line_number": 99, "usage_type": "name"}, {"api_name": "mxnet.image.imresize", "line_number": 103, "usage_type": "call"}, {"api_name": "mxnet.image", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.font_manager.FontProperties", "line_number": 117, "usage_type": "call"}, {"api_name": "time.time", "line_number": 123, "usage_type": "call"}, {"api_name": "time.time", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}]} +{"seq_id": "170688267", "text": "import os\nimport shutil\nimport shlex\nimport pytest\nfrom functools import partial\nfrom 
assertions import assert_minion_key_state\nfrom jinja2 import Environment, PackageLoader\nfrom config import SALT_CALL\nfrom utils import check_output, get_suse_release\n\n\npytestmark = pytest.mark.usefixtures(\"master\", \"minion\")\n\n\ndef post_12_required():\n if get_suse_release()['VERSION'] < 12:\n pytest.skip(\"incompatible with this version\")\n\n\ndef pre_12_required():\n if get_suse_release()['VERSION'] >= 12:\n pytest.skip(\"incompatible with this version\")\n\n\ndef minor_0_required():\n if get_suse_release()['PATCHLEVEL'] != 0:\n pytest.skip(\"incompatible with this minor version\")\n\n\ndef minor_non_0_required():\n if get_suse_release()['PATCHLEVEL'] == 0:\n pytest.skip(\"incompatible with this minor version\")\n\n\n@pytest.fixture(scope=\"module\")\ndef add_repo_sls(file_roots, env):\n jinja_env = Environment(loader=PackageLoader('tests', 'config'))\n template = jinja_env.get_template('systemsmanagement_saltstack.sls')\n env.update({\n 'REPO_URL': 'http://download.opensuse.org/repositories/devel:/libraries:/c_c++/SLE_12/',\n 'GPGKEY_URL': 'http://download.opensuse.org/repositories/devel:/libraries:/c_c++/SLE_12//repodata/repomd.xml.key'\n })\n content = template.render(**env)\n with (file_roots / 'systemsmanagement_saltstack.sls').open('wb') as f:\n f.write(content)\n\n\ndef test_minion_key_cached(env, wait_minion_key_cached):\n assert_minion_key_state(env, \"unaccepted\")\n\n\ndef test_minion_key_accepted(env, accept_minion_key):\n assert_minion_key_state(env, \"accepted\")\n\n\ndef test_ping_minion(env, minion_ready):\n cmd = shlex.split(SALT_CALL.format(**env))\n cmd.append(\"test.ping\")\n output = check_output(cmd, env)\n assert [env['HOSTNAME'], 'True'] == [it.strip() for it in output.split(':')]\n\n\ndef remove_repo(caller_client, identifier, env):\n caller_client.cmd('pkg.del_repo', identifier)\n\n\ndef test_pkg_list(caller_client, minion_ready):\n assert caller_client.cmd('pkg.list_pkgs')\n\n\ndef test_zypper_pkg_owner(caller_client, minion_ready):\n assert caller_client.cmd('pkg.owner', '/etc/zypp') == 'libzypp'\n\n\ndef test_zypper_pkg_list_products_post_12(caller_client, minion_ready):\n post_12_required()\n [output] = caller_client.cmd('pkg.list_products')\n assert output['name'] == 'SLES'\n assert output['release'] == '0'\n\n\ndef test_zypper_pkg_list_products_pre_12(caller_client, minion_ready):\n pre_12_required()\n [output] = caller_client.cmd('pkg.list_products')\n assert output['name'] == 'SUSE_SLES'\n\n\ndef test_zypper_pkg_list_products_with_minor_0(caller_client, minion_ready, suse_release):\n minor_0_required()\n [output] = caller_client.cmd('pkg.list_products')\n assert output['version'] == unicode(suse_release['VERSION'])\n\n\ndef test_zypper_pkg_list_products_with_minor_non_0(caller_client, minion_ready, suse_release):\n minor_non_0_required()\n [output] = caller_client.cmd('pkg.list_products')\n assert output['version'] == \"{VERSION}.{PATCHLEVEL}\".format(**suse_release)\n\n\ndef test_zypper_pkg_list_products_with_OEM_release(request, caller_client, minion_ready, suse_release):\n suse_register = '/var/lib/suseRegister'\n filepath = suse_register + '/OEM/sles'\n os.makedirs(suse_register + '/OEM')\n request.addfinalizer(lambda: shutil.rmtree(suse_register))\n with open(filepath, 'w+b') as f:\n f.write('OEM')\n [output] = caller_client.cmd('pkg.list_products')\n assert output['productline'] == 'sles'\n assert output['release'] == 'OEM'\n\n\ndef test_zypper_pkg_modrepo_create(request, env, caller_client, minion_ready, tmpdir_factory):\n repo_name = 
'repotest'\n repo_dir = tmpdir_factory.mktemp(repo_name)\n caller_client.cmd(\n 'pkg.mod_repo', repo_name, url=\"file:///{0}\".format(repo_dir.strpath))\n request.addfinalizer(partial(remove_repo, caller_client, repo_name, env))\n\n\ndef test_zypper_pkg_modrepo_modify(request, env, caller_client, minion_ready, tmpdir_factory):\n repo_name = 'repotest-1'\n request.addfinalizer(partial(remove_repo, caller_client, repo_name, env))\n repo_dir = tmpdir_factory.mktemp(repo_name)\n caller_client.cmd(\n 'pkg.mod_repo', repo_name, url=\"file:///{0}\".format(repo_dir.strpath))\n output = caller_client.cmd(\n 'pkg.mod_repo', repo_name, refresh=True, enabled=False, output=\"json\")\n assert output['enabled'] is False\n assert output['autorefresh'] is True\n\n\ndef test_zypper_refresh_repo_with_gpgkey(request, env, local_client, caller_client, minion_ready):\n repo_name = 'Repo-With-GPGkey'\n request.addfinalizer(partial(remove_repo, caller_client, repo_name, env))\n caller_client.cmd(\n 'pkg.mod_repo',\n repo_name,\n disabled=False,\n url=\"http://download.opensuse.org/repositories/devel:/libraries:/c_c++/SLE_12/\",\n refresh=True,\n gpgautoimport=True\n )\n # do not use caller_client next\n # assert `zypper refresh` doesn't ask for gpg confirmation anymore\n res = local_client.cmd(env['HOSTNAME'], 'cmd.run', ['zypper refresh'])\n assert \"Repository '{0}' is up to date.\".format(repo_name) in res[env['HOSTNAME']]\n\n\ndef test_zypper_pkg_del_repo(request, env, caller_client, minion_ready, tmpdir_factory):\n repo_name = 'repotest-2'\n repo_dir = tmpdir_factory.mktemp(repo_name)\n caller_client.cmd(\n 'pkg.mod_repo', repo_name, url=\"file:///{0}\".format(repo_dir.strpath))\n res = caller_client.cmd('pkg.del_repo', repo_name)\n assert res[repo_name] is True\n\n\ndef test_zypper_pkg_refresh_db(request, env, caller_client, minion_ready):\n res = caller_client.cmd('pkg.refresh_db')\n assert res['testpackages'] is True\n\n\ndef test_zypper_pkg_list_patterns(request, env, caller_client, minion_ready):\n res = caller_client.cmd('pkg.list_patterns')\n assert res['Minimal']['installed'] is False\n\n\ndef test_zypper_pkg_search(request, env, caller_client, minion_ready):\n res = caller_client.cmd('pkg.search', 'test-package')\n assert res['test-package-zypper']['summary'] == u\"Test package for Salt's pkg.latest\"\n\n\ndef test_zypper_pkg_download(request, env, caller_client, minion_ready):\n post_12_required()\n res = caller_client.cmd('pkg.download', 'test-package')\n assert res['test-package']['repository-alias'] == 'salt_testing'\n\n\ndef install_package(caller_client, package):\n caller_client.cmd('pkg.install', 'test-package')\n\n\ndef test_zypper_pkg_remove(request, env, caller_client, minion_ready):\n res = caller_client.cmd('pkg.remove', 'test-package')\n request.addfinalizer(partial(install_package, caller_client, 'test-package'))\n assert res['test-package']['new'] == ''\n", "sub_path": "tests/test_minion.py", "file_name": "test_minion.py", "file_ext": "py", "file_size_in_byte": 6694, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pytest.mark.usefixtures", "line_number": 12, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 12, "usage_type": "attribute"}, {"api_name": "utils.get_suse_release", "line_number": 16, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.get_suse_release", "line_number": 21, "usage_type": "call"}, {"api_name": "pytest.skip", 
"line_number": 22, "usage_type": "call"}, {"api_name": "utils.get_suse_release", "line_number": 26, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.get_suse_release", "line_number": 31, "usage_type": "call"}, {"api_name": "pytest.skip", "line_number": 32, "usage_type": "call"}, {"api_name": "jinja2.Environment", "line_number": 37, "usage_type": "call"}, {"api_name": "jinja2.PackageLoader", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 35, "usage_type": "call"}, {"api_name": "assertions.assert_minion_key_state", "line_number": 49, "usage_type": "call"}, {"api_name": "assertions.assert_minion_key_state", "line_number": 53, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 57, "usage_type": "call"}, {"api_name": "config.SALT_CALL.format", "line_number": 57, "usage_type": "call"}, {"api_name": "config.SALT_CALL", "line_number": 57, "usage_type": "name"}, {"api_name": "utils.check_output", "line_number": 59, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 103, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 104, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 117, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 122, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 134, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 185, "usage_type": "call"}]} +{"seq_id": "124909150", "text": "# encoding=UTF-8\nfrom pyspark import SparkContext, SparkConf\nimport re\n\n\ndef execute():\n conf = SparkConf().setAppName('test04')\n sc = SparkContext(conf=conf)\n\n try:\n text_rdd = sc.textFile('/Users/toru/spark/program/data/README.md')\n results = text_rdd \\\n .flatMap(lambda value: re.split('[ .,]', value)) \\\n .map(lambda value: (value, 1)) \\\n .reduceByKey(lambda result, elem: result + elem) \\\n .map(lambda value: (value[1], value[0])) \\\n .sortByKey(False) \\\n .map(lambda value: (value[1], value[0]))\n\n for result in results.take(3):\n print(result)\n\n finally:\n sc.stop()\n\n\nif __name__ == '__main__':\n execute()", "sub_path": "program/test04.py", "file_name": "test04.py", "file_ext": "py", "file_size_in_byte": 730, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pyspark.SparkConf", "line_number": 7, "usage_type": "call"}, {"api_name": "pyspark.SparkContext", "line_number": 8, "usage_type": "call"}, {"api_name": "re.split", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "431290630", "text": "import json\nfrom urllib.parse import urlencode\nfrom tornado import httpclient\nfrom tornado.httpclient import HTTPRequest\n\nSMS = {\n 'apikey': 'd4539d672b85e0247b823bcf655d65b0',\n 'url': 'https://sms.yunpian.com/v2/sms/single_send.json',\n 'template': '【北京瑞默特】您的验证码是{}。如非本人操作,请忽略本短信'\n}\n\n\nasync def send_single_sms(code, mobile):\n http_client = httpclient.AsyncHTTPClient()\n url = SMS['url']\n text = SMS['template'].format(code)\n post_request = HTTPRequest(url=url, method='POST', body=urlencode({\n 'apikey': SMS['apikey'],\n 'mobile': mobile,\n 'text': text\n }))\n try:\n response = await http_client.fetch(post_request)\n return json.loads(response.body.decode('utf8'))\n except Exception as error:\n return json.loads(error['response'].body.decode('utf8'))\n\n\nif __name__ == '__main__':\n import tornado.ioloop\n\n io_loop = tornado.ioloop.IOLoop.current()\n\n from 
functools import partial\n\n new_func = partial(send_single_sms, '1234', '13439093625')\n io_loop.run_sync(new_func)\n", "sub_path": "tools/send_sms.py", "file_name": "send_sms.py", "file_ext": "py", "file_size_in_byte": 1107, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "tornado.httpclient.AsyncHTTPClient", "line_number": 14, "usage_type": "call"}, {"api_name": "tornado.httpclient", "line_number": 14, "usage_type": "name"}, {"api_name": "tornado.httpclient.HTTPRequest", "line_number": 17, "usage_type": "call"}, {"api_name": "urllib.parse.urlencode", "line_number": 17, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 24, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 26, "usage_type": "call"}, {"api_name": "tornado.ioloop.IOLoop.current", "line_number": 32, "usage_type": "call"}, {"api_name": "tornado.ioloop", "line_number": 32, "usage_type": "attribute"}, {"api_name": "functools.partial", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "121932583", "text": "# -*- coding: utf-8 -*-\r\nfrom __future__ import unicode_literals\r\n\r\nfrom django.shortcuts import render\r\n\r\n# Create your views here.\r\nfrom django.conf.urls import url\r\nfrom . import views \r\n # This line is new!\r\n\r\nurlpatterns = [\r\n url(r'^main$', views.main, name='register'),\r\n url(r'^logout$', views.logout, name='logout_appointment'),\r\n # # url(r'^message$', views.message, name='the_wall_message'),\r\n url(r'^home$', views.home),\r\n url(r'^view/(?P\\d+)$', views.view,),\r\n url(r'^remove/(?P\\d+)$', views.remove, name='delete_appointment'),\r\n url(r'^add/(?P\\d+)$', views.add),\r\n url(r'^friends$', views.friends, name='appointments_success'),\r\n url(r'^login$', views.login, name='the_wall_login'),\r\n url(r'^register$', views.register, name='appointments_register')\r\n]", "sub_path": "Python Using Django With User Login/apps/Belt_2_app/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 13, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "472542700", "text": "\"\"\"Registry of environment reward functions in PyTorch.\"\"\"\nimport torch\nimport torch.nn as nn\nfrom ray.rllib.utils import override\nfrom ray.tune.registry import _global_registry\nfrom ray.tune.registry import ENV_CREATOR\n\nREWARDS = {}\n\n\ndef get_reward_fn(env_id, env_config=None):\n \"\"\"Return the reward funtion for the given environment name and configuration.\n\n Only returns reward functions for environments which have been registered with Tune.\n \"\"\"\n assert env_id in REWARDS, f\"{env_id} environment reward not registered.\"\n assert _global_registry.contains(\n ENV_CREATOR, env_id\n ), f\"{env_id} environment not 
registered with Tune.\"\n\n env_config = env_config or {}\n reward_fn = REWARDS[env_id](env_config)\n if env_config.get(\"time_aware\", False):\n reward_fn = TimeAwareRewardFn(reward_fn)\n return reward_fn\n\n\ndef register(*ids):\n \"\"\"Register reward function class for environments with given ids.\"\"\"\n\n def librarian(cls):\n REWARDS.update({i: cls for i in ids})\n return cls\n\n return librarian\n\n\nclass RewardFn(nn.Module):\n \"\"\"Module that computes an environment's reward function for batches of inputs.\"\"\"\n\n def __init__(self, _):\n super().__init__()\n\n @override(nn.Module)\n def forward(self, state, action, next_state): # pylint:disable=arguments-differ\n raise NotImplementedError\n\n\nclass TimeAwareRewardFn(RewardFn):\n \"\"\"Wraps a reward function and removes time dimension before forwarding.\"\"\"\n\n def __init__(self, reward_fn):\n super().__init__(None)\n self.reward_fn = reward_fn\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n return self.reward_fn(state[..., :-1], action, next_state[..., :-1])\n\n\n@register(\"CartPoleSwingUp-v0\", \"TorchCartPoleSwingUp-v0\")\nclass CartPoleSwingUpV0Reward(RewardFn):\n \"\"\"\n Compute CartPoleSwingUp's reward given a possibly batched transition.\n Assumes all but the last dimension are batch ones.\n \"\"\"\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n return next_state[..., 2]\n\n\n@register(\"CartPoleSwingUp-v1\", \"TorchCartPoleSwingUp-v1\")\nclass CartPoleSwingUpV1Reward(RewardFn):\n \"\"\"\n Compute CartPoleSwingUp's reward given a possibly batched transition.\n Assumes all but the last dimension are batch ones.\n \"\"\"\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n return (1 + next_state[..., 2]) / 2\n\n\n@register(\"HalfCheetah-v3\")\nclass HalfCheetahReward(RewardFn):\n \"\"\"Compute rewards given a possibly batched transition.\n\n Assumes all but the last dimension are batch ones.\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n assert (\n config.get(\"exclude_current_positions_from_observation\", True) is False\n ), \"Need x position for HalfCheetah-v3 reward function\"\n self.delta_t = 0.05\n self._ctrl_cost_weight = config.get(\"ctrl_cost_weight\", 0.1)\n self._forward_reward_weight = config.get(\"forward_reward_weight\", 1.0)\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n x_position_before = state[..., 0]\n x_position_after = next_state[..., 0]\n x_velocity = (x_position_after - x_position_before) / self.delta_t\n\n control_cost = self._ctrl_cost_weight * (action ** 2).sum(dim=-1)\n\n forward_reward = self._forward_reward_weight * x_velocity\n\n return forward_reward - control_cost\n\n\n@register(\"HVAC\")\nclass HVACReward(RewardFn):\n \"\"\"Compute HVAC's reward function.\"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n from .environments.hvac import DEFAULT_CONFIG\n\n config = {**DEFAULT_CONFIG, **config}\n self.air_max = torch.as_tensor(config[\"AIR_MAX\"]).float()\n self.is_room = torch.as_tensor(config[\"IS_ROOM\"])\n self.cost_air = torch.as_tensor(config[\"COST_AIR\"]).float()\n self.temp_low = torch.as_tensor(config[\"TEMP_LOW\"]).float()\n self.temp_up = torch.as_tensor(config[\"TEMP_UP\"]).float()\n self.penalty = torch.as_tensor(config[\"PENALTY\"]).float()\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n air = action * self.air_max\n temp = next_state[..., :-1]\n\n reward = -(\n self.is_room\n * (\n air * self.cost_air\n + 
((temp < self.temp_low) | (temp > self.temp_up)) * self.penalty\n + 10.0 * torch.abs((self.temp_up + self.temp_low) / 2.0 - temp)\n )\n ).sum(dim=-1)\n\n return reward\n\n\n@register(\"IndustrialBenchmark-v0\")\nclass IndustrialBenchmarkReward(RewardFn):\n \"\"\"IndustrialBenchmarks's reward function.\"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.reward_type = config.get(\"reward_type\", \"classic\")\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n con_coeff, fat_coeff = 1, 3\n consumption, fatigue = next_state[..., 4], next_state[..., 5]\n reward = -(con_coeff * consumption + fat_coeff * fatigue)\n\n if self.reward_type == \"delta\":\n old_consumption, old_fatigue = state[..., 4], state[..., 5]\n old_reward = -(con_coeff * old_consumption + fat_coeff * old_fatigue)\n reward -= old_reward\n\n return reward / 100\n\n\n@register(\"Navigation\")\nclass NavigationReward(RewardFn):\n \"\"\"Navigation's reward function.\"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n from .environments.navigation import DEFAULT_CONFIG\n\n config = {**DEFAULT_CONFIG, **config}\n self._end = torch.as_tensor(config[\"end\"]).float()\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n next_state = next_state[..., :2]\n goal = self._end\n return -torch.norm(next_state - goal, p=2, dim=-1)\n\n\n@register(\"Reacher-v2\")\nclass ReacherReward(RewardFn):\n \"\"\"Reacher-v3's reward function.\"\"\"\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n dist = state[..., -3:]\n reward_dist = -torch.norm(dist, dim=-1)\n reward_ctrl = -torch.sum(action ** 2, dim=-1)\n return reward_dist + reward_ctrl\n\n\n@register(\"Reservoir\")\nclass ReservoirReward(RewardFn):\n \"\"\"Reservoir's reward function.\"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n from .environments.reservoir import DEFAULT_CONFIG\n\n config = {**DEFAULT_CONFIG, **config}\n self.lower_bound = torch.as_tensor(config[\"LOWER_BOUND\"])\n self.upper_bound = torch.as_tensor(config[\"UPPER_BOUND\"])\n\n self.low_penalty = torch.as_tensor(config[\"LOW_PENALTY\"])\n self.high_penalty = torch.as_tensor(config[\"HIGH_PENALTY\"])\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n rlevel = next_state[..., :-1]\n\n penalty = torch.where(\n (rlevel >= self.lower_bound) & (rlevel <= self.upper_bound),\n torch.zeros_like(rlevel),\n torch.where(\n rlevel < self.lower_bound,\n self.low_penalty * (self.lower_bound - rlevel),\n self.high_penalty * (rlevel - self.upper_bound),\n ),\n )\n\n return penalty.sum(dim=-1)\n\n\n@register(\"MountainCarContinuous-v0\")\nclass MountainCarContinuousReward(RewardFn):\n \"\"\"MountainCarContinuous' reward function.\"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n goal_position = 0.45\n goal_velocity = config.get(\"goal_velocity\", 0.0)\n self.goal = torch.as_tensor([goal_position, goal_velocity])\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n done = (next_state >= self.goal).all(-1)\n shape = state.shape[:-1]\n reward = torch.where(done, torch.empty(shape).fill_(200), torch.zeros(shape))\n reward -= torch.pow(action, 2).squeeze(-1) * 0.1\n return reward\n\n\n@register(\"ReacherBulletEnv-v0\")\nclass ReacherBulletEnvReward(RewardFn):\n \"\"\"ReacherBulletEnv-v0's reward function.\"\"\"\n\n @override(RewardFn)\n def forward(self, state, action, next_state):\n to_target_vec_old = state[..., 2:4]\n to_target_vec_new = next_state[..., 2:4]\n 
potential_old = -100 * to_target_vec_old.norm(p=2, dim=-1)\n potential_new = -100 * to_target_vec_new.norm(p=2, dim=-1)\n\n theta_dot = next_state[..., -3]\n gamma = next_state[..., -2]\n gamma_dot = next_state[..., -1]\n\n electricity_cost = -0.10 * (\n (action[..., 0] * theta_dot).abs() + (action[..., 1] * gamma_dot).abs()\n ) - 0.01 * (action[..., 0].abs() + action[..., 1].abs())\n\n stuck_joint_cost = torch.where(\n (gamma.abs() - 1).abs() < 0.01,\n torch.empty_like(electricity_cost).fill_(-0.1),\n torch.zeros_like(electricity_cost),\n )\n\n rewards = (potential_new - potential_old) + electricity_cost + stuck_joint_cost\n return rewards\n", "sub_path": "raylab/envs/rewards.py", "file_name": "rewards.py", "file_ext": "py", "file_size_in_byte": 9023, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "ray.tune.registry._global_registry.contains", "line_number": 17, "usage_type": "call"}, {"api_name": "ray.tune.registry.ENV_CREATOR", "line_number": 18, "usage_type": "argument"}, {"api_name": "ray.tune.registry._global_registry", "line_number": 17, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "ray.rllib.utils.override", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "ray.rllib.utils.override", "line_number": 56, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 68, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 80, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 101, "usage_type": "call"}, {"api_name": "environments.hvac.DEFAULT_CONFIG", "line_number": 122, "usage_type": "name"}, {"api_name": "torch.as_tensor", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 124, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 128, "usage_type": "call"}, {"api_name": "torch.abs", "line_number": 140, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 130, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 155, "usage_type": "call"}, {"api_name": "environments.navigation.DEFAULT_CONFIG", "line_number": 177, "usage_type": "name"}, {"api_name": "torch.as_tensor", "line_number": 178, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 184, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 180, "usage_type": "call"}, {"api_name": "torch.norm", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 195, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 191, "usage_type": "call"}, {"api_name": "environments.reservoir.DEFAULT_CONFIG", "line_number": 207, "usage_type": "name"}, {"api_name": "torch.as_tensor", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.as_tensor", 
"line_number": 212, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 221, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.as_tensor", "line_number": 239, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.empty", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.pow", "line_number": 246, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 241, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 269, "usage_type": "call"}, {"api_name": "torch.empty_like", "line_number": 271, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 272, "usage_type": "call"}, {"api_name": "ray.rllib.utils.override", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "214012625", "text": "# Diese Datei ist für den Schwesterbot der Pixeldrohne, PixelDev gedacht.\n\nimport discord\nimport sys\nimport asyncio\nimport keys\nimport random\nimport urllib.request\n\nclient = discord.Client()\n\n\n@client.event\nasync def on_ready():\n print('Eingeloggt als')\n print(client.user.name)\n print(client.user.id)\n print('------------------------')\n await client.change_presence(game=discord.Game(name='mit dev.help', type=1, url=\"https://twitch.tv/pilleniusmc\"))\n\n\n@client.event\nasync def on_message(message):\n # Hilfe für Dev-Branch und Cutting Edge\n if message.content.lower().startswith('dev.help'):\n user = message.author\n embed = discord.Embed(\n title=\"Kategorie: Test\",\n description=\"Alle Befehle, die hier aufgeführt sind noch in der Testphase, heißt sie können komplett \"\n \"verbuggt sein.\\n[Beta]: Sollte soweit stabil sein.\\n[Alpha]: Könnte zu Abstürzen führen.\"\n )\n embed.add_field(name=\"Error 404\", value=\"There seems to be nothing.\")\n await client.send_message(user, embed=embed)\n if message.content.lower().startswith('dev.lsd'):\n # öffnen = open(\"config/zitate.txt\", \"r\", encoding='utf-8')\n öffnen = urllib.request.urlopen(\"https://sherlock-holm.es/stories/plain-text/cano.txt\")\n for line in öffnen:\n line = line.strip()\n line = str(line)\n if not line == \"b''\":\n line = line[2:]\n line = line.rstrip('\\'')\n await client.send_message(message.author, line)\n await asyncio.sleep(1)\n\n if message.content.lower().startswith('dev.tt'):\n ttemb = discord.Embed(\n title=\"PilleniusMC\",\n description=\"So könnte ein Twitch Chat Embed aussehen.\",\n color=0x6441a4\n )\n await client.send_message(message.channel, embed=ttemb)\n\n if message.author.id == keys.pmcid and message.content.lower().startswith('dev.halt'):\n await client.close()\n sys.exit(1)\n\n\n@client.event\nasync def on_reaction_add(reaction, user):\n msg = reaction.message\n chat = reaction.message.channel\n\n if reaction.emoji == \"👎\" and msg.id == messageid and not user.id == client.user.id:\n lol_msg = await client.send_message(chat, \"Hey {0}! 
\n        await client.remove_reaction(msg, \"👎\", user)\n        await asyncio.sleep(3)\n        await client.delete_message(lol_msg)\n\n# client.start(keys.dev)\n# client.run(keys.dev)\nclient.run(keys.eng)\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "discord.Client", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.Game", "line_number": 19, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 27, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 36, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 36, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 36, "usage_type": "name"}, {"api_name": "asyncio.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "discord.Embed", "line_number": 47, "usage_type": "call"}, {"api_name": "keys.pmcid", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 67, "usage_type": "call"}, {"api_name": "keys.eng", "line_number": 72, "usage_type": "attribute"}]}
+{"seq_id": "345181042", "text": "import pandas as pd\r\nimport numpy as np\r\nimport scaling\r\nfrom sklearn import preprocessing\r\n\r\ndef normaliseToBandMax(df):\r\n    df = pd.DataFrame(df)\r\n\r\n    ## IERG Scale to band number:\r\n    ## band = 747.52\r\n    band = 1106\r\n    df = scaling.scaleToOneBand(df,band)\r\n    ##IERG commented out .- DataScaledPath=sDirectory+'/dfDataScaled.csv'\r\n    ##IERG commented out .- df.to_csv(sDataScaledPath,sep=',')\r\n\r\n    #TCL normalisation vector to be saved\r\n    dfMaxXband=df.max(axis=1)\r\n\r\n    ## IERG Data here should reach with the standard IUNNA formatting. 
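The `df.divide(dfMaxXband, axis=0)` call that completes this function normalises each row by that row's own maximum. A tiny standalone illustration of the pandas idiom, with made-up band indices and sample columns:

```python
import pandas as pd

df = pd.DataFrame({"s1": [1.0, 4.0], "s2": [2.0, 8.0]}, index=[900.0, 1106.0])

row_max = df.max(axis=1)                  # per-row (per-band) maximum
normalised = df.divide(row_max, axis=0)   # axis=0 aligns row_max with the index
print(normalised)                         # every row now peaks at 1.0
```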
Comment the following code when formatting is guaranteed.\r\n df = df.divide(dfMaxXband,axis=0) #(axis=0: Column wise calculation)\r\n return (df,dfMaxXband)\r\n\r\ndef normaliseToMaxPerChannel(df):\r\n df = pd.DataFrame(df)\r\n dfNames = list(df)\r\n #print(dfNames)\r\n\r\n #dfResult=df.copy()[[]]\r\n # IERG Define separation band between VIS and NIR\r\n separationBand = 1003.08\r\n dfSplittingIndexLoc = df.index.get_loc(separationBand)\r\n\r\n\r\n # IERG truncate data according to separation band\r\n df1 = df.iloc[:dfSplittingIndexLoc,:]\r\n df1Indexes = pd.DataFrame(df1.index)\r\n df2 = df.iloc[dfSplittingIndexLoc:,:]\r\n df2Indexes = pd.DataFrame(df2.index)\r\n\r\n # IERG Normalise each sub dataframe\r\n # IERG Normalise df1\r\n x = df1.values #returns a numpy array\r\n min_max_scaler = preprocessing.MinMaxScaler()\r\n x_scaled = min_max_scaler.fit_transform(x)\r\n df1norm = pd.DataFrame(x_scaled)\r\n df1norm_final=pd.concat([df1Indexes,df1norm],axis=1,sort=False)\r\n df1norm_final.set_index('b',inplace=True)\r\n\r\n # IERG Normalise df2\r\n x = df2.values #returns a numpy array\r\n min_max_scaler = preprocessing.MinMaxScaler()\r\n x_scaled = min_max_scaler.fit_transform(x)\r\n df2norm = pd.DataFrame(x_scaled)\r\n df2norm_final=pd.concat([df2Indexes,df2norm],axis=1,sort=False)\r\n df2norm_final.set_index('b',inplace=True)\r\n\r\n\r\n # IERG Concatenate df1 and df2 into dfResult\r\n frames = [df1norm_final,df2norm_final]\r\n dfResult = pd.concat(frames)\r\n dfResult.columns = dfNames\r\n\r\n\r\n return (dfResult)\r\n", "sub_path": "Hazelnut_Classification/normalising.py", "file_name": "normalising.py", "file_ext": "py", "file_size_in_byte": 2111, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.DataFrame", "line_number": 7, "usage_type": "call"}, {"api_name": "scaling.scaleToOneBand", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 36, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 43, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 51, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 51, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 54, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "85420520", "text": "\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n'''\nprint(math.floor(1.5))\nfor inception_weight in np.linspace(0,1,endpoint=False):\n for resnet_weight in np.linspace(0,1,endpoint=False):\n inception_weight = round(inception_weight,2)\n resnet_weight = round(resnet_weight,2)\n if inception_weight+resnet_weight>=round(1,2):\n continue\n else:\n mobilenet_weight = round(1-inception_weight-resnet_weight,2)\n'''\nx = np.arange(1,13)\ny = []\nfor a in np.arange(0.5,1,0.01):\n a = np.around(a,2)\n y = np.power(x,np.ones(x.shape[0])*(1-a))/(1-a)\n plt.plot(range(len(y)), y, 
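`normaliseToMaxPerChannel` above splits the spectrum at a separation band and min-max scales each half independently. A compact sketch of the same idea; the function name and the sample frame are hypothetical, and passing `index=`/`columns=` to the DataFrame constructor avoids the index-reset-and-concat dance the original does:

```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

def normalise_in_segments(df, split_label):
    # Split the rows at a given index label, min-max scale each block
    # independently (per column), then stitch the blocks back together.
    cut = df.index.get_loc(split_label)
    parts = [df.iloc[:cut], df.iloc[cut:]]
    scaled = [pd.DataFrame(MinMaxScaler().fit_transform(p.values),
                           index=p.index, columns=p.columns) for p in parts]
    return pd.concat(scaled)

df = pd.DataFrame(np.arange(12.0).reshape(6, 2),
                  index=[500.0, 600.0, 700.0, 1003.08, 1100.0, 1200.0],
                  columns=["a", "b"])
print(normalise_in_segments(df, 1003.08))
```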
\"-*\",label=str(a))\n#plt.plot(range(len(y)),range(len(y)),\"-D\")\n\nplt.legend(loc=\"upper left\",ncol=10)\nplt.title(\"model utility with various alpha\")\nplt.show()\n\n", "sub_path": "temp.py", "file_name": "temp.py", "file_ext": "py", "file_size_in_byte": 806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.arange", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.around", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.power", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}]} +{"seq_id": "57413977", "text": "#!/usr/bin/env python\n\n\"\"\"\nCreated on 2015-04-04T11:28:18\n\"\"\"\n\nfrom __future__ import division, print_function\nimport sys\nimport subprocess\nfrom sqlalchemy import create_engine\n\ntry:\n import pymysql\nexcept ImportError:\n print('You need pymysql installed')\n sys.exit(1)\n\n__author__ = \"Matt Giguere (github: @mattgiguere)\"\n__license__ = \"MIT\"\n__version__ = '0.0.1'\n__maintainer__ = \"Matt Giguere\"\n__email__ = \"matthew.giguere@yale.edu\"\n__status__ = \"Production\"\n\n\ndef get_credentials_dir():\n \"\"\"\n PURPOSE: A routine for pointing to the credentials directory.\n \"\"\"\n cmd = 'echo $CredDir'\n #read in the CredDir string\n cdir = subprocess.check_output(cmd, shell=True)\n #chop off the newline character at the end\n cdir = cdir[0:len(cdir)-1]\n #and return it\n return cdir\n\n\ndef connect_aws_db(legacy=False, write_unicode=False):\n \"\"\"PURPOSE:\n A function for connecting to the doglodge.io AWS RDS MySQL database.\n\n :param legacy: [optional]\n If legacy is set, a PyMySQL connection will be returned.\n Otherwise, a SQLAlchemy engine will be returned. 
This is\n to handle the deprecated MySQL connections in pandas.\n\n :param write_unicode: [optional]\n If set, text will be written to the MySQL DB as unicode.\n \"\"\"\n\n #retrieve credentials:\n cdir = get_credentials_dir()\n credsf = open(cdir+'.credentials/SQL/cawsi', 'r')\n creds = credsf.read().split('\\n')\n if legacy:\n conn = pymysql.connect(host=creds[0],\n port=int(creds[1]),\n user=creds[2],\n passwd=creds[3],\n db=creds[4])\n #cur = conn.cursor()\n return conn\n else:\n #example:\n #mysql+pymysql://:@/[?]\n cmd = \"mysql+pymysql://\"\n cmd += creds[2]+':'\n cmd += creds[3]+'@'\n cmd += creds[0]+'/'\n cmd += creds[4]\n\n if write_unicode:\n cmd += '?charset=utf8'\n\n engine = create_engine(cmd, pool_recycle=600)\n return engine\n\n", "sub_path": "flaskapp/app/connect_aws_db.py", "file_name": "connect_aws_db.py", "file_ext": "py", "file_size_in_byte": 2119, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.exit", "line_number": 16, "usage_type": "call"}, {"api_name": "subprocess.check_output", "line_number": 32, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "29868372", "text": "import torch\nimport numpy as np\nimport torch.nn as nn\nfrom ..utils import find_node, get_input_node\n\n\ndef find_unused_node(node_infos, in_ids, out_ids, strip_inputs=True):\n all_ids = [node['name'] for node in node_infos]\n flags = dict((id_, False) for id_ in all_ids)\n left_ids = set(out_ids)\n while(left_ids) :\n for id_ in left_ids:\n flags[id_] = True\n needed_ids = []\n for id_ in left_ids:\n node = find_node(node_infos, id_)\n needed_ids += get_input_node(node)\n needed_ids = [id_ for id_ in needed_ids if id_ not in in_ids]\n left_ids = set(needed_ids)\n\n if not strip_inputs:\n for id_ in in_ids:\n flags[id_] = True\n return [id_ for id_ in all_ids if not flags[id_]]\n\ndef update_cfg(nodes, node_cfg):\n node = find_node(nodes, node_cfg['name'])\n node.update(node_cfg)\n\nclass BaseReconstructor(object):\n # supported_ops = ['nn.conv2d', 'nn.relu', 'nn.batch_norm',\n # 'nn.global_avg_pool2d', 'nn.batch_flatten', 'nn.dense',\n # 'nn.max_pool2d', 'add', 'nn.avg_pool2d', 'nn.bias_add',\n # 'nn.conv2d_transpose', 'sigmoid', 'multiply', 'divide',\n # 'subtract', 'nn.pad', 'clip']\n\n def __init__(self, graph, params, input_node_ids=[], output_node_ids=[], update_node_cfg=[], strip_inputs=True):\n super(BaseReconstructor, self).__init__()\n if update_node_cfg:\n for node_cfg in update_node_cfg:\n update_cfg(graph, node_cfg)\n\n self.input_node_ids = input_node_ids\n self.input_node_shapes = []\n self.output_node_ids = output_node_ids\n self.strip_inputs = strip_inputs\n self.graph = self._find_input_output_nodes(graph)\n if params is not None:\n self.params = self._strip_unused_params(params)\n else:\n self.params = {}\n\n def get_node_inputs(self, name):\n node = find_node(self.graph, name)\n if node is None: return []\n inputs = node['inputs']\n if inputs is None: inputs = []\n return [l for l in inputs if not l.startswith('params')]\n\n def _strip_unused_params(self, params):\n used_params = []\n for node in self.graph:\n inputs = node['inputs']\n if inputs is None: inputs = []\n used_params.extend(inputs)\n\n del_keys = []\n for kk in params:\n if kk not in used_params:\n del_keys.append(kk)\n for key in del_keys:\n del params[key]\n return params\n\n def 
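`connect_aws_db` builds its `mysql+pymysql://user:pass@host/db` URL by string concatenation. A sketch of the same thing using SQLAlchemy's URL helper (available in SQLAlchemy 1.4+), which also escapes special characters in passwords; the credential values here are placeholders, not the project's real configuration:

```python
from sqlalchemy import create_engine
from sqlalchemy.engine import URL

url = URL.create(
    drivername="mysql+pymysql",
    username="dbuser",            # placeholder credentials
    password="s3cret!",
    host="db.example.com",
    database="doglodge",
    query={"charset": "utf8"},    # same effect as the ?charset=utf8 suffix above
)
engine = create_engine(url, pool_recycle=600)  # recycle idle connections
```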
_find_input_output_nodes(self, node_infos):\n # node_infos = [node for node in node_infos if node.get('op_type') in self.supported_ops]\n all_node_ids = [node['name'] for node in node_infos]\n all_needed_ids = []\n for node in node_infos:\n all_needed_ids += get_input_node(node)\n\n if not self.input_node_ids:\n for id_ in all_needed_ids:\n if find_node(node_infos, id_)['inputs'] is None:\n self.input_node_ids.append(id_)\n assert self.input_node_ids\n if not self.output_node_ids:\n for id_ in all_node_ids:\n if id_ not in all_needed_ids:\n self.output_node_ids.append(id_)\n\n for node_name in self.input_node_ids:\n node = find_node(node_infos, node_name)\n if node.get('op_type') == 'Const':\n self.input_node_shapes.append(node['attrs']['shape'])\n else:\n self.input_node_shapes.append(node['attrs']['O_shape'][0])\n\n unused_node_ids = find_unused_node(node_infos, self.input_node_ids, self.output_node_ids, self.strip_inputs)\n node_infos = [node for node in node_infos if node['name'] not in unused_node_ids]\n return node_infos\n\n", "sub_path": "src/reconstructor/reconstructor.py", "file_name": "reconstructor.py", "file_ext": "py", "file_size_in_byte": 3818, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "utils.find_node", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.get_input_node", "line_number": 17, "usage_type": "call"}, {"api_name": "utils.find_node", "line_number": 27, "usage_type": "call"}, {"api_name": "utils.find_node", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.get_input_node", "line_number": 80, "usage_type": "call"}, {"api_name": "utils.find_node", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.find_node", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "454578040", "text": "#-*- coding: utf-8 -*-\nimport urllib.request\ntry:\n from pypresence import Presence\nexcept ImportError as e:\n import os\n os.system('pip install pypresence')\n from pypresence import Presence\n \nimport re, time, random, unicodedata, json, base64\n\nimport configparser\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nport = config['Web Interface']['Port']\ncredential = str(config['Web Interface']['Credential'])\nif not 'username:password' in credential:\n ids = base64.b64encode(bytes(credential,encoding='utf8'))\n ids = 'Basic ' + ids.decode('utf-8')\nelse:\n ids = False\napiMovieKey = config['API']['tmdbApi']\n\nclient_id = '527810733317029889'\nRPC = Presence(client_id)\nRPC.connect()\nicon = ''\noldTitle = ''\n\ndef getTitle():\n url ='http://localhost:'+port+'/jsonrpc?request={%22jsonrpc%22:%20%222.0%22,%20%22method%22:%20%22Player.GetItem%22,%20%22params%22:%20{%20%22properties%22:%20[%22title%22,%20%22album%22,%20%22artist%22,%20%22season%22,%20%22episode%22,%20%22duration%22,%20%22showtitle%22,%20%22tvshowid%22,%20%22thumbnail%22,%20%22file%22,%20%22fanart%22,%20%22streamdetails%22],%20%22playerid%22:%201%20},%20%22id%22:%20%22VideoGetItem%22}'\n if ids != False:\n headers = {'Authorization' : ids}\n req = urllib.request.Request(url, None,headers)\n with urllib.request.urlopen(req) as response:\n html = response.read()\n else:\n with urllib.request.urlopen(url) as response:\n html = response.read() \n\n getAddon = re.findall('\"file\":\"plugin://plugin..+?\\.(.+?)/',str(html))\n \n if 'vstream' in getAddon:\n icon = 'vstream'\n else:\n icon = 'kodi'\n \n getTitle = re.findall('\"label\":\"(.+?)\"',str(html))\n \n try:\n Title, rest = 
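`find_unused_node` in the reconstructor works backwards from the output nodes: anything not reachable (stopping at declared inputs) is flagged for removal. The same idea as a compact worklist sketch over a toy graph; names and the graph shape are illustrative:

```python
def reachable_from_outputs(graph, inputs, outputs):
    # graph: node name -> list of input node names
    seen, stack = set(), list(outputs)
    while stack:
        name = stack.pop()
        if name in seen:
            continue
        seen.add(name)
        if name not in inputs:            # do not walk past declared inputs
            stack.extend(graph.get(name, []))
    return seen

graph = {"out": ["mul"], "mul": ["a", "b"], "dead": ["a"], "a": [], "b": []}
live = reachable_from_outputs(graph, inputs={"a", "b"}, outputs=["out"])
print(sorted(set(graph) - live))  # ['dead'] - the node no output depends on
```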
getTitle[0].replace('[COLOR lightcoral]','').replace('[/COLOR]','').split('[COLOR skyblue]')\n except ValueError:\n old = str(getTitle[0])\n return False,False\n\n splitTitle = re.match('(?:S(.+?)E(.+?) |)([^\"]+)',Title)\n return splitTitle, icon\n\ndef getGenreSerie(Title1):\n Title1 = Title1.replace(' ','-')\n if Title1.endswith('--'):\n Title1 = Title1[:-2]\n elif Title1.endswith('-'):\n Title1 = Title1[:-1]\n with urllib.request.urlopen('https://www.thetvdb.com/series/'+Title1) as response:\n html = response.read()\n genre = re.findall('Genres.+?(.+?)',str(html))\n return genre\n\ndef getGenreMovie(Title1):\n with urllib.request.urlopen('https://api.themoviedb.org/3/search/movie?api_key=' +apiMovieKey+ '&language=fr-FR&query='+ Title1.replace(' ','+')) as response:\n html = response.read()\n idMovie = re.findall('\"results\":.+?\"id\":(.+?),',str(html))\n if idMovie:\n data = urllib.request.urlopen('https://api.themoviedb.org/3/movie/'+str(idMovie[0])+'?api_key='+apiMovieKey+'&language=fr-FR').read()\n jsonData = json.loads(data)\n genre = jsonData[\"genres\"]\n genre1 = re.findall(\"name.+?'(.+?)'\",str(genre))\n return genre1\n else:\n genre1 = ''\n return genre1\n \nwhile True:\n Title, icon = getTitle()\n if Title == False and oldTitle != False:\n RPC.update(small_image='kodi', small_text='kodi', large_image='kodi', large_text='kodi', details=str('Watch Nothing'))\n oldTitle = False\n time.sleep(15)\n else:\n time.sleep(15)\n \n if Title == False and oldTitle == False:\n pass\n elif str(Title[0]) == str(oldTitle):\n time.sleep(15)\n else:\n Title1 = str(Title.group(3))\n if '[' in Title1:\n Title1 = re.sub('([\\[].+?[\\]])','',Title1)\n if Title.group(1) == None:\n if not 'ApiKeyHere' in apiMovieKey:\n genre = getGenreMovie(Title1)\n else:\n genre = ['unknow']\n if genre == '':\n genre = ['unknow']\n Title1 = unicodedata.normalize('NFD', Title1).encode('ascii', 'ignore').decode(\"unicode_escape\").encode(\"latin1\").decode()\n RPC.update(small_image='kodi', small_text='kodi', large_image=icon, large_text=icon, details=Title1, state=str(','.join(genre)))\n else:\n genre = getGenreSerie(Title1)\n if not genre:\n genre = ['unknow']\n Title1 = unicodedata.normalize('NFD', Title1).encode('ascii', 'ignore').decode(\"unicode_escape\").encode(\"latin1\").decode()\n RPC.update(small_image='kodi', small_text='kodi', large_image=icon, large_text=icon, details='S' + Title.group(1) + ' E' + Title.group(2) + ' ' +Title1, state=str(','.join(genre)))\n oldTitle = str(Title[0])\n time.sleep(15)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4686, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.system", "line_number": 7, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 13, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 18, "usage_type": "call"}, {"api_name": "pypresence.Presence", "line_number": 25, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 34, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 34, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 34, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 35, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 35, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 35, "usage_type": "name"}, {"api_name": 
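The `getTitle` function above crams the entire JSON-RPC request into the URL query string. Kodi's web server also accepts the request as a POST body, which avoids the manual percent-encoding; a hedged sketch of that variant (the localhost port and the property list are assumptions mirroring the script's config):

```python
import json
import urllib.request

payload = {
    "jsonrpc": "2.0",
    "method": "Player.GetItem",
    "params": {"properties": ["title", "showtitle", "season", "episode"],
               "playerid": 1},
    "id": "VideoGetItem",
}
req = urllib.request.Request(
    "http://localhost:8080/jsonrpc",                 # assumed host/port
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    item = json.loads(resp.read())["result"]["item"]
print(item.get("label"))
```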
"urllib.request.request.urlopen", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 38, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 38, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 41, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 48, "usage_type": "call"}, {"api_name": "re.match", "line_number": 56, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 65, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 65, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 65, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 67, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 71, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 71, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 71, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 73, "usage_type": "call"}, {"api_name": "urllib.request.request.urlopen", "line_number": 75, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 75, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 75, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 78, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 89, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 91, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 96, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 100, "usage_type": "call"}, {"api_name": "unicodedata.normalize", "line_number": 108, "usage_type": "call"}, {"api_name": "unicodedata.normalize", "line_number": 114, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 117, "usage_type": "call"}]} +{"seq_id": "199474708", "text": "import argparse\nfrom cli.version import print_version_and_exit, version\nfrom datetime import datetime\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"source\", help=\"RSS URL\", nargs=\"?\")\n parser.add_argument(\n \"--daemon\",\n help=\"Run py-rss as a daemon to serve the server\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--date\",\n help=\"Try to fetch RSS from given date and use cached result if possible\",\n )\n parser.add_argument(\"--version\", action=\"store_true\", help=\"Print version info\")\n parser.add_argument(\n \"--json\", action=\"store_true\", help=\"Print result as JSON in stdout\"\n )\n parser.add_argument(\n \"--verbose\", action=\"store_true\", help=\"Outputs verbose status messages\"\n )\n parser.add_argument(\"--to_epub\", action=\"store_true\", help=\"Converts news to epub\")\n parser.add_argument(\"--to_html\", action=\"store_true\", help=\"Converts news to html\")\n parser.add_argument(\"--limit\", help=\"Limit news topics if this parameter provided\")\n return parser.parse_args()\n\n\ndef handle_args(args):\n time = datetime.now().strftime(\"%Y%d%m\")\n return {\n \"version\": print_version_and_exit() if args.version else version,\n \"limit\": int(args.limit) if args.limit else None,\n \"json\": True if args.json else False,\n \"date\": args.date if args.date else time,\n \"daemon\": True if args.daemon else False,\n \"verbose\": True if args.verbose else False,\n \"export_queue\": [\n \"epub\" if args.to_epub else None,\n \"html\" if args.to_html 
else None,\n ],\n \"source\": args.source if args.source else None,\n }\n", "sub_path": "JahorLaryn/py-rss/py_rss/cli/args.py", "file_name": "args.py", "file_ext": "py", "file_size_in_byte": 1697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 32, "usage_type": "name"}, {"api_name": "cli.version.print_version_and_exit", "line_number": 34, "usage_type": "call"}, {"api_name": "cli.version.version", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "203667760", "text": "# Copyright 2013 The LUCI Authors. All rights reserved.\n# Use of this source code is governed by the Apache v2.0 license that can be\n# found in the LICENSE file.\n\n\"\"\"Instance specific settings.\"\"\"\n\nimport logging\nimport posixpath\n\nfrom components import config\nfrom components import gitiles\nfrom components import net\nfrom components import utils\nfrom components.config import validation\n\nfrom proto import config_pb2\n\nSETTINGS_CFG_FILENAME = 'settings.cfg'\n\n\nConfigApi = config.ConfigApi\n\n\n@validation.self_rule(SETTINGS_CFG_FILENAME, config_pb2.SettingsCfg)\ndef validate_settings(cfg, ctx):\n \"\"\"Validates settings.cfg file against proto message schema.\"\"\"\n def within_year(value):\n if value < 0:\n ctx.error('cannot be negative')\n elif value > 60 * 60 * 24 * 365:\n ctx.error('cannot be more than a year')\n\n with ctx.prefix('bot_death_timeout_secs '):\n within_year(cfg.bot_death_timeout_secs)\n with ctx.prefix('reusable_task_age_secs '):\n within_year(cfg.reusable_task_age_secs)\n\n\n@utils.memcache('config:get_configs_url', time=60)\ndef _get_configs_url():\n \"\"\"Returns URL where luci-config fetches configs from.\"\"\"\n url = None\n try:\n url = config.get_config_set_location(config.self_config_set())\n except net.Error:\n logging.info(\n 'Could not get configs URL. Possibly config directory for this '\n 'instance of swarming does not exist')\n return url or 'about:blank'\n\n\ndef _gitiles_url(configs_url, rev, path):\n \"\"\"URL to a directory in gitiles -> URL to a file at concrete revision.\"\"\"\n try:\n loc = gitiles.Location.parse(configs_url)\n return str(loc._replace(\n treeish=rev or loc.treeish,\n path=posixpath.join(loc.path, path)))\n except ValueError:\n # Not a gitiles URL, return as is.\n return configs_url\n\n\ndef _get_settings():\n \"\"\"Returns (rev, cfg) where cfg is a parsed SettingsCfg message.\n\n If config does not exists, returns (None, ).\n\n The config is cached in the datastore.\n \"\"\"\n rev = None\n cfg = None\n try:\n # store_last_good=True tells config component to update the config file\n # in a cron job. 
Here we just read from datastore.\n rev, cfg = config.get_self_config(\n SETTINGS_CFG_FILENAME, config_pb2.SettingsCfg, store_last_good=True)\n except config.CannotLoadConfigError as ex:\n logging.info('Could not load settings.cfg: %s; using defaults', ex)\n if not cfg:\n cfg = config_pb2.SettingsCfg(\n reusable_task_age_secs=7*24*60*60,\n bot_death_timeout_secs=10*60)\n return rev, cfg\n\n\ndef settings_info():\n \"\"\"Returns information about the settings file.\n\n Returns a dict with keys:\n 'cfg': parsed SettingsCfg message\n 'rev': revision of cfg\n 'rev_url': URL of a human-consumable page that displays the config\n 'config_service_url': URL of the config_service.\n \"\"\"\n rev, cfg = _get_settings()\n rev_url = _gitiles_url(_get_configs_url(), rev, SETTINGS_CFG_FILENAME)\n cfg_service_hostname = config.config_service_hostname()\n return {\n 'cfg': cfg,\n 'rev': rev,\n 'rev_url': rev_url,\n 'config_service_url': (\n 'https://%s' % cfg_service_hostname if cfg_service_hostname else ''\n ),\n }\n\n\n@utils.cache_with_expiration(60)\ndef settings():\n \"\"\"Loads settings from an NDB-based cache or a default one if not present.\"\"\"\n return _get_settings()[1]\n", "sub_path": "appengine/swarming/server/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 3333, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "components.config.ConfigApi", "line_number": 21, "usage_type": "attribute"}, {"api_name": "components.config", "line_number": 21, "usage_type": "name"}, {"api_name": "components.config.validation.self_rule", "line_number": 24, "usage_type": "call"}, {"api_name": "components.config.validation", "line_number": 24, "usage_type": "name"}, {"api_name": "proto.config_pb2.SettingsCfg", "line_number": 24, "usage_type": "attribute"}, {"api_name": "proto.config_pb2", "line_number": 24, "usage_type": "name"}, {"api_name": "components.config.get_config_set_location", "line_number": 44, "usage_type": "call"}, {"api_name": "components.config", "line_number": 44, "usage_type": "name"}, {"api_name": "components.config.self_config_set", "line_number": 44, "usage_type": "call"}, {"api_name": "components.net.Error", "line_number": 45, "usage_type": "attribute"}, {"api_name": "components.net", "line_number": 45, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 46, "usage_type": "call"}, {"api_name": "components.utils.memcache", "line_number": 39, "usage_type": "call"}, {"api_name": "components.utils", "line_number": 39, "usage_type": "name"}, {"api_name": "components.gitiles.Location.parse", "line_number": 55, "usage_type": "call"}, {"api_name": "components.gitiles.Location", "line_number": 55, "usage_type": "attribute"}, {"api_name": "components.gitiles", "line_number": 55, "usage_type": "name"}, {"api_name": "posixpath.join", "line_number": 58, "usage_type": "call"}, {"api_name": "components.config.get_self_config", "line_number": 76, "usage_type": "call"}, {"api_name": "components.config", "line_number": 76, "usage_type": "name"}, {"api_name": "proto.config_pb2.SettingsCfg", "line_number": 77, "usage_type": "attribute"}, {"api_name": "proto.config_pb2", "line_number": 77, "usage_type": "name"}, {"api_name": "components.config.CannotLoadConfigError", "line_number": 78, "usage_type": "attribute"}, {"api_name": "components.config", "line_number": 78, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 79, "usage_type": "call"}, {"api_name": 
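The `@utils.memcache(...)` and `@utils.cache_with_expiration(60)` decorators used by this config module come from the LUCI components package, whose internals are not shown here. A rough standalone equivalent of an expiring in-process cache, purely to illustrate the pattern (the datastore read is faked):

```python
import functools
import time

def cache_with_expiration(seconds):
    # Cache a zero-argument function's result for a fixed time window.
    def decorator(fn):
        state = {"value": None, "expires": 0.0}

        @functools.wraps(fn)
        def wrapper():
            now = time.time()
            if now >= state["expires"]:
                state["value"] = fn()
                state["expires"] = now + seconds
            return state["value"]
        return wrapper
    return decorator

@cache_with_expiration(60)
def settings():
    return {"bot_death_timeout_secs": 600}  # stand-in for the datastore read

print(settings() is settings())  # True: same object within the window
```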
"proto.config_pb2.SettingsCfg", "line_number": 81, "usage_type": "call"}, {"api_name": "proto.config_pb2", "line_number": 81, "usage_type": "name"}, {"api_name": "components.config.config_service_hostname", "line_number": 98, "usage_type": "call"}, {"api_name": "components.config", "line_number": 98, "usage_type": "name"}, {"api_name": "components.utils.cache_with_expiration", "line_number": 109, "usage_type": "call"}, {"api_name": "components.utils", "line_number": 109, "usage_type": "name"}]} +{"seq_id": "605507588", "text": "'''read out json elements from files in folder'''\n\nimport os\nimport json\n\ndef parse_folder(datadir, cb):\n\t'''\n\tparse all json files in folder\n\t@param datadir: relative directory string to data folder\n\t@param cb: function callback on every line of parsed json\n\t'''\n\tfor root, dirs, files in os.walk(datadir):\n\t\tfor file in files:\n\t\t\tif file.endswith('.json'):\n\t\t\t\tprint(\"parsing: \" + file)\n\t\t\t\twith open(datadir + file) as f:\n\t\t\t\t\tfor line in f:\n\t\t\t\t\t\tcode = json.loads(line)\n\t\t\t\t\t\tcb(code)\n\t\t\t\t\tf.close()\n\ndef parse_file(filedir, cb):\n\t'''\n\tparse json file\n\t@param datadir: relative directory string to data folder\n\t@param cb: function callback on every line of parsed json\n\t'''\n\twith open(filedir) as f:\n\t\tfor line in f:\n\t\t\tcode = json.loads(line)\n\t\t\tcb(code)\n", "sub_path": "DataParsing/read_json.py", "file_name": "read_json.py", "file_ext": "py", "file_size_in_byte": 761, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "51580301", "text": "\nfrom django.urls import path\n\nfrom mtgo import views\n\nurlpatterns = [\n path('api/chart-data', views.MtgoCharts.as_view(), name='chart_data'),\n path('api/mtgo-card-list', views.MtgoCardList.as_view(), name='card_list'),\n\n path('charts/', views.charts, name='charts'),\n path('search/', views.search, name='search'),\n]\n\n\n\n", "sub_path": "mtgo/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "mtgo.views.MtgoCharts.as_view", "line_number": 7, "usage_type": "call"}, {"api_name": "mtgo.views.MtgoCharts", "line_number": 7, "usage_type": "attribute"}, {"api_name": "mtgo.views", "line_number": 7, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "mtgo.views.MtgoCardList.as_view", "line_number": 8, "usage_type": "call"}, {"api_name": "mtgo.views.MtgoCardList", "line_number": 8, "usage_type": "attribute"}, {"api_name": "mtgo.views", "line_number": 8, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "mtgo.views.charts", "line_number": 10, "usage_type": "attribute"}, {"api_name": "mtgo.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "mtgo.views.search", "line_number": 11, "usage_type": "attribute"}, {"api_name": "mtgo.views", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "213190527", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat 
May 27 17:14:24 2017\n\n@author: 凯风\n\"\"\"\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV,cross_val_score,KFold\nimport numpy as np\n\n'''\n    Use cross-validation to choose some of a model's hyperparameters:\n        There are two approaches:\n            1. GridSearchCV         tries every combination of the parameters you hand it\n            2. RandomizedSearchCV   samples random combinations of the given parameters; it has its own advantages, see its documentation\n        \n        Typical GridSearchCV workflow:\n            1. Split the data into training and test sets first\n            2. Define the parameter grid to cross-validate over\n            3. Train the model and search the parameters on the training set\n            4. Check the prediction quality on the test set\n        \n        ps: I do not actually do that below\n'''\n\n# Load the data\niris_dataSet = load_iris()\nX = iris_dataSet.data\nY = iris_dataSet.target\n\n# Set the number of trials\nnum_trials = 30\n\n# Define the hyperparameter dictionary\nparam = {'C':[0.001,0.01,0.1,1,10],'gamma':[0.1,0.01]}\n\n# Create a model instance with an RBF (Gaussian) kernel\nsvc = SVC(kernel='rbf')\n\n# Create two arrays to compare the scores of the two approaches\nnon_nested_scores = np.zeros(num_trials)\nnested_scores = np.zeros(num_trials)\n\n# Run the experiments\nfor i in range(num_trials):\n    # Use the same splits for a fair comparison\n    inner_cv = KFold(n_splits=4,shuffle=True,random_state=i)\n    outer_cv = KFold(n_splits=4,shuffle=True,random_state=i)\n    \n    # Non-nested approach\n    clf = GridSearchCV(estimator=svc,param_grid=param,cv=inner_cv)    \n    clf.fit(X,Y)\n    non_nested_scores[i] = clf.best_score_\n\n    # Nested approach\n    nested_score = cross_val_score(clf,X=X,y=Y,cv=outer_cv)\n    nested_scores[i] = nested_score.mean()\n\nnested_scores.mean()\nnon_nested_scores.mean()\n\nclf.best_params_            # Best parameter combination\nclf.grid_scores_            # Score of every hyperparameter combination\n\n'''\n    Comparison:\n        Non-nested: tends to give overly optimistic results\n        Nested: works the data harder; mostly depends on dataset size and model stability\n'''\n\n'''\n    estimator           -- the model\n    param_grid          -- hyperparameter dictionary\n    scoring             -- scoring metric\n    fit_params          -- extra parameters to pass to the model\n    n_jobs              -- not important for now\n    iid                 -- whether the samples are i.i.d.\n    refit               -- set True or False depending on whether the model has been fitted before\n    cv                  -- an iterator; you can pass in whatever splitting scheme you want\n    verbose             -- not important for now\n    pre_dispatch        -- not important for now\n    error_score         -- what to do when an error occurs\n    return_train_score  --\n'''", "sub_path": "Model_selection/cross_validation/GridSearchCV.py", "file_name": "GridSearchCV.py", "file_ext": "py", "file_size_in_byte": 2780, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sklearn.datasets.load_iris", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 44, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 49, "usage_type": "call"}, {"api_name": "sklearn.model_selection.KFold", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 53, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 58, "usage_type": "call"}]}
+{"seq_id": "86005133", "text": "# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nfrom collections import defaultdict\n\n\nclass Solution:\n    def verticalOrder(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: List[List[int]]\n        \"\"\"\n        if not root:\n            return []\n        colMap = defaultdict(list)\n        self.traverse(root, colMap)\n        cols = list(colMap.items())\n        cols.sort(key=lambda itm: itm[0])\n        ret = []\n        for k, v in cols:\n            ret.append(v)\n        return ret\n\n    def traverse(self, root, colMap):\n        q = [(root, 0)]\n        while len(q) > 0:\n            nq = []\n            for node, col in q:\n                colMap[col].append(node.val)\n                if node.left:\n                    nq.append((node.left, col - 1))\n                if node.right:\n                    nq.append((node.right, col + 1))\n            q = nq\n", "sub_path": "src/binary-tree-vertical-order-traversal.py", "file_name": "binary-tree-vertical-order-traversal.py", "file_ext": "py", "file_size_in_byte": 988, "program_lang": 
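The docstring in the GridSearchCV record mentions `RandomizedSearchCV` as the sampling alternative; a brief sketch of it on the same iris/SVC setup (requires scipy >= 1.4 for `loguniform`; the distribution choices are illustrative). Note also that `clf.grid_scores_` in that 2017-era script was removed in scikit-learn 0.20 in favour of `cv_results_`.

```python
from scipy.stats import loguniform
from sklearn.datasets import load_iris
from sklearn.model_selection import RandomizedSearchCV
from sklearn.svm import SVC

X, y = load_iris(return_X_y=True)
search = RandomizedSearchCV(
    SVC(kernel="rbf"),
    param_distributions={"C": loguniform(1e-3, 1e1), "gamma": [0.1, 0.01]},
    n_iter=10,          # sample 10 combinations instead of the full grid
    cv=4,
    random_state=0,
)
search.fit(X, y)
print(search.best_params_)
```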
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "collections.defaultdict", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "475841289", "text": "# -*- coding: utf-8 -*-\n# Copyright 2019 Green Valley NV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# @@license_version:1.5@@\nimport json\nimport logging\n\nfrom google.appengine.api import urlfetch\n\nfrom framework.plugin_loader import get_config\nfrom plugins.rogerthat_control_center.bizz.backends import get_backend_server\nfrom plugins.rogerthat_control_center.plugin_consts import NAMESPACE\n\n\ndef _request(auth_header, path, method=urlfetch.GET, payload=None):\n headers = {'Authorization': auth_header, 'Content-Type': 'application/json'}\n app_result = urlfetch.fetch(path, json.dumps(payload) if payload else None, method, headers)\n if app_result.status_code == 200:\n return json.loads(app_result.content)\n else:\n logging.debug(app_result.content)\n raise Exception('Invalid response: %s' % app_result.status_code)\n\n\ndef register_app(backend_id, jwt, app_id, official_id):\n base_url = get_config(NAMESPACE).configurator_url\n auth_header = 'Bearer %s' % jwt\n app_info = _request(auth_header, '%s/api/apps/%s' % (base_url, app_id))\n dev_acc_id = app_info['ios_developer_account']\n if not dev_acc_id:\n raise Exception('App %s has no iOS developer account' % app_id)\n dev_acc_info = _request(auth_header, '%s/api/developer-accounts/%s' % (base_url, dev_acc_id))\n backend = get_backend_server(backend_id)\n payload = {\n 'official_id': official_id,\n 'ios_dev_team': dev_acc_info['ios_dev_team'],\n }\n _request(auth_header, '%s/bob/api/apps/%s/register' % (backend.proxy_url, app_id), urlfetch.POST, payload)\n", "sub_path": "plugins/rogerthat_control_center/bizz/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 2089, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "google.appengine.api.urlfetch.GET", "line_number": 27, "usage_type": "attribute"}, {"api_name": "google.appengine.api.urlfetch", "line_number": 27, "usage_type": "name"}, {"api_name": "google.appengine.api.urlfetch.fetch", "line_number": 29, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch", "line_number": 29, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 31, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 33, "usage_type": "call"}, {"api_name": "framework.plugin_loader.get_config", "line_number": 38, "usage_type": "call"}, {"api_name": "plugins.rogerthat_control_center.plugin_consts.NAMESPACE", "line_number": 38, "usage_type": "argument"}, {"api_name": "plugins.rogerthat_control_center.bizz.backends.get_backend_server", "line_number": 45, "usage_type": "call"}, {"api_name": "google.appengine.api.urlfetch.POST", "line_number": 50, "usage_type": "attribute"}, {"api_name": 
"google.appengine.api.urlfetch", "line_number": 50, "usage_type": "name"}]} +{"seq_id": "630299445", "text": "# -*- coding:utf-8 -*-\n\nfrom django.urls import re_path,path\n\nfrom . import consumers\n\n\nwebsocket_urlpatterns = [\n #re_path(r'ws/chat/(?P\\w+)/$',consumers.ChatConsumer.as_asgi()),\n path('ws/cmd/', consumers.Cmd.as_asgi()),\n path('ws/script/', consumers.Script),\n path('ws/file/', consumers.File),\n path('ws/playbook/', consumers.Playbook),\n path('ws/module/', consumers.Module),\n\n]", "sub_path": "assets/routing.py", "file_name": "routing.py", "file_ext": "py", "file_size_in_byte": 411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "299380077", "text": "from server import patient_db, attending_db\nimport pytest\n\n\ndef test_process_new_attending():\n from server import process_new_attending\n in_data1 = {\"attending_username\": \"Everett\",\n \"attending_email\": \"Every@outlook.com\",\n \"attending_phone\": 919-1111-110}\n result1 = process_new_attending(in_data1)\n expected1 = \"attending_phone key value has wrong variable type, \" \\\n \"please make sure all your info are in the type of \" \\\n \"string!\", 400\n assert result1 == expected1\n\n in_data2 = {\"name\": \"Everett\",\n \"attending_email\": \"Every@outlook.com\",\n \"attending_phone\": \"919 - 1111 - 110\"}\n result2 = process_new_attending(in_data2)\n expected2 = \"attending_username key not found in input, \" \\\n \"please make sure all your info are in the type of \" \\\n \"string!\", 400\n assert result2 == expected2\n\n in_data3 = {\"attending_username\": \"Everett\",\n \"attending_email\": \"Every@outlook.com\",\n \"attending_phone\": \"919 - 1111 - 110\"}\n result3 = process_new_attending(in_data3)\n expected3 = \"Attending:'Everett' successfully added\", 200\n assert result3 == expected3\n\n in_data4 = {\"attending_username\": \"Everett\",\n \"attending_email\": \"Ev@.com\",\n \"attending_phone\": \"666\"}\n result4 = process_new_attending(in_data4)\n expected4 = \"The attending already exists in database! Please \" \\\n \"create a non redundant username to write a new \" \\\n \"attending into database\", 400\n assert result4 == expected4\n\n in_data5 = {\"attending_username\": \"Aby\",\n \"attending_email\": \"Aby.com\",\n \"attending_phone\": \"666666\"}\n result5 = process_new_attending(in_data5)\n expected5 = \"You entered a invalid email address, please \" \\\n \"make sure you've entered correct info\", 400\n assert result5 == expected5\n\n\ndef test_if_attending_exist():\n from server import if_attending_exist\n in_data = {\"attending_username\": \"Everett\",\n \"attending_email\": \"Evy.com\",\n \"attending_phone\": \"666666\"}\n result = if_attending_exist(in_data)\n expected = \"The attending already exists in database! 
\"\n assert result == expected\n\n\ndef test_validate_post_input():\n from server import validate_post_input\n expected_key = [\"patient_id\", \"attending_username\", \"patient_age\"]\n expected_types = [int, str, int]\n\n in_data1 = {\"patient_id\": \"110\",\n \"attending_username\": \"Kobe\",\n \"patient_age\": 99}\n expected1 = \"patient_id key value has wrong variable type\"\n result1 = validate_post_input(in_data1, expected_key, expected_types)\n assert result1 == expected1\n\n in_data2 = {\"id\": \"110\",\n \"attending_username\": \"Kobe\",\n \"patient_age\": 99}\n expected2 = \"patient_id key not found in input\"\n result2 = validate_post_input(in_data2, expected_key, expected_types)\n assert result2 == expected2\n\n\ndef test_attending_info_detect():\n from server import attending_info_detect\n in_data = {\"attending_username\": \"Everett\",\n \"attending_email\": \"Evy.com\",\n \"attending_phone\": \"666666\"}\n expected = \"You entered a invalid email address, \"\n result = attending_info_detect(in_data)\n assert result == expected\n\n\ndef test_get_test():\n from server import get_test\n\n patient_id1 = 500\n expected1 = \"This patient doesn't have any heart rate history\", 400\n result1 = get_test(patient_id1)\n assert result1 == expected1\n\n patient_id2 = 50000\n expected2 = \"Could not find a matched patient in database\", 400\n result2 = get_test(patient_id2)\n assert expected2 == result2\n\n patient_id3 = \"120abcde\"\n expected3 = \"Please use an integer or a numeric string containing \" \\\n \"an ID number but without any letter\", 400\n result3 = get_test(patient_id3)\n assert expected3 == result3\n\n\ndef test_id_is_int():\n from server import id_is_int\n\n id1 = 10000\n expected1 = True\n result1 = id_is_int(id1)\n assert result1 == expected1\n\n id2 = \"10000abc\"\n expected2 = \"Please use an integer or a numeric string containing \" \\\n \"an ID number but without any letter\"\n result2 = id_is_int(id2)\n assert result2 == expected2\n\n\ndef test_latest_hr():\n from server import latest_hr\n\n patient1 = {'patient_id': 111111, 'attending_username': 'abcde',\n 'patient_age': 999,\n 'heart_rate_history': [{'heart_rate': 0.0001,\n 'status': 'not tachycardic',\n 'timestamp': '2099-13-13 11:00:36'},\n {'heart_rate': 0.01,\n 'status': 'not tachycardic',\n 'timestamp': '2059-13-13 11:00:36'}]}\n expected1 = {\"heart_rate\": 0.01,\n \"status\": 'not tachycardic',\n \"timestamp\": '2059-13-13 11:00:36'}\n result1 = latest_hr(patient1)\n assert result1 == expected1\n\n patient2 = {'patient_id': 222222, 'attending_username': 'lol',\n 'patient_age': 666,\n 'heart_rate_history': []}\n expected2 = False\n result2 = latest_hr(patient2)\n assert result2 == expected2\n\n\n# def test_get_heart_rate_list()\n\n\ndef test_get_average():\n from server import get_average\n\n patient_id1 = \"666hahaha\"\n expected1 = \"Please use an integer or a numeric string containing \" \\\n \"an ID number but without any letter\", 400\n result1 = get_average(patient_id1)\n assert result1 == expected1\n\n patient_id2 = 666\n expected2 = \"Could not find a matched patient in database\", 400\n result2 = get_average(patient_id2)\n assert result2 == expected2\n\n patient_id3 = 500\n expected3 = \"This patient doesn't have any heart rate history\", 400\n result3 = get_average(patient_id3)\n assert result3 == expected3\n\n\ndef test_average_hr():\n from server import average_hr\n\n patient1 = {'patient_id': 222222, 'attending_username': 'lol',\n 'patient_age': 666,\n 'heart_rate_history': []}\n expected1 = 
False\n result1 = average_hr(patient1)\n assert result1 == expected1\n\n patient2 = {'patient_id': 111111, 'attending_username': 'abcde',\n 'patient_age': 999,\n 'heart_rate_history': [{'heart_rate': 50,\n 'status': 'not tachycardic',\n 'timestamp': '2099-13-13 11:00:36'},\n {'heart_rate': 70,\n 'status': 'not tachycardic',\n 'timestamp': '2059-13-13 11:00:36'}]}\n expected2 = 60\n result2 = average_hr(patient2)\n assert result2 == expected2\n\n\ndef test_all_patients():\n from server import all_patients\n\n attending_name1 = \"hahaha666\"\n expected1 = \"Please enter a valid username string \" \\\n \"with no numbers!\", 400\n result1 = all_patients(attending_name1)\n assert result1 == expected1\n\n attending_name2 = \"Jerry\"\n expected2 = \"Sorry, this physician attending doesn't have any \" \\\n \"matched patient in the database\", 400\n result2 = all_patients(attending_name2)\n assert result2 == expected2\n\n\ndef test_str_username():\n from server import str_username\n\n name1 = \"good\"\n expected1 = True\n result1 = str_username(name1)\n assert result1 == expected1\n\n name2 = \"bad2name\"\n expected2 = \"Please enter a valid username string with no numbers!\"\n result2 = str_username(name2)\n assert result2 == expected2\n\n\ndef test_match_username():\n from server import match_username\n\n name1 = \"Tom\"\n expected1 = True\n result1 = match_username(name1)\n assert result1 == expected1\n\n name2 = \"Jerry\"\n expected2 = \"Sorry, this physician attending doesn't have any \" \\\n \"matched patient in the database\"\n result2 = match_username(name2)\n assert result2 == expected2\n\n\ndef test_return_data_list():\n from server import return_data_list\n\n attending_name = \"Tom\"\n expected = [{\"patient_id\": 120,\n \"last_heart_rate\": 104,\n \"last_time\": \"2018-03-10 11:00:36\",\n \"status\": \"tachycardic\"},\n {\"patient_id\": 300,\n \"last_heart_rate\": 75,\n \"last_time\": \"2019-10-10 11:00:36\",\n \"status\": \"not tachycardic\"},\n {\"patient_id\": 500,\n \"last_heart_rate\": \"No heart rate available\"}]\n result = return_data_list(attending_name)\n assert result == expected\n\n\ndef test_init_db():\n from server import init_db\n answer1, answer2 = init_db()\n expected1 = [{'patient_id': 120, 'attending_username': 'Tom',\n 'patient_age': 23,\n 'heart_rate_history': [{'heart_rate': 101,\n 'status': 'tachycardic',\n 'timestamp': '2018-03-09 11:00:36'},\n {'heart_rate': 104,\n 'status': 'tachycardic',\n 'timestamp':\n '2018-03-10 11:00:36'}]},\n {'patient_id': 300, 'attending_username': 'Tom',\n 'patient_age': 25,\n 'heart_rate_history': [{'heart_rate': 75,\n 'status': 'not tachycardic',\n 'timestamp':\n '2019-10-10 11:00:36'}]},\n {'patient_id': 500, 'attending_username': 'Tom',\n 'patient_age': 29, 'heart_rate_history': []},\n {'patient_id': 250, 'attending_username': 'Josh',\n 'patient_age': 20, 'heart_rate_history': []}\n ]\n expected2 = [{'attending_username': 'Tom',\n 'attending_email': 'tom@gmail.com',\n 'attending_phone': '919-865-5674'},\n {'attending_username': 'Lady',\n 'attending_email': 'Lady@gmail.com',\n 'attending_phone': '919-222-333'}]\n assert expected1 == answer1\n assert expected2 == answer2\n\n\ndef test_ecg_logging():\n from testfixtures import LogCapture\n from server import logging\n with LogCapture() as log_c:\n logging(0, \"a log\")\n log_c.check((\"root\", \"INFO\", \"a log\"))\n\n\ndef test_add_patient_to_database():\n from server import add_patient_to_database\n patient_id = 999\n attending_username = \"Attending1\"\n patient_age = 18\n 
add_patient_to_database(patient_id, attending_username, patient_age)\n last_patient_in_db = patient_db[-1]\n expected = {\"patient_id\": patient_id,\n \"attending_username\": attending_username,\n \"patient_age\": patient_age,\n \"heart_rate_history\": []}\n assert last_patient_in_db == expected\n\n\ndef test_add_attending_to_database():\n from server import add_attending_to_database\n add_attending_to_database(\"Attending1\",\n \"attending1@gmail.com\",\n \"777888999\")\n expected_name = \"Attending1\"\n assert attending_db[-1][\"attending_username\"] == expected_name\n\n\nnew_attending_test1_patient_id = 998\nnew_attending_test1_attending_username = \"attending2\"\nnew_patient_test1_patient_age = 18\n\nnew_attending_test2_patient_id = 997\nnew_attending_test2_attending_username = \"attending3\"\nnew_patient_test2_patient_age = \"18\"\n\nnew_attending_test3_patient_id = 996\nnew_attending_test3_attending_username = \"attending4\"\nnew_patient_test3_patient_age = \"an_age\"\n\nnew_attending_test4_patient_id = 998\nnew_attending_test4_attending_username = \"attending2\"\nnew_patient_test4_patient_age = 800\n\n\ndef test_process_new_patient():\n from server import process_new_patient\n in_data1 = {\"patient_id\": new_attending_test1_patient_id,\n \"attending_username\": new_attending_test1_attending_username,\n \"patient_age\": new_patient_test1_patient_age}\n answer1 = process_new_patient(in_data1)\n expected1 = 'Patient successfully added', 200\n assert answer1 == expected1\n in_data2 = {\"patient_id\": new_attending_test2_patient_id,\n \"attending_username\": new_attending_test2_attending_username,\n \"patient_age\": new_patient_test2_patient_age}\n answer2 = process_new_patient(in_data2)\n expected2 = 'Patient successfully added', 200\n assert answer2 == expected2\n in_data3 = {\"patient_id\": new_attending_test3_patient_id,\n \"attending_username\": new_attending_test3_attending_username,\n \"patient_age\": new_patient_test3_patient_age}\n answer3 = process_new_patient(in_data3)\n expected3 = 'patient_age key value has wrong variable type', 400\n assert answer3 == expected3\n in_data4 = {\"patient_id\": new_attending_test4_patient_id,\n \"attending_username\": new_attending_test4_attending_username,\n \"patient_age\": new_patient_test4_patient_age}\n answer4 = process_new_patient(in_data4)\n expected4 = \"Invalid age, human can't live so long!\", 400\n assert answer4 == expected4\n answer5 = process_new_patient(in_data1)\n expected5 = 'patient_id is the primary key, should be unique!', 400\n assert answer5 == expected5\n\n\ndef test_validate_age():\n from server import validate_age\n age1 = 18\n age2 = 200\n age3 = -1\n answer1 = validate_age(age1)\n expected1 = True\n assert answer1 == expected1\n answer2 = validate_age(age2)\n expected2 = \"Invalid age, human can't live so long!\"\n assert answer2 == expected2\n answer3 = validate_age(age3)\n expected3 = \"Invalid age, must be greater than 0!\"\n assert answer3 == expected3\n\n\ndef test_primary_key():\n from server import primary_key\n answer1 = primary_key(patient_db, \"patient_id\", 500)\n expected1 = \"patient_id is the primary key, should be unique!\"\n assert answer1 == expected1\n answer2 = primary_key(patient_db, \"patient_id\", 400)\n expected2 = True\n assert expected2 == answer2\n\n\ndef test_primary_key_exception():\n from server import primary_key\n with pytest.raises(Exception):\n primary_key(patient_db, \"doctor_id\", 500)\n\n\ndef test_parse_string():\n from server import parse_string\n in_data1 = {\"patient_id\": 
new_attending_test1_patient_id,\n \"attending_username\": new_attending_test1_attending_username,\n \"patient_age\": new_patient_test1_patient_age}\n answer1 = parse_string(in_data1, \"patient_age\")\n expected1 = \"No need to parse.\"\n assert answer1 == expected1\n in_data2 = {\"patient_id\": new_attending_test2_patient_id,\n \"attending_username\": new_attending_test2_attending_username,\n \"patient_age\": new_patient_test2_patient_age}\n answer2 = parse_string(in_data2, \"patient_age\")\n expected2 = \"Successfully parsed!\"\n assert answer2 == expected2\n in_data3 = {\"patient_id\": new_attending_test3_patient_id,\n \"attending_username\": new_attending_test3_attending_username,\n \"patient_age\": new_patient_test3_patient_age}\n answer3 = parse_string(in_data3, \"patient_age\")\n expected3 = \"No need to parse.\"\n assert answer3 == expected3\n\n\ndef test_add_heart_rate_to_database():\n from server import add_heart_rate_to_database\n test_patient = {'patient_id': 120, 'attending_username': 'Tom',\n 'patient_age': 23,\n 'heart_rate_history': []}\n add_heart_rate_to_database(test_patient, 100, \"tachycardia\",\n '2018-03-09 11:00:36')\n answer = test_patient['heart_rate_history'][-1]\n expected = {'heart_rate': 100,\n 'status': 'tachycardia',\n 'timestamp': '2018-03-09 11:00:36'}\n assert answer == expected\n\n\ndef test_is_tachycardic():\n from server import is_tachycardic\n import datetime\n time = datetime.datetime.now()\n dict_in1 = {\"patient_id\": 995, \"heart_rate\": 99}\n patient1 = {\"patient_id\": 995,\n \"attending_username\": \"attending5\",\n \"patient_age\": 18}\n attending1 = {\"attending_username\": \"attending995\",\n \"attending_email\": \"Every@outlook.com\",\n \"attending_phone\": 919-1111-110}\n answer1 = is_tachycardic(dict_in1, patient1, attending1, time)\n expected1 = \"not tachycardic\"\n assert answer1 == expected1\n dict_in2 = {\"patient_id\": 995, \"heart_rate\": 110}\n patient2 = {\"patient_id\": 995,\n \"attending_username\": \"attending5\",\n \"patient_age\": 18}\n attending2 = {\"attending_username\": \"attending995\",\n \"attending_email\": \"Every@outlook.com\",\n \"attending_phone\": 919-1111-110}\n answer2 = is_tachycardic(dict_in2, patient2, attending2, time)\n expected2 = \"tachycardic\"\n assert answer2 == expected2\n dict_in3 = {\"patient_id\": 995, \"heart_rate\": 110}\n patient3 = {\"patient_id\": 995,\n \"attending_username\": \"attending5\",\n \"patient_age\": 9}\n attending3 = {\"attending_username\": \"attending995\",\n \"attending_email\": \"Every@outlook.com\",\n \"attending_phone\": 919-1111-110}\n answer3 = is_tachycardic(dict_in3, patient3, attending3, time)\n expected3 = \"not tachycardic\"\n assert answer3 == expected3\n dict_in4 = {\"patient_id\": 995, \"heart_rate\": 150}\n patient4 = {\"patient_id\": 995,\n \"attending_username\": \"attending5\",\n \"patient_age\": 9}\n attending4 = {\"attending_username\": \"attending995\",\n \"attending_email\": \"Every@outlook.com\",\n \"attending_phone\": 919-1111-110}\n answer4 = is_tachycardic(dict_in4, patient4, attending4, time)\n expected4 = \"tachycardic\"\n assert answer4 == expected4\n\n\ndef test_email_sender():\n from server import email_sender\n answer = email_sender(\"123@gmail.com\", 140, 100, '2019-10-10 11:00:36')\n expected = (200,\n 'E-mail sent to 123@gmail.com from sentinel_server@duke.edu',\n {'from_email': 'sentinel_server@duke.edu',\n 'to_email': '123@gmail.com',\n 'subject': 'Tachycardia Detected! Patient ID: 140',\n 'content': 'Warning! 
The heart rate of patient ID 140'\n ' is 100 bpm @ 2019-10-10 11:00:36!'\n 'A tachycardia happened!'})\n assert answer == expected\n\n\ndef test_find_correct_patient():\n from server import add_patient_to_database, find_correct_patient\n add_patient_to_database(111, \"111\", 50)\n answer = find_correct_patient(111)\n expected = {'patient_id': 111,\n 'attending_username': '111',\n 'patient_age': 50, 'heart_rate_history': []}\n assert answer == expected\n\n\ndef test_find_correct_attending():\n from server import add_attending_to_database, find_correct_attending\n add_attending_to_database(\"a_name\", \"a_name@gmail.com\", \"919-654-9192\")\n answer = find_correct_attending(\"a_name\")\n expected = {'attending_username': 'a_name',\n 'attending_email': 'a_name@gmail.com',\n 'attending_phone': '919-654-9192'}\n assert answer == expected\n\n\ndef test_process_add_heart_rate():\n from server import process_add_heart_rate\n in_data1 = {\"patient_id\": 500,\n \"heart_rate\": 100}\n answer = process_add_heart_rate(in_data1)\n expected = 'Heart rate info successfully added', 200\n assert answer == expected\n in_data2 = {\"patient_id\": 130,\n \"heart_rate\": 100}\n answer = process_add_heart_rate(in_data2)\n expected = 'Could not find this patient in database', 400\n assert answer == expected\n in_data3 = {\"patient_id\": 250,\n \"heart_rate\": 100}\n answer = process_add_heart_rate(in_data3)\n expected = 'Could not find attending of this patient in database', 400\n assert answer == expected\n in_data4 = {\"patient_id\": \"Not a number\",\n \"heart_rate\": 100}\n answer = process_add_heart_rate(in_data4)\n expected = 'patient_id key value has wrong variable type', 400\n assert answer == expected\n\n\ndef test_list_average():\n from server import list_average\n list1 = [1, 3, 2]\n list2 = [-1, 0, 10]\n list3 = [-1, -1, -1]\n answer1 = list_average(list1)\n expected1 = 2\n answer2 = list_average(list2)\n expected2 = 3\n answer3 = list_average(list3)\n expected3 = -1\n assert answer1 == expected1\n assert answer2 == expected2\n assert answer3 == expected3\n\n\ndef test_find_interval_rates():\n from server import find_interval_rates\n in_data1 = {\"patient_id\": 120,\n \"heart_rate_average_since\": \"2018-03-09 11:00:30\"}\n patient1 = {'patient_id': 120,\n 'attending_username': 'Tom',\n 'patient_age': 23,\n 'heart_rate_history': [{'heart_rate': 101,\n 'status': 'tachycardic',\n 'timestamp': '2018-03-09 11:00:56'},\n {'heart_rate': 99,\n 'status': 'not tachycardic',\n 'timestamp': '2018-03-10 15:07:36'},\n {'heart_rate': 150,\n 'status': 'tachycardic',\n 'timestamp': '2018-03-10 20:00:11'}\n ]}\n answer1 = find_interval_rates(in_data1, patient1)\n expected1 = [101, 99, 150]\n assert answer1 == expected1\n in_data2 = {\"patient_id\": 120,\n \"heart_rate_average_since\": \"2018-03-10 11:00:30\"}\n answer2 = find_interval_rates(in_data2, patient1)\n expected2 = [99, 150]\n assert answer2 == expected2\n in_data3 = {\"patient_id\": 120,\n \"heart_rate_average_since\": \"2019-03-10 11:00:30\"}\n answer3 = find_interval_rates(in_data3, patient1)\n expected3 = []\n assert answer3 == expected3\n\n\ndef test_validate_time_format():\n from server import validate_time_format\n time_in1 = {\n \"patient_id\": 1,\n \"heart_rate_average_since\": \"2018-03-09 11:00:36\"\n }\n answer1 = validate_time_format(time_in1)\n assert answer1 is True\n time_in2 = {\n \"patient_id\": 1,\n \"heart_rate_average_since\": \"2018.03-09 11:00:36.4567\"\n }\n answer2 = validate_time_format(time_in2)\n expected2 = \"The time in does not 
satisfy the format,\" \\\n \" e.g. '2018-03-09 11:00:36'\"\n assert answer2 == expected2\n time_in3 = {\n \"patient_id\": 1,\n \"heart_rate_average_since\": \"Year:2018-Month:03-Day:09 11:00:36.4567\"\n }\n answer3 = validate_time_format(time_in3)\n expected3 = \"The time in does not satisfy the format,\" \\\n \" e.g. '2018-03-09 11:00:36'\"\n assert answer3 == expected3\n\n\ndef test_calculate_interval_average():\n from server import calculate_interval_average\n in_data1 = {\n \"patient_id\": 1,\n \"heart_rate_average_since\": \"2018-03-09 11:00:36\"\n }\n answer1 = calculate_interval_average(in_data1)\n expected1 = 'Could not find patient in database', 400\n assert answer1 == expected1\n in_data2 = {\n \"patient_id\": 120,\n \"heart_rate_average_since\": \"2018-03-09 11:00:36\"\n }\n answer2 = calculate_interval_average(in_data2)\n expected2 = 102, 200\n assert answer2 == expected2\n in_data3 = {\n \"patient_id\": 120,\n \"heart_rate_average_since\": \"2021-03-09 11:00:36\"\n }\n answer3 = calculate_interval_average(in_data3)\n expected3 = 'Could not find heart rate since the given time', 400\n assert answer3 == expected3\n\n\ndef test_get_heart_rate_list():\n from server import get_heart_rate_list\n patient_id = \"10000\"\n expected = \"Could not find patient in database\", 400\n result = get_heart_rate_list(patient_id)\n assert result == expected\n", "sub_path": "test_server.py", "file_name": "test_server.py", "file_ext": "py", "file_size_in_byte": 24243, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "server.process_new_attending", "line_number": 10, "usage_type": "call"}, {"api_name": "server.process_new_attending", "line_number": 19, "usage_type": "call"}, {"api_name": "server.process_new_attending", "line_number": 28, "usage_type": "call"}, {"api_name": "server.process_new_attending", "line_number": 35, "usage_type": "call"}, {"api_name": "server.process_new_attending", "line_number": 44, "usage_type": "call"}, {"api_name": "server.if_attending_exist", "line_number": 55, "usage_type": "call"}, {"api_name": "server.validate_post_input", "line_number": 69, "usage_type": "call"}, {"api_name": "server.validate_post_input", "line_number": 76, "usage_type": "call"}, {"api_name": "server.attending_info_detect", "line_number": 86, "usage_type": "call"}, {"api_name": "server.get_test", "line_number": 95, "usage_type": "call"}, {"api_name": "server.get_test", "line_number": 100, "usage_type": "call"}, {"api_name": "server.get_test", "line_number": 106, "usage_type": "call"}, {"api_name": "server.id_is_int", "line_number": 115, "usage_type": "call"}, {"api_name": "server.id_is_int", "line_number": 121, "usage_type": "call"}, {"api_name": "server.latest_hr", "line_number": 139, "usage_type": "call"}, {"api_name": "server.latest_hr", "line_number": 146, "usage_type": "call"}, {"api_name": "server.get_average", "line_number": 159, "usage_type": "call"}, {"api_name": "server.get_average", "line_number": 164, "usage_type": "call"}, {"api_name": "server.get_average", "line_number": 169, "usage_type": "call"}, {"api_name": "server.average_hr", "line_number": 180, "usage_type": "call"}, {"api_name": "server.average_hr", "line_number": 192, "usage_type": "call"}, {"api_name": "server.all_patients", "line_number": 202, "usage_type": "call"}, {"api_name": "server.all_patients", "line_number": 208, "usage_type": "call"}, {"api_name": "server.str_username", "line_number": 217, "usage_type": "call"}, {"api_name": 
"server.str_username", "line_number": 222, "usage_type": "call"}, {"api_name": "server.match_username", "line_number": 231, "usage_type": "call"}, {"api_name": "server.match_username", "line_number": 237, "usage_type": "call"}, {"api_name": "server.return_data_list", "line_number": 255, "usage_type": "call"}, {"api_name": "server.init_db", "line_number": 261, "usage_type": "call"}, {"api_name": "testfixtures.LogCapture", "line_number": 295, "usage_type": "call"}, {"api_name": "server.logging", "line_number": 296, "usage_type": "call"}, {"api_name": "server.add_patient_to_database", "line_number": 305, "usage_type": "call"}, {"api_name": "server.patient_db", "line_number": 306, "usage_type": "name"}, {"api_name": "server.add_attending_to_database", "line_number": 316, "usage_type": "call"}, {"api_name": "server.attending_db", "line_number": 320, "usage_type": "name"}, {"api_name": "server.process_new_patient", "line_number": 345, "usage_type": "call"}, {"api_name": "server.process_new_patient", "line_number": 351, "usage_type": "call"}, {"api_name": "server.process_new_patient", "line_number": 357, "usage_type": "call"}, {"api_name": "server.process_new_patient", "line_number": 363, "usage_type": "call"}, {"api_name": "server.process_new_patient", "line_number": 366, "usage_type": "call"}, {"api_name": "server.validate_age", "line_number": 376, "usage_type": "call"}, {"api_name": "server.validate_age", "line_number": 379, "usage_type": "call"}, {"api_name": "server.validate_age", "line_number": 382, "usage_type": "call"}, {"api_name": "server.primary_key", "line_number": 389, "usage_type": "call"}, {"api_name": "server.patient_db", "line_number": 389, "usage_type": "argument"}, {"api_name": "server.primary_key", "line_number": 392, "usage_type": "call"}, {"api_name": "server.patient_db", "line_number": 392, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 399, "usage_type": "call"}, {"api_name": "server.primary_key", "line_number": 400, "usage_type": "call"}, {"api_name": "server.patient_db", "line_number": 400, "usage_type": "argument"}, {"api_name": "server.parse_string", "line_number": 408, "usage_type": "call"}, {"api_name": "server.parse_string", "line_number": 414, "usage_type": "call"}, {"api_name": "server.parse_string", "line_number": 420, "usage_type": "call"}, {"api_name": "server.add_heart_rate_to_database", "line_number": 430, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 442, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 442, "usage_type": "attribute"}, {"api_name": "server.is_tachycardic", "line_number": 450, "usage_type": "call"}, {"api_name": "server.is_tachycardic", "line_number": 460, "usage_type": "call"}, {"api_name": "server.is_tachycardic", "line_number": 470, "usage_type": "call"}, {"api_name": "server.is_tachycardic", "line_number": 480, "usage_type": "call"}, {"api_name": "server.email_sender", "line_number": 487, "usage_type": "call"}, {"api_name": "server.add_patient_to_database", "line_number": 501, "usage_type": "call"}, {"api_name": "server.find_correct_patient", "line_number": 502, "usage_type": "call"}, {"api_name": "server.add_attending_to_database", "line_number": 511, "usage_type": "call"}, {"api_name": "server.find_correct_attending", "line_number": 512, "usage_type": "call"}, {"api_name": "server.process_add_heart_rate", "line_number": 523, "usage_type": "call"}, {"api_name": "server.process_add_heart_rate", "line_number": 528, "usage_type": "call"}, {"api_name": 
"server.process_add_heart_rate", "line_number": 533, "usage_type": "call"}, {"api_name": "server.process_add_heart_rate", "line_number": 538, "usage_type": "call"}, {"api_name": "server.list_average", "line_number": 548, "usage_type": "call"}, {"api_name": "server.list_average", "line_number": 550, "usage_type": "call"}, {"api_name": "server.list_average", "line_number": 552, "usage_type": "call"}, {"api_name": "server.find_interval_rates", "line_number": 576, "usage_type": "call"}, {"api_name": "server.find_interval_rates", "line_number": 581, "usage_type": "call"}, {"api_name": "server.find_interval_rates", "line_number": 586, "usage_type": "call"}, {"api_name": "server.validate_time_format", "line_number": 597, "usage_type": "call"}, {"api_name": "server.validate_time_format", "line_number": 603, "usage_type": "call"}, {"api_name": "server.validate_time_format", "line_number": 611, "usage_type": "call"}, {"api_name": "server.calculate_interval_average", "line_number": 623, "usage_type": "call"}, {"api_name": "server.calculate_interval_average", "line_number": 630, "usage_type": "call"}, {"api_name": "server.calculate_interval_average", "line_number": 637, "usage_type": "call"}, {"api_name": "server.get_heart_rate_list", "line_number": 646, "usage_type": "call"}]} +{"seq_id": "362409234", "text": "import pytest\nimport pytest_mock\nfrom unittest.mock import MagicMock\nfrom src.marvelous.usecases.get_user import *\n\n\ndef test_is_user_exist_true(mocker: pytest_mock.MockerFixture):\n discord_id = 0\n mocker.patch.object(data_store.users, \"get_by_id\", return_value=mocker.Mock())\n result: bool = is_user_exist(discord_id)\n assert result\n\n\ndef test_is_user_exist_false(mocker: pytest_mock.MockerFixture):\n discord_id = 0\n mocker.patch.object(data_store.users, \"get_by_id\", return_value=None)\n result: bool = is_user_exist(discord_id)\n assert not result\n\n\ndef test_get_user_failed_not_found(mocker: pytest_mock.MockerFixture):\n \"\"\"\n データストアからNoneが返された場合に、結果として「ユーザーがいない」ことを示すオブジェクトが返される\n \"\"\"\n discord_id = 0\n get_func: MagicMock = mocker.patch.object(data_store.users, \"get_by_id\", return_value=None)\n\n with pytest.raises(UserNotFoundError) as e:\n get_user(discord_id=discord_id)\n\n get_func.assert_called_once_with(discord_id)\n assert e.value.user_id == discord_id\n\n\ndef test_get_user_succeed(mocker: pytest_mock.MockerFixture):\n \"\"\"\n データストアからユーザーが返された場合に、結果として成功を表すオブジェクトが返される\n \"\"\"\n discord_id = 0\n user: MagicMock = mocker.Mock()\n get_func: MagicMock = mocker.patch.object(data_store.users, \"get_by_id\", return_value=user)\n\n result = get_user(discord_id)\n\n get_func.assert_called_once_with(discord_id)\n assert result == user, \"返されるユーザーが正しい\"\n", "sub_path": "app/tests/marvelous/usecases/test_get_user.py", "file_name": "test_get_user.py", "file_ext": "py", "file_size_in_byte": 1623, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pytest_mock.MockerFixture", "line_number": 7, "usage_type": "attribute"}, {"api_name": "pytest_mock.MockerFixture", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pytest_mock.MockerFixture", "line_number": 21, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 26, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 28, "usage_type": "call"}, {"api_name": "pytest_mock.MockerFixture", "line_number": 35, "usage_type": "attribute"}, {"api_name": "unittest.mock.MagicMock", "line_number": 40, 
"usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "60757847", "text": "\"\"\"Custom loss for long tail problem.\n\n- Author: Junghoon Kim\n- Email: placidus36@gmail.com\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nfrom torch.optim.lr_scheduler import _LRScheduler\n\n\nclass CustomCriterion:\n \"\"\"Custom Criterion.\"\"\"\n\n def __init__(self, samples_per_cls, device, fp16=False, loss_type=\"softmax\"):\n if not samples_per_cls:\n loss_type = \"softmax\"\n else:\n self.samples_per_cls = samples_per_cls\n self.frequency_per_cls = samples_per_cls / np.sum(samples_per_cls)\n self.no_of_classes = len(samples_per_cls)\n self.device = device\n self.fp16 = fp16\n\n if loss_type == \"softmax\":\n self.criterion = nn.CrossEntropyLoss()\n elif loss_type == \"logit_adjustment_loss\":\n tau = 1.0\n self.logit_adj_val = (\n torch.tensor(tau * np.log(self.frequency_per_cls))\n .float()\n .to(self.device)\n )\n self.logit_adj_val = (\n self.logit_adj_val.half() if fp16 else self.logit_adj_val.float()\n )\n self.logit_adj_val = self.logit_adj_val.to(device)\n self.criterion = self.logit_adjustment_loss\n\n def __call__(self, logits, labels):\n \"\"\"Call criterion.\"\"\"\n return self.criterion(logits, labels)\n\n def logit_adjustment_loss(self, logits, labels):\n \"\"\"Logit adjustment loss.\"\"\"\n logits_adjusted = logits + self.logit_adj_val.repeat(labels.shape[0], 1)\n loss = F.cross_entropy(input=logits_adjusted, target=labels)\n return loss\n\n\nclass CosineAnnealingWarmupRestarts(_LRScheduler):\n \"\"\"\n optimizer (Optimizer): Wrapped optimizer.\n first_cycle_steps (int): First cycle step size.\n cycle_mult(float): Cycle steps magnification. Default: -1.\n max_lr(float): First cycle's max learning rate. Default: 0.1.\n min_lr(float): Min learning rate. Default: 0.001.\n warmup_steps(int): Linear warmup step size. Default: 0.\n gamma(float): Decrease rate of max learning rate by cycle. Default: 1.\n last_epoch (int): The index of last epoch. 
Default: -1.\n \"\"\"\n\n def __init__(self,\n optimizer: torch.optim.Optimizer,\n first_cycle_steps: int,\n cycle_mult: float = 1.,\n max_lr: float = 0.1,\n min_lr: float = 0.001,\n warmup_steps: int = 0,\n gamma: float = 1.,\n last_epoch: int = -1\n ):\n assert warmup_steps < first_cycle_steps\n\n self.first_cycle_steps = first_cycle_steps # first cycle step size\n self.cycle_mult = cycle_mult # cycle steps magnification\n self.base_max_lr = max_lr # first max learning rate\n self.max_lr = max_lr # max learning rate in the current cycle\n self.min_lr = min_lr # min learning rate\n self.warmup_steps = warmup_steps # warmup step size\n self.gamma = gamma # decrease rate of max learning rate by cycle\n\n self.cur_cycle_steps = first_cycle_steps # first cycle step size\n self.cycle = 0 # cycle count\n self.step_in_cycle = last_epoch # step size of the current cycle\n\n super(CosineAnnealingWarmupRestarts, self).__init__(optimizer, last_epoch)\n\n # set learning rate min_lr\n self.init_lr()\n\n def init_lr(self):\n self.base_lrs = []\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = self.min_lr\n self.base_lrs.append(self.min_lr)\n\n def get_lr(self):\n if self.step_in_cycle == -1:\n return self.base_lrs\n elif self.step_in_cycle < self.warmup_steps:\n return [(self.max_lr - base_lr) * self.step_in_cycle / self.warmup_steps + base_lr for base_lr in\n self.base_lrs]\n else:\n return [base_lr + (self.max_lr - base_lr) \\\n * (1 + math.cos(math.pi * (self.step_in_cycle - self.warmup_steps) \\\n / (self.cur_cycle_steps - self.warmup_steps))) / 2\n for base_lr in self.base_lrs]\n\n def step(self, epoch=None):\n if epoch is None:\n epoch = self.last_epoch + 1\n self.step_in_cycle = self.step_in_cycle + 1\n if self.step_in_cycle >= self.cur_cycle_steps:\n self.cycle += 1\n self.step_in_cycle = self.step_in_cycle - self.cur_cycle_steps\n self.cur_cycle_steps = int(\n (self.cur_cycle_steps - self.warmup_steps) * self.cycle_mult) + self.warmup_steps\n else:\n if epoch >= self.first_cycle_steps:\n if self.cycle_mult == 1.:\n self.step_in_cycle = epoch % self.first_cycle_steps\n self.cycle = epoch // self.first_cycle_steps\n else:\n n = int(math.log((epoch / self.first_cycle_steps * (self.cycle_mult - 1) + 1), self.cycle_mult))\n self.cycle = n\n self.step_in_cycle = epoch - int(\n self.first_cycle_steps * (self.cycle_mult ** n - 1) / (self.cycle_mult - 1))\n self.cur_cycle_steps = self.first_cycle_steps * self.cycle_mult ** (n)\n else:\n self.cur_cycle_steps = self.first_cycle_steps\n self.step_in_cycle = epoch\n\n self.max_lr = self.base_max_lr * (self.gamma ** self.cycle)\n self.last_epoch = math.floor(epoch)\n for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):\n param_group['lr'] = lr", "sub_path": "src/loss.py", "file_name": "loss.py", "file_ext": "py", "file_size_in_byte": 5710, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.sum", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.tensor", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 49, "usage_type": "name"}, {"api_name": 
"torch.optim.lr_scheduler._LRScheduler", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.optim", "line_number": 66, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 108, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 108, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 127, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "290698709", "text": "import numpy as np\nfrom keras.datasets import imdb\nfrom keras import models\nfrom keras import layers\nfrom keras import optimizers\n\n# num_words=10000 para quedarnos solo con las 10000 palabras que más aparezcan en las reviews (+ comunes)\n# train_data y test_data son listas de reviews ya pasadas a enteros.\n# train_labels y test_labels son listas de 0s y 1s que se corresponden con la valoración de la película\n# 0 mala, 1 buena\n(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)\n\n# probar train_data[0] y train_labels[0] para ver la forma\n\n# El diccionario que traduce de palabras a enteros\nword_index = imdb.get_word_index()\n# Lo invertimos para hacer la traducción opuesta\nreverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n\n# En este diccionario en concreto hay que meter un offset de 3 porque 0,1 y 2 son índices reservados.\ndecoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[1]])\n\n# probar decoded_review para ver una review tras hacerle la inversión\n\n\ndef vectorize_sequences(sequences, dimension=10000):\n # Creamos una matriz de tamaño (tamaño de sequences, dimension) inicializada con ceros\n results = np.zeros((len(sequences), dimension))\n for i, sequence in enumerate(sequences):\n print(sequence)\n results[i, sequence] = 1. 
# set the specified indices in results[i] to 1\n    return results\n\n\n# Use the function above to turn the reviews into a matrix of ones and zeros that a neural network can process\nx_train = vectorize_sequences(train_data)\nx_test = vectorize_sequences(test_data)\n\n# Also convert the review labels from a list to an array\ny_train = np.asarray(train_labels).astype('float32')\ny_test = np.asarray(test_labels, 'float32')\n\n\n# Create the empty model and add the layers\nmodel = models.Sequential()\nmodel.add(layers.Dense(16, activation='relu', input_shape=(10000,)))\nmodel.add(layers.Dense(16, activation='relu'))\nmodel.add(layers.Dense(1, activation='sigmoid'))\n\nmodel.compile(optimizer='rmsprop',\n              loss='binary_crossentropy',\n              metrics=['accuracy'])\n\n# 'Validation' data to check how well the network does on new examples\nx_val = x_train[:10000]\npartial_x_train = x_train[10000:]\n\ny_val = y_train[:10000]\npartial_y_train = y_train[10000:]\n\n#history = model.fit(partial_x_train,\n#                    partial_y_train,\n   #                 epochs=20,\n   #                 batch_size=512,\n   #                 validation_data=(x_val, y_val))\nmodel.fit(partial_x_train,\n          partial_y_train,\n          epochs=20,\n          batch_size=512)\nhistory = model.evaluate(x_val, y_val)\n\n\n\n", "sub_path": "old/imdbExample.py", "file_name": "imdbExample.py", "file_ext": "py", "file_size_in_byte": 2702, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "keras.datasets.imdb.load_data", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.datasets.imdb", "line_number": 11, "usage_type": "name"}, {"api_name": "keras.datasets.imdb.get_word_index", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.datasets.imdb", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 45, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 46, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 47, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 47, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 48, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "438909285", "text": "# from .config import HOME\nimport os\nimport os.path as osp\nimport sys\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport cv2\nimport numpy as np\n\nKAIST_CLASSES = ('person', )\n\n# KAIST_ROOT = osp.join(HOME, 'data/kaist/')\n\ndef detection_collate_KAIST_YOLO(batch):\n    \"\"\"Custom collate fn for dealing with batches of images that have a different\n    number of associated object annotations (bounding boxes).\n\n    Arguments:\n        batch: (tuple) A tuple of tensor images and lists of annotations\n\n    Return:\n        A tuple containing:\n            1) (tensor) batch of images stacked on their 0 dim\n            2) (list of tensors) annotations for a given image are stacked on\n                                 0 dim\n    \"\"\"\n    targets = []\n    imgs = []\n    for sample in batch:\n        imgs.append(sample[1])\n        targets.append(torch.FloatTensor(sample[2]))\n    return torch.stack(imgs, 0), 
torch.stack(targets,0)\n\n\ndef detection_collate_KAIST_SSD(batch):\n    \"\"\"Custom collate fn for dealing with batches of images that have a different\n    number of associated object annotations (bounding boxes).\n\n    Arguments:\n        batch: (tuple) A tuple of tensor images and lists of annotations\n\n    Return:\n        A tuple containing:\n            1) (tensor) batch of images stacked on their 0 dim\n            2) (list of tensors) annotations for a given image are stacked on\n                                 0 dim\n    \"\"\"\n    targets = []\n    imgs = []\n    for sample in batch:\n        imgs.append(sample[1])\n        targets.append(torch.FloatTensor(sample[2]))\n    return torch.stack(imgs, 0), targets\n\nclass KAISTAnnotationTransform(object):\n    \"\"\"Transforms a KAIST annotation into a Tensor of bbox coords and label index\n    Initialised with a dictionary lookup of classnames to indexes\n\n    Arguments:\n        class_to_ind (dict, optional): dictionary lookup of classnames -> indexes\n            (default: alphabetic indexing of VOC's 20 classes)\n        keep_difficult (bool, optional): keep difficult instances or not\n            (default: False)\n        height (int): height\n        width (int): width\n    \"\"\"\n\n    def __init__(self, class_to_ind=None, keep_difficult=False, output_format=None):\n        self.class_to_ind = class_to_ind or dict(\n            zip(KAIST_CLASSES, range(len(KAIST_CLASSES))))\n        self.keep_difficult = keep_difficult\n        self.output_format=output_format\n\n    def __call__(self, target, width, height):\n        \"\"\"\n        Arguments:\n            target (annotation filename) : the target annotation to be made usable\n                will be a filename including path\n        Returns:\n            a list containing lists of bounding boxes[bbox coords, class name]\n        \"\"\"\n        #print(\"[VPY] KAISTAnnotationTransform called: target: {}, width: {}, height: {}\".format(target, width, height))\n\n        if not osp.exists(target):\n            print(\"annotation file not found: {}\".format(target))\n            sys.exit(-1)\n\n        res = []\n        raw_details = []\n        if self.output_format in {\"SSD\", \"VOC_EVAL\"}:\n            with open(target) as f: #open annotation file and read all lines\n                for line in f.readlines():\n                    if line.startswith(\"person \"): # only one class supported: \"person\"\n                        line = line.split(\" \")\n                        line[1] = float(line[1]) #convert coordinates to float: xmin, ymin, width, height\n                        line[2] = float(line[2])\n                        line[3] = float(line[3])\n                        line[4] = float(line[4])\n\n                        bbox = [(line[1]/width), (line[2]/height), (line[1] + line[3])/width, (line[2] + line[4])/height] # [xmin, ymin, xmax, ymax], all coordinates are [0;1] => divided by width or height\n\n                        label_idx = self.class_to_ind[line[0]] # label index is always 0 as we have only the \"person\" class supported\n                        bbox.append(label_idx)\n\n                        res += [bbox]\n                        raw_details += [line]\n                        #print(\"bounding box: {}\".format(bbox))\n\n        elif self.output_format == \"YOLO\":\n            res = np.zeros((50, 5))\n            i = 0\n            with open(target) as f:\n                for line in f.readlines():\n                    if line.startswith(\"person \"): # only one class supported: \"person\"\n                        line = line.split(\" \")\n                        xmin = float(line[1]) / width\n                        ymin = float(line[2]) / height\n                        xmax = (float(line[1]) + float(line[3])) / width\n                        ymax = (float(line[2]) + float(line[4])) / height\n\n                        res[i, 0] = self.class_to_ind[line[0]]\n                        res[i, 1] = ((xmax + xmin) / 2)\n                        res[i, 2] = ((ymax + ymin) / 2)\n                        res[i, 3] = (xmax - xmin)\n                        res[i, 4] = (ymax - ymin)\n                        i += 1\n        else:\n            print(\"VPY: must choose output format for KAISTAnnotationTransform()\")\n            raise NotImplementedError\n\n        return res, raw_details #return all annotations: [[x1min, y1min, x1max, y1max, label_idx=0], [...] 
]\n\nclass KAISTDetection(data.Dataset):\n    #KAIST Detection Dataset Object\n    def __init__(self, root,\n                 image_set='VPY-train-day.txt',\n                 transform=None, target_transform=None,\n                 dataset_name='KAIST',\n                 image_fusion=0,\n                 corrected_annotations = False):\n        \"\"\"\n        Kaist Dataset constructor\n        :param root: (string): filepath to KAIST root folder. (root folder must contain \"rgbt-ped-detection\" folder)\n        :param image_set: (string): imageset to use (eg. microset.txt). This is the name of the file containing the dataset, which must exist in imageSet folder\n        :param transform: (callable, optional): transformation to perform on the input image\n        :param target_transform: (callable, optional): transformation to perform on the target `annotation` (eg: take in caption string, return tensor of word indices). Default is: KAISTAnnotationTransform\n        :param dataset_name: (string, optional): which dataset to load (default: 'KAIST')\n        :param image_fusion: (int): type of fusion used: [0: visible] [1: LWIR] [2: inverted LWIR] [...]\n        :param corrected_annotations: (bool, default: False) do we want to use corrected annotations? (not really working yet)\n        \"\"\"\n        print(\"{}: ImageSet used is: {}\".format(dataset_name, image_set))\n        self.root = root\n        self.image_set = image_set\n        self.transform = transform\n        self.target_transform = target_transform\n        self.name = dataset_name\n        self.corrected_annotations = corrected_annotations\n        if target_transform == None:\n            print(\"VPY: must manually add target_transform=KAISTAnnotationTransform(...)\")\n            raise NotImplementedError\n\n        if self.corrected_annotations:\n            self._annopath = osp.join('%s', 'annotations_corrected', '%s', '%s', '%s.txt')\n        else:\n            self._annopath = osp.join('%s', 'annotations', '%s', '%s', '%s.txt')\n\n        self._img_vis_root_path = osp.join('%s', 'images', '%s', '%s', 'visible', '%s.jpg')\n        self._img_lwir_root_path = osp.join('%s', 'images', '%s', '%s', 'lwir', '%s.jpg')\n        self.image_fusion = image_fusion\n\n        self.ids = list()\n\n        # open imageSet file and add files which interest us in the imageList (ids)\n        rootpath = osp.join(self.root, 'rgbt-ped-detection/data/kaist-rgbt/')\n        for line in open(osp.join(rootpath, 'imageSets', image_set)): # read imageSet file and loop for each entry\n            if not line.startswith(\"#\"): # remove comments\n                annofile = self._annopath % tuple([rootpath] + line.replace('\\n', '').replace('\\r', '').split('/')) #get annotation file for the current image\n                self.ids.append(tuple([rootpath] + line.replace('\\n', '').replace('\\r', '').split('/') + [line.replace('\\n', '').replace('\\r', '')]))\n\n    def __getitem__(self, index):\n        img_path, im, gt, h, w = self.pull_item(index)\n        return img_path, im, gt, h, w\n\n    def __len__(self):\n        return len(self.ids)\n\n    def pull_item(self, index):\n        #TODO VPY: Only visible image is loaded (no lwir)\n        img_id = self.ids[index]\n\n        #TODO VPY parse annotations => target\n        target = self._annopath % img_id[0:4]\n\n        if self.image_fusion == 0:\n            img = self.pull_visible_image(index)\n        elif self.image_fusion == 1:\n            img = self.pull_raw_lwir_image(index)\n        elif self.image_fusion == 2:\n            img = self.pull_raw_lwir_image(index)\n            img = 255-img\n        else:\n            print(\"image fusion not handled\")\n            sys.exit(-1)\n\n        height, width, channels = img.shape\n\n        if self.target_transform is not None:\n            target, _ = self.target_transform(target, width, height)\n        else:\n            print(\"You are required to implement the target_transform method to read annotations!\")\n            sys.exit(-1)\n\n        if self.transform is not None:\n            target = 
np.array(target)\n            #print(\"VPY: img_id: {}, target: {}\".format(img_id, target))\n            img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])\n            # to rgb\n            img = img[:, :, (2, 1, 0)]\n            # img = img.transpose(2, 0, 1)\n            target = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n        return \"not defined\", torch.from_numpy(img).permute(2, 0, 1), target, height, width\n\n    def pull_image(self, index):\n        #TODO VPY: Only visible image is loaded (no lwir)\n        '''Returns the original image object at index in PIL form\n\n        Note: not using self.__getitem__(), as any transformations passed in\n        could mess up this functionality.\n\n        Argument:\n            index (int): index of img to show\n        Return:\n            PIL img\n        '''\n\n        if self.image_fusion == 0:\n            img = self.pull_visible_image(index)\n        elif self.image_fusion == 1:\n            img = self.pull_raw_lwir_image(index)\n        elif self.image_fusion == 2:\n            img = self.pull_raw_lwir_image(index)\n            img = 255-img\n        else:\n            print(\"image fusion not handled\")\n            sys.exit(-1)\n\n        return img\n\n    def pull_visible_image(self, index):\n        '''Returns the original image object at index in PIL form (visible image)\n\n        Note: not using self.__getitem__(), as any transformations passed in\n        could mess up this functionality.\n\n        Argument:\n            index (int): index of img to show (RGB)\n        Return:\n            PIL img\n        '''\n\n        img_id = self.ids[index]\n        img = cv2.imread(self._img_vis_root_path % img_id[0:4])\n\n        # to rgb\n        img = img[:, :, (2, 1, 0)]\n        # print(\"pull RGB IMG: index: {}, id:{}\".format(index, img_id[3]))\n        return img\n\n    def pull_raw_lwir_image(self, index):\n        '''Returns the original image object at index in PIL form (LWIR image)\n\n        Note: not using self.__getitem__(), as any transformations passed in\n        could mess up this functionality.\n\n        Argument:\n            index (int): index of img to show\n        Return:\n            PIL img\n        '''\n\n        img_id = self.ids[index]\n        # print(\"pull LIR IMG: index: {}, id:{}\".format(index, img_id[3]))\n        img = cv2.imread(self._img_lwir_root_path % img_id[0:4])\n        return img\n\n    # return cv2.imread(self._img_vis_root_path % img_id, cv2.IMREAD_COLOR)\n\n    def pull_img_id(self, index):\n        img_id = self.ids[index]\n        return img_id[4]\n\n    def pull_anno(self, index):\n        '''Returns the original annotation of image at index\n\n        Note: not using self.__getitem__(), as any transformations passed in\n        could mess up this functionality.\n\n        Argument:\n            index (int): index of img to get annotation of\n        Return:\n            list: (img_id, [bbox coords, label_id])\n                eg: ('001718', [96, 13, 438, 332, 12])\n        '''\n        img_id = self.ids[index]\n        target = self._annopath % img_id[0:4]\n        if self.target_transform is not None:\n            gt, raw_details = self.target_transform(target, 1, 1) # TODO VPY OK ??\n        else:\n            print(\"no target transform function!\")\n            sys.exit(-1)\n        return img_id[4], gt, raw_details\n\n    def pull_tensor(self, index):\n        '''Returns the original image at an index in tensor form\n\n        Note: not using self.__getitem__(), as any transformations passed in\n        could mess up this functionality.\n\n        Argument:\n            index (int): index of img to show\n        Return:\n            tensorized version of img, squeezed\n        '''\n        return torch.Tensor(self.pull_image(index)).unsqueeze_(0)\n\ndef parse_rec_kaist(filename):\n    if not os.path.exists(filename):\n        print(\"annotation file not found: {}\".format(filename))\n        sys.exit(-1)\n\n    with open(filename) as f: # open annotation file and read all lines\n        objects = []\n        for line in f.readlines():\n            if line.startswith(\"person \"): # only one class supported: \"person\"\n\n                line = line.split(\" \")\n                line[1] = 
float(line[1]) # convert coordinates to float: xmin, ymin, width, height\n                line[2] = float(line[2])\n                line[3] = float(line[3])\n                line[4] = float(line[4])\n\n                bbox = [(line[1]), (line[2]), (line[1] + line[3]), (line[2] + line[4])] # [xmin, ymin, xmax, ymax], all coordinates are [0;width or height] => not divided by width or height\n\n                obj_struct = {}\n                obj_struct['name'] = line[0]\n                #obj_struct['pose'] = \"n/d\"\n                #obj_struct['truncated'] = \"n/d\"\n                #obj_struct['difficult'] = \"n/d\"\n                obj_struct['bbox'] = [int(line[1]),\n                                      int(line[2]),\n                                      int(line[1]) + int(line[3]),\n                                      int(line[2]) + int(line[4])]\n                objects.append(obj_struct)\n    return objects\n\ndef compute_KAIST_dataset_mean(dataset_root, image_set):\n    print(\"compute images mean\")\n\n    images_mean = np.zeros((3), dtype=np.float64)  # [0,0,0]\n    #\n    # # create batch iterator\n    dataset_mean = KAISTDetection(root=dataset_root, image_set=image_set, transform=None)\n    data_loader_mean = data.DataLoader(dataset_mean, 1, num_workers=1, shuffle=True, pin_memory=True)\n    batch_iterator = iter(data_loader_mean)\n    i = 0\n    for i in range(len(dataset_mean)):  # while True:# iteration in range(args.start_iter, cfg['max_iter']):\n        # for i in range(100):\n        #     print(\"Debug: not all data!!!!!\")\n        try:\n            # load train data\n            image, _ = next(batch_iterator)\n            images_mean += image[0].permute(1, 2, 0).numpy().mean(axis=(0, 1))\n        except StopIteration:\n            break\n\n    images_mean = images_mean / i\n    print(\"image mean is: {}\".format(images_mean))\n\n    return images_mean", "sub_path": "data/kaist.py", "file_name": "kaist.py", "file_ext": "py", "file_size_in_byte": 15316, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.FloatTensor", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.FloatTensor", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 134, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 134, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 166, "usage_type": "call"}, {"api_name": "os.path", "line_number": 166, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path", "line_number": 168, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 175, "usage_type": "call"}, {"api_name": "os.path", "line_number": 175, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 204, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 221, 
"usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 222, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 246, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 263, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 284, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 328, "usage_type": "call"}, {"api_name": "os.path", "line_number": 328, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 360, "usage_type": "call"}, {"api_name": "numpy.float64", "line_number": 360, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 364, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 364, "usage_type": "name"}]} +{"seq_id": "164917703", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \nimport bs4\nimport requests\nimport pandas as pd\nimport re\nfrom helpers import clean_text, replace_month_with_digit\n\n\ndef gather_new_articles(site):\n request = requests.get(site)\n soup = bs4.BeautifulSoup(request.text, 'lxml')\n\n top_news = set([art['href'] for art in soup.select('.gtm-TopNews-click')])\n all_articles = set([art['href'] for art in soup.find_all('a', href=True) if art['href'].startswith(site)])\n all_articles = all_articles.difference(top_news)\n\n top_news = crawlLinks(top_news)\n top_news['section'] = 'top_news'\n all_articles = crawlLinks(all_articles)\n all_articles['section'] = None\n\n articles = pd.concat([top_news, all_articles])\n\n return articles\n\n\ndef crawlLinks(links):\n articlesContent = pd.DataFrame()\n\n for link in list(links)[0:5]:\n try:\n rq = requests.get(link)\n if rq.status_code == 200:\n page = bs4.BeautifulSoup(rq.text, 'lxml')\n \n articleTitle = page.select('h1')[0].text\n articleSubtitle = page.select('h2.subtitle')[0].text\n\n articleDate = page.select('.article-time')[0].text.split(', oбновена')[0]\n articleDate = clean_text(articleDate)\n month_name = re.search('([а-яА-Я]+)', articleDate)\n month_name = month_name.group(1) if month_name is not None else None\n articleDate = articleDate.replace(month_name, replace_month_with_digit(month_name)) if month_name is not None else articleDate\n articleDate = pd.to_datetime(articleDate, format='%d %m %Y, %H:%M')\n\n category = page.select('div.article-category')[0].a.text\n comments = page.select('.commentsButtonNumber')[0].text\n article_text = ' '.join(\n [clean_text(par.text) for par in page.select('.article-text')[0].select('p')])\n\n # article-tags\n tags = page.select('.article-tags')\n tags = ' - '.join([clean_text(tag.text) for tag in tags[0].select('a')]) if tags is not None else None\n \n articlesContent = articlesContent.append({'link': link,\n 'comments': clean_text(comments),\n 'title': clean_text(articleTitle),\n 'subtitle': clean_text(articleSubtitle),\n 'date': articleDate,\n 'category': category,\n 'tags': tags,\n 'article_text': article_text},\n ignore_index=True)\n except:\n continue\n\n return articlesContent", "sub_path": "news_scrapper/site1.py", "file_name": "site1.py", "file_ext": "py", "file_size_in_byte": 2991, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "requests.get", 
"line_number": 11, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 12, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 29, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "helpers.clean_text", "line_number": 41, "usage_type": "call"}, {"api_name": "re.search", "line_number": 42, "usage_type": "call"}, {"api_name": "helpers.replace_month_with_digit", "line_number": 44, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 45, "usage_type": "call"}, {"api_name": "helpers.clean_text", "line_number": 50, "usage_type": "call"}, {"api_name": "helpers.clean_text", "line_number": 54, "usage_type": "call"}, {"api_name": "helpers.clean_text", "line_number": 57, "usage_type": "call"}, {"api_name": "helpers.clean_text", "line_number": 58, "usage_type": "call"}, {"api_name": "helpers.clean_text", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "346448436", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport time\nimport robomaster as rm\nfrom app.core.controller import *\nfrom app.controller.msg import *\nfrom app.core import vision\nfrom app.constants import *\n\n__all__ = [\"S1Controller\"]\n\n\nclass S1Controller(Controller):\n MAX_HEAT = 150\n HIT_DMG = 15\n FIRE_HEAT = 25\n COOL_HEAT = 15\n MAX_BURN_DMG = 35\n INITIAL_HP = 600\n INITIAL_BAT = 100\n GIMBAL_SPEED_YAW = 100\n GIMBAL_SPEED_PITCH = 100\n\n def __init__(self, name: str, color: str, debug: bool):\n super(S1Controller, self).__init__()\n\n self.name = name\n self.color = color\n self.debug = debug\n self.hp = S1Controller.INITIAL_HP\n self.bat = S1Controller.INITIAL_BAT\n self.aim_method = DEFAULT_AIM_METHOD\n\n self.last_cool_time = time.time()\n self.hit_times, self.last_hit_times = 0, 0\n self.target = (int(SCREEN_SIZE[0] / 2), int(SCREEN_SIZE[1] / 2))\n\n if not debug:\n self.s1 = rm.robot.Robot()\n self.s1.initialize(conn_type='ap', proto_type='udp')\n logging.debug(self.s1.set_robot_mode(mode=rm.robot.GIMBAL_LEAD))\n self.s1.led.set_led(comp=rm.led.COMP_ALL, r={'red': 255, 'blue': 0}[color],\n g=0, b={'red': 0, 'blue': 255}[color], effect=rm.led.EFFECT_ON)\n self.s1.camera.start_video_stream(display=False)\n self.s1.armor.sub_ir_event(callback=self.ir_hit_callback)\n self.s1.battery.sub_battery_info(freq=5, callback=self.battery_callback)\n self.s1.gimbal.sub_angle(callback=self.angle_callback)\n self.s1.armor.set_hit_sensitivity(sensitivity=10)\n\n logging.debug(self)\n\n def get_img(self):\n return self.s1.camera.read_cv2_image()\n\n def act(self, img, msg: Msg2Controller):\n\n if not self.debug:\n self.s1.chassis.drive_speed(x=msg.speed[0], y=msg.speed[1], z=0, timeout=1)\n\n logging.debug(f\"SPD X{msg.speed[0]} Y{msg.speed[1]}\")\n\n if msg.aim_method != self.aim_method:\n self.aim_method = msg.aim_method\n\n if self.aim_method == \"manual\":\n logging.info(\"AUTO AIM OFF\")\n else:\n logging.info(f\"AUTO AIM SWITCHED TO {self.aim_method.upper()}\")\n\n if self.aim_method != \"manual\":\n auto_aim_x, auto_aim_y = vision.feed(img, debug=self.debug, color=self.color, tag=self.aim_method)\n yaw = (auto_aim_x - int(SCREEN_SIZE[0] / 2)) / SCREEN_SIZE[0] * 125\n pitch = (int(SCREEN_SIZE[1] / 2) - auto_aim_y) / SCREEN_SIZE[1] * 20\n self.target = (auto_aim_x, auto_aim_y)\n\n logging.debug(f\"AUTO AIM {auto_aim_x}, {auto_aim_y}\")\n\n else:\n _, _ = 
vision.feed(img, debug=self.debug, color=self.color)\n yaw = msg.cur_delta[0] / SCREEN_SIZE[0] * 120\n pitch = msg.cur_delta[1] / SCREEN_SIZE[1] * 20\n\n logging.debug(f\"ROT Y{yaw} P{pitch}\")\n\n if not self.debug:\n if self.action_state:\n if 50 > abs(yaw) >= 3 or 10 > abs(pitch) > 3:\n self.gimbal_action = self.s1.gimbal.move(yaw=yaw, pitch=pitch,\n pitch_speed=S1Controller.GIMBAL_SPEED_PITCH,\n yaw_speed=S1Controller.GIMBAL_SPEED_YAW)\n self.action_state = self.gimbal_action.is_completed\n else:\n self.action_state = self.gimbal_action.is_completed\n\n if msg.fire:\n if not self.debug:\n self.s1.blaster.set_led(200)\n self.s1.blaster.fire(rm.blaster.INFRARED_FIRE, 1)\n\n self.heat += S1Controller.FIRE_HEAT\n if self.heat > S1Controller.MAX_HEAT:\n self.__bleed(tag='burn')\n\n logging.info(\"FIRE\")\n\n def hit(self):\n if self.hit_times > self.last_hit_times:\n for _ in range(self.last_hit_times, self.hit_times):\n self.__bleed(tag='hit')\n self.last_hit_times = self.hit_times\n\n def cool(self):\n if 0.95 < time.time() - self.last_cool_time < 1.05:\n self.last_cool_time = time.time()\n self.heat = max(self.heat - S1Controller.COOL_HEAT, 0)\n\n logging.info(\"COOLING DOWN\")\n\n def __bleed(self, tag: str):\n if tag == 'hit':\n self.hp -= S1Controller.HIT_DMG\n if not self.debug:\n self.s1.led.set_led(comp=rm.led.COMP_ALL,\n r={'red': 0, 'blue': 255}[self.color], g=0,\n b={'red': 255, 'blue': 0}[self.color],\n effect={'red': rm.led.EFFECT_OFF, 'blue': rm.led.EFFECT_FLASH}[self.color])\n time.sleep(0.03)\n self.s1.led.set_led(comp=rm.led.COMP_ALL,\n r={'red': 255, 'blue': 0}[self.color], g=0,\n b={'red': 0, 'blue': 255}[self.color], effect=rm.led.EFFECT_ON)\n elif tag == 'burn':\n burn_hp = max(self.heat - S1Controller.MAX_HEAT, S1Controller.MAX_BURN_DMG)\n self.hp -= burn_hp\n\n if self.hp <= 0:\n self.__die()\n\n def __die(self):\n self.hp = 0\n\n if not self.debug:\n self.s1.chassis.drive_speed(x=0, y=0, z=0, timeout=1)\n self.s1.led.set_led(comp=rm.led.COMP_ALL, r={'red': 0, 'blue': 255}[self.color],\n g=0, b={'red': 255, 'blue': 0}[self.color], effect=rm.led.EFFECT_FLASH, freq=1)\n self.s1.camera.stop_video_stream()\n self.s1.close()\n\n logging.info(\"DIE\")\n\n def battery_callback(self, bat):\n\n logging.info(f\"BAT {bat}\")\n\n self.bat = bat\n\n return bat\n\n def ir_hit_callback(self, hit):\n\n logging.info(f\"HIT {hit} TIMES\")\n\n self.hit_times = hit\n\n return hit\n\n @staticmethod\n def angle_callback(x):\n return x\n", "sub_path": "app/controller/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "time.time", "line_number": 34, "usage_type": "call"}, {"api_name": "robomaster.robot.Robot", "line_number": 39, "usage_type": "call"}, {"api_name": "robomaster.robot", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 41, "usage_type": "call"}, {"api_name": "robomaster.robot", "line_number": 41, "usage_type": "attribute"}, {"api_name": "robomaster.led", "line_number": 42, "usage_type": "attribute"}, {"api_name": "robomaster.led", "line_number": 43, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 60, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 68, "usage_type": "call"}, {"api_name": 
"app.core.vision.feed", "line_number": 71, "usage_type": "call"}, {"api_name": "app.core.vision", "line_number": 71, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 76, "usage_type": "call"}, {"api_name": "app.core.vision.feed", "line_number": 79, "usage_type": "call"}, {"api_name": "app.core.vision", "line_number": 79, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 83, "usage_type": "call"}, {"api_name": "robomaster.blaster", "line_number": 98, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 104, "usage_type": "call"}, {"api_name": "time.time", "line_number": 113, "usage_type": "call"}, {"api_name": "time.time", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 117, "usage_type": "call"}, {"api_name": "robomaster.led", "line_number": 123, "usage_type": "attribute"}, {"api_name": "robomaster.led", "line_number": 126, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 127, "usage_type": "call"}, {"api_name": "robomaster.led", "line_number": 128, "usage_type": "attribute"}, {"api_name": "robomaster.led", "line_number": 130, "usage_type": "attribute"}, {"api_name": "robomaster.led", "line_number": 143, "usage_type": "attribute"}, {"api_name": "robomaster.led", "line_number": 144, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 148, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 152, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "568679958", "text": "from typing import Iterable, List, Literal, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom anndata import AnnData\nfrom matplotlib import pyplot as plt\nfrom scipy.sparse import lil_matrix\nfrom tqdm import tqdm\n\n\ndef GWAS(\n data: pd.DataFrame,\n chr_name: list = [\"Chr_liftover\", \"Start_liftover\", \"End_liftover\"],\n return_name: str = \"GWAS\",\n) -> pd.DataFrame:\n\n \"\"\"\\\n Calculate the GWAS result for the peak in the data.\n GWAS data is downloaded from `GWAS Catalog `__.\n\n\n :param data:\n The pd.DataFrame. 
Should contain either 3 columns [chr,start,end] or 1 column like 'chr8_64645834_64659215'.\n    :param chr_name:\n        If the data contains the 3 columns [chr,start,end], input the column names as a list: eg ['Chr_liftover', 'Start_liftover', 'End_liftover'].\n        If the data contains a single column like 'chr8_64645834_64659215', input the column name as a list: eg ['Peak'].\n    :param return_name:\n        The name of the column for the result.\n\n\n\n    :example:\n    >>> import pycallingcards as cc\n    >>> adata_cc = cc.datasets.mousecortex_data(data=\"CC\")\n    >>> result = cc.tl.pair_peak_gene_bulk(adata_cc,\"https://github.com/The-Mitra-Lab/pycallingcards_data/releases/download/data/deseq_MF.csv\")\n    >>> GWAS_result = cc.tl.GWAS(result, chr_name = ['Peak'])\n\n\n    \"\"\"\n\n    GWAS = pd.read_csv(\n        \"https://github.com/The-Mitra-Lab/pycallingcards_data/releases/download/data/GWAS.csv\"\n    )\n\n    result = data.copy()\n    if len(chr_name) == 3:\n        data = data[chr_name]\n    elif len(chr_name) == 1:\n        # select the single column by name before splitting the 'chr_start_end' strings;\n        # a one-element column list would select a DataFrame, which has no .str accessor\n        data = data[chr_name[0]].str.split(\"_\", expand=True)\n\n    diseases = []\n    for binding in range(len(result)):\n        if data.iloc[binding, 0] != \"\":\n            chrom = data.iloc[binding, 0]\n            start = data.iloc[binding, 1]\n            end = data.iloc[binding, 2]\n            gwasbind = list(\n                GWAS[\n                    (GWAS[\"CHR_ID\"] == str(chrom)[3:])\n                    & (GWAS[\"CHR_POS\"] >= int(start))\n                    & (GWAS[\"CHR_POS\"] <= int(end))\n                ][\"DISEASE/TRAIT\"].unique()\n            )\n\n            if gwasbind != []:\n                diseases.append([\"; \".join(gwasbind)])\n            else:\n                diseases.append([\"\"])\n        else:\n            diseases.append([\"\"])\n\n    finalresult = pd.DataFrame(diseases).set_index(data.index)\n    finalresult.columns = [return_name]\n    return pd.concat([result, finalresult], axis=1)\n\n\ndef GWAS_adata_sc(\n    adata: AnnData,\n    number: int = 100,\n    bindings: list = [\"Chr\", \"Start\", \"End\"],\n    clusters: list = None,\n    cluster_name: str = \"cluster\",\n) -> pd.DataFrame:\n\n    \"\"\"\\\n    Plot GWAS results for different cell types for single-cell calling cards data. 
It considers the relative number of insertions in each cell type.\n    GWAS data is downloaded from `GWAS Catalog `__.\n\n\n    :param adata:\n        The anndata object of scCC data.\n    :param number:\n        The minimum total number for each SNP.\n    :param bindings:\n        The name for binding information.\n    :param clusters:\n        The cluster to consider.\n        If None, it will use all the clusters in adata.obs[cluster_name].\n    :param cluster_name:\n        The name of the cluster in adata.obs.\n\n\n\n    :example:\n    >>> import pycallingcards as cc\n    >>> adata_cc = cc.datasets.mousecortex_data(data=\"CC\")\n    >>> adata_cc = cc.tl.liftover(adata_cc, bindings = ['Chr_liftover', 'Start_liftover', 'End_liftover'])\n    >>> cc.tl.GWAS_adata_sc(adata_cc)\n\n    \"\"\"\n\n    GWAS = pd.read_csv(\n        \"https://github.com/The-Mitra-Lab/pycallingcards_data/releases/download/data/GWAS.csv\"\n    )\n\n    diseases = list(GWAS[\"DISEASE/TRAIT\"].unique())\n    disease_dict = {}\n\n    for i in range(len(GWAS[\"DISEASE/TRAIT\"].unique())):\n        disease_dict[diseases[i]] = i\n\n    bind_dis = lil_matrix(np.zeros((adata.shape[1], len(diseases))))\n    data = adata.var[bindings]\n\n    for binding in tqdm(range(adata.shape[1])):\n\n        chrom = data.iloc[binding, 0]\n        start = data.iloc[binding, 1]\n        end = data.iloc[binding, 2]\n        if str(start) == \"None\" or str(start) == \"nan\" or str(start) == \"\":\n            gwasbind = []\n        else:\n            gwasbind = GWAS[\n                (GWAS[\"CHR_ID\"] == str(chrom)[3:])\n                & (GWAS[\"CHR_POS\"] >= int(start))\n                & (GWAS[\"CHR_POS\"] <= int(end))\n            ]\n\n        for dis in range(len(gwasbind)):\n            bind_dis[binding, disease_dict[gwasbind.iloc[dis, 3]]] += 1\n\n    cell_dis = adata.X.dot(bind_dis)\n\n    if clusters == None:\n        clusters = list(adata.obs[cluster_name].unique())\n\n    final_result = []\n    for cluster in clusters:\n        final_result.append(\n            cell_dis[adata.obs[cluster_name] == cluster].sum(axis=0).tolist()[0]\n        )\n\n    final_result = np.array(final_result).T\n    total = final_result.sum(axis=1)\n    diseases = np.array(diseases)\n    diseases = diseases[total > number]\n    final_result = final_result[total > number]\n    final_result = pd.DataFrame(final_result, index=diseases, columns=clusters)\n\n    return final_result\n\n\ndef GWAS_adata_bulk(\n    adata: AnnData, number: int = 100, bindings: list = [\"Chr\", \"Start\", \"End\"]\n) -> pd.DataFrame:\n\n    \"\"\"\\\n    Plot GWAS results for different cell types for bulk calling cards data. 
It considers the relative number of insertions in each group.\n    GWAS data is downloaded from `GWAS Catalog `__.\n\n\n    :param adata:\n        The anndata object of bulk CC data.\n    :param number: Default is `100`.\n        The minimum total number for each SNP.\n    :param bindings: Default is ['Chr', 'Start', 'End'].\n        The name for binding information.\n\n    :example:\n    >>> import pycallingcards as cc\n    >>> adata_cc = cc.datasets.mouse_brd4_data(data=\"CC\")\n    >>> adata_cc = cc.tl.liftover(adata_cc)\n    >>> cc.tl.GWAS_adata_bulk(adata_cc, bindings = ['Chr_liftover', 'Start_liftover', 'End_liftover'])\n\n    \"\"\"\n\n    GWAS = pd.read_csv(\n        \"https://github.com/The-Mitra-Lab/pycallingcards_data/releases/download/data/GWAS.csv\"\n    )\n\n    diseases = list(GWAS[\"DISEASE/TRAIT\"].unique())\n    disease_dict = {}\n\n    for i in range(len(GWAS[\"DISEASE/TRAIT\"].unique())):\n        disease_dict[diseases[i]] = i\n\n    bind_dis = lil_matrix(np.zeros((adata.shape[1], len(diseases))))\n    data = adata.var[bindings]\n\n    for binding in tqdm(range(adata.shape[1])):\n\n        chrom = data.iloc[binding, 0]\n        start = data.iloc[binding, 1]\n        end = data.iloc[binding, 2]\n\n        if str(start) == \"None\" or str(start) == \"nan\" or str(start) == \"\":\n            gwasbind = []\n        else:\n            gwasbind = GWAS[\n                (GWAS[\"CHR_ID\"] == str(chrom)[3:])\n                & (GWAS[\"CHR_POS\"] >= int(start))\n                & (GWAS[\"CHR_POS\"] <= int(end))\n            ]\n\n        for dis in range(len(gwasbind)):\n            bind_dis[binding, disease_dict[gwasbind.iloc[dis, 3]]] += 1\n\n    sample_dis = adata.X.dot(bind_dis)\n\n    final_result = np.array(sample_dis.todense().T)\n    total = final_result.sum(axis=1).T\n    diseases = np.array(diseases)\n    diseases = diseases[total > number]\n    final_result = final_result[total > number]\n\n    names = list(adata.obs.index)\n\n    final_result = pd.DataFrame(final_result, index=diseases, columns=names)\n\n    return final_result\n", "sub_path": "pycallingcards/tools/_GWAS.py", "file_name": "_GWAS.py", "file_ext": "py", "file_size_in_byte": 7489, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.DataFrame", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 73, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 75, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 16, "usage_type": "attribute"}, {"api_name": "anndata.AnnData", "line_number": 79, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 113, "usage_type": "call"}, {"api_name": "scipy.sparse.lil_matrix", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 123, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 156, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 159, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 84, "usage_type": "attribute"}, {"api_name": "anndata.AnnData", "line_number": 165, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 188, "usage_type": "call"}, {"api_name": "scipy.sparse.lil_matrix", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 198, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 221, "usage_type": 
"call"}, {"api_name": "numpy.array", "line_number": 223, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 229, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 166, "usage_type": "attribute"}]} +{"seq_id": "518767452", "text": "# Copyright (c) Microsoft Corporation. All rights reserved.S\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\nimport os\nimport sys\nimport logging # noqa: F401\nimport json\nimport time\nfrom paho_client import PahoClient\n\n\"\"\"\nUncomment the following lines to enable debug logging\n\"\"\"\n\n# logging.basicConfig(level=logging.INFO)\n# logging.getLogger(\"paho\").setLevel(level=logging.DEBUG)\n\ntopic_filter = \"sample/#\"\n\n\n##################################\n# CREATE CLIENT\n##################################\n\nclient = PahoClient.create_from_x509_certificate(\n host_name=os.environ[\"IOTHUB_HOST_NAME\"],\n device_id=os.environ[\"SUB_DEVICE_ID\"],\n certificate_filename=os.environ[\"SUB_CERT_PATH\"],\n key_filename=os.environ[\"SUB_KEY_PATH\"],\n clean_session=True,\n)\n\n##################################\n# CONNECT\n##################################\n\nprint(\"Connecting to {}\".format(client.auth.device_id))\nclient.start_connect()\nif not client.connection_status.wait_for_connected(timeout=20):\n sys.exit(1)\nprint(\"Connected\")\n\n##################################\n# SUBSCRIBE\n##################################\n\nqos = 1\nprint(\"Subscribing to {} at qos {}\".format(topic_filter, qos))\n(rc, mid) = client.subscribe(topic_filter, qos)\n\nack_result = client.incoming_subacks.wait_for_ack(mid, timeout=20)\nif not ack_result:\n print(\"SUBACK not received within 20 seconds\")\n client.disconnect()\n client.connection_status.wait_for_disconnected()\n sys.exit(1)\nelif ack_result[0] == -1:\n print(\"Subscription was rejected\")\n client.disconnect()\n client.connection_status.wait_for_disconnected()\n sys.exit(1)\nelse:\n print(\"Subscription was granted with qos {}\".format(ack_result[0]))\n\n##################################\n# LISTEN\n##################################\n\ntime_to_listen_in_seconds = 600\nend_time = time.time() + time_to_listen_in_seconds\n\nwhile time.time() <= end_time:\n remaining_time = end_time - time.time()\n print(\"Waiting for messages for {} more seconds\".format(remaining_time))\n\n message = client.incoming_messages.pop_next_message(timeout=remaining_time)\n if message:\n print(\"Message received on topic {}\".format(message.topic))\n payload_object = json.loads(message.payload)\n print(\"Payload: {}\".format(payload_object))\n\n##################################\n# DISCONNECT\n##################################\n\nprint(\"Disconnecting\")\nclient.disconnect()\nclient.connection_status.wait_for_disconnected()\n", "sub_path": "Scenario6-x509-authentication/python/subscribe_x509.py", "file_name": "subscribe_x509.py", "file_ext": "py", "file_size_in_byte": 2505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "paho_client.PahoClient.create_from_x509_certificate", "line_number": 25, "usage_type": "call"}, {"api_name": "paho_client.PahoClient", "line_number": 25, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 29, 
"usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 56, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 72, "usage_type": "call"}, {"api_name": "time.time", "line_number": 73, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "109420293", "text": "\"\"\"Tests for Coherence node\"\"\"\nimport pytest\nfrom cognigraph.nodes.processors import Coherence\nfrom cognigraph.nodes.sources import FileSource\n\nfrom cognigraph.nodes.tests.prepare_tests_data import info, data_path # noqa\nimport numpy as np\n\n\n@pytest.fixture # noqa\ndef coh_computer(info, data_path): # noqa\n coh_computer = Coherence()\n coh_computer.mne_info = info\n N_SEN = len(info['ch_names'])\n coh_computer.input = np.random.rand(N_SEN)\n parent = FileSource(data_path)\n parent.output = np.random.rand(info['nchan'], 1)\n parent.mne_info = info\n coh_computer.parent = parent\n return coh_computer\n\n\ndef test_change_api_attributes(coh_computer):\n \"\"\"Check if _on_critical_attr_change is defined\"\"\"\n coh_computer._on_critical_attr_change(None, None, None)\n", "sub_path": "cognigraph/nodes/tests/test_Coherence.py", "file_name": "test_Coherence.py", "file_ext": "py", "file_size_in_byte": 790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "cognigraph.nodes.processors.Coherence", "line_number": 12, "usage_type": "call"}, {"api_name": "cognigraph.nodes.tests.prepare_tests_data.info", "line_number": 13, "usage_type": "name"}, {"api_name": "cognigraph.nodes.tests.prepare_tests_data.info", "line_number": 14, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cognigraph.nodes.sources.FileSource", "line_number": 16, "usage_type": "call"}, {"api_name": "cognigraph.nodes.tests.prepare_tests_data.data_path", "line_number": 16, "usage_type": "argument"}, {"api_name": "numpy.random.rand", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cognigraph.nodes.tests.prepare_tests_data.info", "line_number": 17, "usage_type": "name"}, {"api_name": "cognigraph.nodes.tests.prepare_tests_data.info", "line_number": 18, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "70254172", "text": "\n# coding: utf-8\n\n# # Follow-On Classifier: Error Analysis \n# \n# Outline of the notebook\n# \n# - Feature scaling, selection & baseline logistic regresssion\n# - Test on Tony's paper [D11-1142](http://s2.allenai.org/#/paper/D11-1142/papers)\n# - Analysis of a pair of false negative & false positive\n# \n# ## Feature scaling, selection & baseline logistic regression\n\n# In[1]:\n\nimport matplotlib.pyplot as plt\nget_ipython().magic('matplotlib inline')\nimport numpy as np\nimport pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LogisticRegression\n\nclf = Pipeline([\n ('feature_scaling', preprocessing.StandardScaler()),\n ('feature_selection', LogisticRegression(penalty = \"l1\")),\n ('classification', LogisticRegression(class_weight = 
'auto'))\n])\n\ndf = pd.read_csv(\"data/featurized.tsv\", index_col = 0, sep = \"\\t\")\nnpdata = df.values\nX, y = npdata[:,1:].astype(float), npdata[:,0].astype(float)\nclf.fit(X, y)\n\n\n# ___\n# ## Test on Tony's paper\n\n# In[2]:\n\ndf_test = pd.read_csv(\"data/D11-1142.features.tsv\", index_col = 0, sep = \"\\t\")\nclf.predict(df_test.values.astype(float))\n\n\n# It looks like Tony's paper's citations are classified as incidental. This is different from the current RBF SVM classifier that has [4 important citations](http://s2.allenai.org/paper/D11-1142/papers). \n\n# ___\n# ## Analysis of false negatives and false positives\n\n# First let's look at the first false negative\n\n# In[3]:\n\npos = df[df.label == 1]\npos_predict = clf.predict(pos.values[:,1:].astype(float))\nfalse_negatives = pos[pos_predict == 0]\nfirst_false_negative = false_negatives.iloc[[0],:]\nprint(\"The two paper ids are:\", first_false_negative.index[0])\nprint(\"Below are the features for this pair:\")\nfirst_false_negative.iloc[0]\n\n\n# In[4]:\n\nclf.predict_proba(first_false_negative.values[:,1:].astype(float))\n\n\n# The model predicts this to be important with only 0.192 probability. The main feature is `total = 1`, which is why the model classifies it as negative. Upon closer inspection of the [citing paper P01-1006](http://aclweb.org/anthology/P/P01/P01-1006.pdf), it looks like the citation is incidental as the parser in [A97-1001](http://aclweb.org/anthology/A/A97/A97-1011.pdf) is used as a tool. So the label (1 = important) was incorrect. \n# \n# Next, let's look at the first false positive.\n\n# In[5]:\n\nneg = df[df.label == 0]\nneg_predict = clf.predict(neg.values[:,1:].astype(float))\nfalse_positives = neg[neg_predict == 1]\nfirst_false_positive = false_positives.iloc[[0],:]\nprint(\"The two paper ids are:\", first_false_positive.index[0])\nprint(\"Below are the features for this pair:\")\nfirst_false_positive.iloc[0]\n\n\n# In[6]:\n\nclf.predict_proba(first_false_positive.values[:,1:].astype(float))\n\n\n# There are two citation occurrences (`total = 2`). Also, `countsInSection_method = 2` as well, which explains the score of 0.65. Upon closer inspection, the first citation occurrence is part of a list of four citations, whereas the second citation occurrence is actually in the related work section, which should have negative weight.\n# \n# So for this case, a feature that considers citation list length (inversely weighted) and a better `countsInSection_related_work` feature extractor should help. 
\n\n# In[ ]:\n\n\n\n", "sub_path": "offline/experiments/citationClassifier/reports/Main-VuH-2-CR.py", "file_name": "Main-VuH-2-CR.py", "file_ext": "py", "file_size_in_byte": 3265, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sklearn.pipeline.Pipeline", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 25, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 25, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "540691630", "text": "import io\nimport datetime\nimport requests\nimport xml.dom.minidom\n\nfrom django.db.models import Count, Q\nfrom django.conf import settings\nfrom django.db.models.signals import pre_save, post_save\nfrom django.urls import reverse\nfrom xlsxwriter import Workbook\nfrom tqdm import tqdm\n\nfrom KlimaKar.email import mail_managers\nfrom KlimaKar.templatetags.slugify import slugify\nfrom apps.settings.models import InvoiceDownloadSettings\nfrom apps.warehouse.models import Invoice, Ware\nfrom apps.audit.functions import post_save_handler as audit_post, pre_save_handler as audit_pre\nfrom apps.search.utils import post_save_handler as search_post\n\n\ndef generate_ware_inventory(queryset):\n output = io.BytesIO()\n workbook = Workbook(output, {\"in_memory\": True})\n worksheet = workbook.add_worksheet()\n\n bold = workbook.add_format({\"bold\": True})\n bold_money = workbook.add_format({\"num_format\": \"#,##0.00 zł\", \"bold\": True})\n border = workbook.add_format({\"border\": 1})\n bold_border = workbook.add_format({\"border\": 1, \"bold\": True})\n border_money = workbook.add_format({\"border\": 1, \"num_format\": \"#,##0.00 zł\"})\n columns = [(\"Lp.\", 5), (\"Nr kat.\", 35), (\"Nazwa\", 35), (\"szt.\", 5), (\"cena\", 15), (\"wartość\", 15)]\n\n today = datetime.date.today()\n worksheet.write(0, 1, \"INWENTARYZACJA NA DZIEŃ {}\".format(today.strftime(\"%d.%m.%Y\")))\n\n row = 2\n col = 0\n\n for column in columns:\n worksheet.write(row, col, column[0], bold_border)\n worksheet.set_column(col, col, column[1])\n col += 1\n\n row = 3\n col = 0\n\n for ware in queryset.order_by(\"index\"):\n worksheet.write(row, col, row - 2, border)\n worksheet.write(row, col + 1, ware.index, border)\n worksheet.write(row, col + 2, ware.name, border)\n worksheet.write(row, col + 3, ware.stock, border)\n worksheet.write(row, col + 4, ware.last_price, border_money)\n if ware.last_price:\n worksheet.write(row, col + 5, ware.stock * ware.last_price, border_money)\n else:\n worksheet.write(row, col + 5, \"\", border_money)\n row += 1\n\n worksheet.write(row + 1, 4, \"SUMA\", bold)\n worksheet.write(row + 1, 5, \"=SUM(F4:F{})\".format(row), bold_money)\n\n worksheet.write(row + 3, 1, \"Remanent zakończono na pozycji {}.\".format(row - 3))\n worksheet.write(row + 4, 1, \"Wartość słownie: \")\n\n workbook.close()\n output.seek(0)\n return output\n\n\ndef get_inter_cars_barcodes():\n pre_save.disconnect(audit_pre, sender=Invoice)\n post_save.disconnect(audit_post, sender=Invoice)\n post_save.disconnect(search_post, sender=Invoice)\n pre_save.disconnect(audit_pre, sender=Ware)\n 
post_save.disconnect(audit_post, sender=Ware)\n post_save.disconnect(search_post, sender=Ware)\n\n config = InvoiceDownloadSettings.load()\n invoices = (\n Invoice.objects.filter(supplier=config.INTER_CARS_SUPPLIER)\n .annotate(item_count=Count(\"invoiceitem\"))\n .order_by(\"-item_count\")\n )\n checked_wares = []\n\n url = settings.IC_API_URL + \"GetInvoices\"\n url_detail = settings.IC_API_URL + \"GetInvoice\"\n headers = {\"kh_kod\": config.INTER_CARS_CLIENT_NUMBER, \"token\": config.INTER_CARS_TOKEN}\n\n for invoice in tqdm(invoices):\n wares = Ware.objects.filter(invoiceitem__invoice=invoice, barcode=\"\").exclude(pk__in=checked_wares).distinct()\n if not wares:\n continue\n\n if not invoice.remote_id:\n r = requests.get(\n url,\n params={\"from\": invoice.date.strftime(\"%Y%m%d\"), \"to\": invoice.date.strftime(\"%Y%m%d\")},\n headers=headers,\n )\n if r.status_code != 200:\n print(f\"Invoice {invoice} failed fetching\")\n continue\n DOMTree = xml.dom.minidom.parseString(r.text)\n collection = DOMTree.documentElement\n xml_invoices = collection.getElementsByTagName(\"nag\")\n xml_invoice = [i for i in xml_invoices if getData(i, \"numer\") == invoice.number]\n if not xml_invoice:\n continue\n xml_invoice = xml_invoice[0]\n if not invoice.remote_id:\n invoice.remote_id = getData(xml_invoice, \"id\")\n invoice.save()\n\n r = requests.get(url_detail, params={\"id\": invoice.remote_id}, headers=headers)\n if r.status_code != 200:\n print(f\"Invoice {invoice} failed fetching\")\n continue\n\n DOMTree = xml.dom.minidom.parseString(r.text)\n collection = DOMTree.documentElement\n xml_wares = collection.getElementsByTagName(\"poz\")\n for xml_ware in xml_wares:\n barcode_list = getData(xml_ware, \"kod_kre\")\n if not barcode_list:\n continue\n ean_list = [\n code.strip() for code in barcode_list.split(\",\") if len(code.strip()) == 13 and code.strip().isdigit()\n ]\n barcode = ean_list[0] if ean_list else \"\"\n if not barcode:\n continue\n try:\n ware = wares.get(\n Q(index=getData(xml_ware, \"indeks\")) | Q(index_slug=Ware.slugify(getData(xml_ware, \"indeks\")))\n )\n checked_wares.append(ware.pk)\n ware.barcode = barcode\n ware.save()\n except Ware.DoesNotExist:\n continue\n except Ware.MultipleObjectsReturned:\n print(f\"Multiple wares with same index: {getData(xml_ware, 'indeks')}\")\n\n pre_save.connect(audit_pre, sender=Invoice)\n post_save.connect(audit_post, sender=Invoice)\n post_save.connect(search_post, sender=Invoice)\n pre_save.connect(audit_pre, sender=Ware)\n post_save.connect(audit_post, sender=Ware)\n post_save.connect(search_post, sender=Ware)\n\n\ndef getData(node, tag):\n if node.getElementsByTagName(tag)[0].childNodes != []:\n return node.getElementsByTagName(tag)[0].childNodes[0].nodeValue\n else:\n return None\n\n\ndef check_total_price(invoice, total):\n if invoice.total_value == total:\n return True\n if invoice.total_value == -total:\n for item in invoice.invoiceitem_set.all():\n item.quantity = -item.quantity\n item.save()\n return True\n mail_managers(\n f\"Błąd w fakturze {invoice.supplier.name}\",\n \"Kwota całkowita faktury nie zgadza się z cenami pozycji. 
Proszę o sprawdzenie.\\n\\n{}{}\".format(\n settings.ABSOLUTE_URL,\n reverse(\"warehouse:invoice_detail\", kwargs={\"pk\": invoice.pk, \"slug\": slugify(invoice)}),\n ),\n )\n", "sub_path": "apps/warehouse/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 6611, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "io.BytesIO", "line_number": 22, "usage_type": "call"}, {"api_name": "xlsxwriter.Workbook", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.db.models.signals.pre_save.disconnect", "line_number": 71, "usage_type": "call"}, {"api_name": "apps.audit.functions.pre_save_handler", "line_number": 71, "usage_type": "argument"}, {"api_name": "django.db.models.signals.pre_save", "line_number": 71, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Invoice", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.disconnect", "line_number": 72, "usage_type": "call"}, {"api_name": "apps.audit.functions.post_save_handler", "line_number": 72, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 72, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Invoice", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.disconnect", "line_number": 73, "usage_type": "call"}, {"api_name": "apps.search.utils.post_save_handler", "line_number": 73, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 73, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Invoice", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.signals.pre_save.disconnect", "line_number": 74, "usage_type": "call"}, {"api_name": "apps.audit.functions.pre_save_handler", "line_number": 74, "usage_type": "argument"}, {"api_name": "django.db.models.signals.pre_save", "line_number": 74, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Ware", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.disconnect", "line_number": 75, "usage_type": "call"}, {"api_name": "apps.audit.functions.post_save_handler", "line_number": 75, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 75, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Ware", "line_number": 75, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.disconnect", "line_number": 76, "usage_type": "call"}, {"api_name": "apps.search.utils.post_save_handler", "line_number": 76, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 76, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Ware", "line_number": 76, "usage_type": "name"}, {"api_name": "apps.settings.models.InvoiceDownloadSettings.load", "line_number": 78, "usage_type": "call"}, {"api_name": "apps.settings.models.InvoiceDownloadSettings", "line_number": 78, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Invoice.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "apps.warehouse.models.Invoice.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "apps.warehouse.models.Invoice", "line_number": 80, "usage_type": "name"}, {"api_name": "django.db.models.Count", 
"line_number": 81, "usage_type": "call"}, {"api_name": "django.conf.settings.IC_API_URL", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 86, "usage_type": "name"}, {"api_name": "django.conf.settings.IC_API_URL", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 87, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 90, "usage_type": "call"}, {"api_name": "apps.warehouse.models.Ware.objects.filter", "line_number": 91, "usage_type": "call"}, {"api_name": "apps.warehouse.models.Ware.objects", "line_number": 91, "usage_type": "attribute"}, {"api_name": "apps.warehouse.models.Ware", "line_number": 91, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 96, "usage_type": "call"}, {"api_name": "xml.dom.minidom.dom.minidom.parseString", "line_number": 104, "usage_type": "call"}, {"api_name": "xml.dom.minidom.dom", "line_number": 104, "usage_type": "attribute"}, {"api_name": "xml.dom.minidom", "line_number": 104, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 115, "usage_type": "call"}, {"api_name": "xml.dom.minidom.dom.minidom.parseString", "line_number": 120, "usage_type": "call"}, {"api_name": "xml.dom.minidom.dom", "line_number": 120, "usage_type": "attribute"}, {"api_name": "xml.dom.minidom", "line_number": 120, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 135, "usage_type": "call"}, {"api_name": "apps.warehouse.models.Ware.slugify", "line_number": 135, "usage_type": "call"}, {"api_name": "apps.warehouse.models.Ware", "line_number": 135, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Ware.DoesNotExist", "line_number": 140, "usage_type": "attribute"}, {"api_name": "apps.warehouse.models.Ware", "line_number": 140, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Ware.MultipleObjectsReturned", "line_number": 142, "usage_type": "attribute"}, {"api_name": "apps.warehouse.models.Ware", "line_number": 142, "usage_type": "name"}, {"api_name": "django.db.models.signals.pre_save.connect", "line_number": 145, "usage_type": "call"}, {"api_name": "apps.audit.functions.pre_save_handler", "line_number": 145, "usage_type": "argument"}, {"api_name": "django.db.models.signals.pre_save", "line_number": 145, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Invoice", "line_number": 145, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.connect", "line_number": 146, "usage_type": "call"}, {"api_name": "apps.audit.functions.post_save_handler", "line_number": 146, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 146, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Invoice", "line_number": 146, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.connect", "line_number": 147, "usage_type": "call"}, {"api_name": "apps.search.utils.post_save_handler", "line_number": 147, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 147, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Invoice", "line_number": 147, "usage_type": "name"}, {"api_name": "django.db.models.signals.pre_save.connect", "line_number": 148, "usage_type": "call"}, {"api_name": "apps.audit.functions.pre_save_handler", "line_number": 148, "usage_type": "argument"}, {"api_name": "django.db.models.signals.pre_save", "line_number": 148, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Ware", 
"line_number": 148, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.connect", "line_number": 149, "usage_type": "call"}, {"api_name": "apps.audit.functions.post_save_handler", "line_number": 149, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 149, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Ware", "line_number": 149, "usage_type": "name"}, {"api_name": "django.db.models.signals.post_save.connect", "line_number": 150, "usage_type": "call"}, {"api_name": "apps.search.utils.post_save_handler", "line_number": 150, "usage_type": "argument"}, {"api_name": "django.db.models.signals.post_save", "line_number": 150, "usage_type": "name"}, {"api_name": "apps.warehouse.models.Ware", "line_number": 150, "usage_type": "name"}, {"api_name": "KlimaKar.email.mail_managers", "line_number": 168, "usage_type": "call"}, {"api_name": "django.conf.settings.ABSOLUTE_URL", "line_number": 171, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 171, "usage_type": "name"}, {"api_name": "django.urls.reverse", "line_number": 172, "usage_type": "call"}, {"api_name": "KlimaKar.templatetags.slugify.slugify", "line_number": 172, "usage_type": "call"}]} +{"seq_id": "292796078", "text": "## Import useful packages\nimport matplotlib\nmatplotlib.use('Agg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.feature_extraction.image import extract_patches_2d\nfrom scipy.ndimage import gaussian_filter\nimport graphlearning as gl\nfrom scipy import sparse\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA as sklearn_pca\nimport urllib.request\nimport scipy.io\n\n## Function for Non-Local Means Method\ndef NonLocalMeans(image, d):\n \n ## Pad the image with mirror reflections of itself with a width d\n pad = (d,d)\n padimage = np.pad(image,(pad,pad,(0,0)),mode='reflect') #(top,bottom),(left,right),(0,0)\n\n ## For the ith pixel, make a (2d + 1) by (2d + 1) patch centered at pixel i\n patches = extract_patches_2d(padimage, tuple((2*d+1,2*d+1)))\n\n ## For the jth, (j = 1; 2; 3) band, apply a Gaussian kernel on this patch\n u = np.zeros((2*d+1,2*d+1))\n u[d,d] = 1\n G = gaussian_filter(u,d/2,mode='constant',cval=0)\n patches = patches * G[np.newaxis,:,:,np.newaxis]\n\n ## Form the feature matrix F by letting each row of F be a feature vector of a pixel\n F = patches.reshape((patches.shape[0],patches.shape[1]*patches.shape[2]*patches.shape[3]))\n print(\"feature vector shape: \", F.shape)\n\n return F\n\n#Function to perform PCA on image\ndef PCA(image, component=False):\n\n #Perform PCA with 60 components\n pca = sklearn_pca(n_components=60)\n X = image.reshape(image.shape[0]*image.shape[1],image.shape[2])\n pca.fit(X)\n variance = 100*(pca.explained_variance_ratio_)\n\n #Get the number of components with variance greater than 0.005%\n num_components = len(variance[variance>5e-3])\n\n #Perform PCA with the new number of components\n pca = sklearn_pca(n_components=num_components)\n pca_image = pca.fit_transform(X)\n print(\"Total Variation (%d components): \"%num_components, np.sum(pca.explained_variance_ratio_))\n pca_image = pca_image.reshape(image.shape[0], image.shape[1], num_components)\n print(\"pca image shape: \", pca_image.shape)\n \n if component==True:\n return pca_image, num_components\n return pca_image\n\n## Function to generate the weight matrix\ndef WeightMatrix(data):\n boolean = False\n k=1\n I,J,D = gl.knnsearch_annoy(data, 50, similarity='angular')\n D[D==0] = 
1e-150\n\n while boolean==False:\n k+=1\n W = gl.weight_matrix(I,J,D,k)\n boolean = gl.isconnected(W)\n print(\"k=%d\"%k, \"connected=%s\"%boolean)\n\n k=2*k\n W = gl.weight_matrix(I,J,D,k)\n print(\"k=%d\"%k, \"connected=%s\"%boolean)\n print(\"weight matrix shape: \", W.shape)\n \n return W\n\n## Function to perform spectral clustering and plot the image segmentation\ndef spectral_clustering(W, image, labels_rescaled, method, plot=True):\n\n ## Spectral Embedding\n num_classes = len(np.unique(labels_rescaled))\n L = gl.graph_laplacian(W, norm='normalized')\n vals, vec = sparse.linalg.eigs(L,k=num_classes+1,which='SM')\n vals = vals[1:].real\n vec = vec[:,1:].real \n norm = np.sum(vec**2,axis=1)\n Y = vec/norm[:,np.newaxis]**.5 \n\n ## K-means\n kmeans = KMeans(n_clusters=num_classes).fit(Y)\n labels_cluster = kmeans.labels_\n\n if plot==True:\n\n ## Plot image segmentation\n image_seg = np.reshape(labels_cluster,image.shape[0:2])\n plt.figure()\n plt.title('Spectral Clustering Segmentation')\n plt.imshow(image_seg)\n plt.savefig('Spectral_Clustering_%d.png'%method)\n plt.show()\n\n # ## Plot eigenvectors\n # for i in range(num_classes):\n # eigen_vec = vec[:,i].reshape((image.shape[0],image.shape[1]))\n # plt.figure()\n # plt.title('eigenvector %s'%i)\n # plt.imshow(eigen_vec, cmap='gray')\n\n ## Accuracy score\n acc = gl.clustering_accuracy(labels_cluster, labels_rescaled)\n print(\"Spectral clustering accuracy: %.2f%%\"%acc)\n\n return acc\n\n## Function to perform laplace and poisson learning and plot the image segmentation\ndef laplace_poisson(W, train_ind, image, labels, method, plot=True):\n #Laplace Learning\n labels_laplace = gl.graph_ssl(W, train_ind, labels[train_ind], algorithm='laplace')\n laplace_acc = gl.accuracy(labels, labels_laplace, len(train_ind))\n\n if plot==True:\n\n ## Plot laplace segmentation\n laplace_seg = np.reshape(labels_laplace,image.shape[0:2])\n plt.figure()\n plt.title('Laplace Segmentation')\n plt.imshow(laplace_seg)\n plt.savefig('Laplace_Learning_%d.png'%method)\n plt.show()\n print('Laplace learning accuracy: %.2f%%'%laplace_acc)\n\n #Poisson Learning\n labels_poisson = gl.graph_ssl(W, train_ind, labels[train_ind], algorithm='poisson')\n poisson_acc = gl.accuracy(labels, labels_poisson, len(train_ind))\n\n if plot==True:\n\n ## Plot poisson segmentation\n poisson_seg = np.reshape(labels_poisson,image.shape[0:2])\n plt.figure()\n plt.title('Poisson Segmentation')\n plt.imshow(poisson_seg)\n plt.savefig('Poisson_Learning_%d.png'%method)\n plt.show()\n print('Poisson learning accuracy: %.2f%%'%poisson_acc)\n\n return laplace_acc, poisson_acc\n\n## Import the hyperspectral dataset\nurllib.request.urlretrieve('http://www.ehu.eus/ccwintco/uploads/f/f1/Salinas.mat', 'salinas.mat')\ndata = scipy.io.loadmat('salinas.mat')\nimage = data['salinas']\nnum_pixels = image.shape[0]*image.shape[1]\nprint(image.shape)\n\n## Display a random sample band of the image\nrand_slice = np.random.randint(image.shape[2])\nplt.title('salinas image (band: %d)'%rand_slice)\nplt.imshow(image[:,:,rand_slice])\nplt.savefig('Salinas_random_band.png')\nplt.show()\n\n## Import the ground truth labels\nurllib.request.urlretrieve('http://www.ehu.eus/ccwintco/uploads/f/fa/Salinas_gt.mat', 'salinas_gt.mat')\ndata = scipy.io.loadmat('salinas_gt.mat')\ngt = data['salinas_gt']\nlabels = gt.flatten().astype(int)\nplt.title('ground truth labels')\nplt.imshow(gt)\nplt.savefig('Ground_Truth.png')\nplt.show()\n\n\"\"\"#Comparing different preprocessing methods\"\"\"\n\n#Initialize training 
points\nnp.random.seed(0)\nnum_train_per_class = int(0.10*num_pixels/len(np.unique(labels)))\ntrain_ind = gl.randomize_labels(labels,num_train_per_class)\nprint(\"%.3f%% training data\"%(100*len(train_ind)/num_pixels))\n\n#Array to store accuracy scores\nspectral = np.ones(5)\nlaplace = np.ones(5)\npoisson = np.ones(5)\n\n\"\"\"1. Raw Image\"\"\"\n\n#Construct Weight Matrix \nraw_image = image.reshape(num_pixels, image.shape[2])\nW = WeightMatrix(raw_image)\n\n#Perform Spectral Clustering to segment the image\nspectral[0] = spectral_clustering(W, image, labels, 1)\n\n#Perform Laplace and Poisson Learning to segment the image\nlaplace[0], poisson[0] = laplace_poisson(W, train_ind, image, labels, 1)\n\n\"\"\"2. Raw NonLocalMeans\"\"\"\n\n#Perform NonLocalMeans and Construct Weight Matrix\nF = NonLocalMeans(image, 2)\nW = WeightMatrix(F)\n\n#Perform Spectral Clustering to segment the image\nspectral[1] = spectral_clustering(W, image, labels, 2)\n\n#Perform Laplace and Poisson Learning to segment the image\nlaplace[1], poisson[1] = laplace_poisson(W, train_ind, image, labels, 2)\n\n\"\"\"3. Raw PCA\"\"\"\n\n#Perform PCA and Construct Weight Matrix\npca_image, raw_pca_components = PCA(image, component=True)\nW = WeightMatrix(pca_image.reshape(num_pixels, pca_image.shape[2]))\n\n#Perform Spectral Clustering to segment the image\nspectral[2] = spectral_clustering(W, image, labels, 3)\n\n#Perform Laplace and Poisson Learning to segment the image\nlaplace[2], poisson[2] = laplace_poisson(W, train_ind, image, labels, 3)\n\n\"\"\"4. NonLocalMeans-PCA\"\"\"\n\n#Perform NonLocalMeans then PCA and Construct Weight Matrix\nfeat_vec = NonLocalMeans(image, 2)\nfeat_vec = feat_vec.reshape(image.shape[0], image.shape[1], feat_vec.shape[1])\npca_image = PCA(feat_vec)\nW = WeightMatrix(pca_image.reshape(num_pixels, pca_image.shape[2]))\n\n#Perform Spectral Clustering to segment the image\nspectral[3] = spectral_clustering(W, image, labels, 4)\n\n#Perform Laplace and Poisson Learning to segment the image\nlaplace[3], poisson[3] = laplace_poisson(W, train_ind, image, labels, 4)\n\n\"\"\"5. 
PCA-NonLocalMeans\"\"\"\n\n#Perform PCA then NonLocalMeans and Construct Weight Matrix\npca_image = PCA(image)\nF = NonLocalMeans(pca_image, 2)\nW = WeightMatrix(F)\n\n#Perform Spectral Clustering to segment the image\nspectral[4] = spectral_clustering(W, image, labels, 5)\n\n#Perform Laplace and Poisson Learning to segment the image\nlaplace[4], poisson[4] = laplace_poisson(W, train_ind, image, labels, 5)\n\n#Output best accuracy for each method\nspectral_ind = np.argmax(spectral)\nprint(\"Best Spectral Accuracy (method: %d): %.2f\"%(spectral_ind+1, spectral[spectral_ind]))\nlaplace_ind = np.argmax(laplace)\nprint(\"Best Laplace Accuracy (method: %d): %.2f\"%(laplace_ind+1, laplace[laplace_ind]))\npoisson_ind = np.argmax(poisson)\nprint(\"Best Poisson Accuracy (method: %d): %.2f\"%(poisson_ind+1, poisson[poisson_ind]))\n\n\"\"\"#PCA-NonLocalMeans-VAE\"\"\"\n\nimport torch\nimport torch.utils.data\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nfrom torch.optim.lr_scheduler import StepLR\n\nclass MyDataset(Dataset):\n def __init__(self, data, targets, transform=None):\n self.data = data\n self.targets = targets\n self.transform = transform\n \n def __getitem__(self, index):\n x = self.data[index]\n y = self.targets[index]\n \n if self.transform:\n x = self.transform(x)\n \n return x, y\n \n def __len__(self):\n return len(self.data)\n\nclass VAE(nn.Module):\n def __init__(self):\n super(VAE, self).__init__()\n\n self.fc1 = nn.Linear(25*raw_pca_components, 100)\n self.fc21 = nn.Linear(100, 2)\n self.fc22 = nn.Linear(100, 2)\n self.fc3 = nn.Linear(2, 100)\n self.fc4 = nn.Linear(100, 25*raw_pca_components)\n\n def encode(self, x):\n h1 = F.relu(self.fc1(x))\n return self.fc21(h1), self.fc22(h1)\n\n def reparameterize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n\n def decode(self, z):\n h3 = F.relu(self.fc3(z))\n return torch.sigmoid(self.fc4(h3))\n\n def forward(self, x):\n mu, logvar = self.encode(x.view(-1, 25*raw_pca_components))\n z = self.reparameterize(mu, logvar)\n return self.decode(z), mu, logvar\n\n# Reconstruction + KL divergence losses summed over all elements and batch\ndef loss_function(recon_x, x, mu, logvar):\n BCE = F.binary_cross_entropy(recon_x, x.view(-1, 25*raw_pca_components), reduction='sum')\n\n # see Appendix B from VAE paper:\n # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n # https://arxiv.org/abs/1312.6114\n # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\n return BCE + KLD\n\n\ndef train(epoch):\n model.train()\n train_loss = 0\n for batch_idx, (data, _) in enumerate(data_loader):\n data = data.to(device)\n optimizer.zero_grad()\n recon_batch, mu, logvar = model(data)\n loss = loss_function(recon_batch, data, mu, logvar)\n loss.backward()\n train_loss += loss.item()\n optimizer.step()\n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(data_loader.dataset),\n 100. 
 * batch_idx / len(data_loader),\n loss.item() / len(data)))\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / len(data_loader.dataset)))\n\nno_cuda = False\nbatch_size = 128\nlog_interval = 10 #how many batches to wait before logging training status\nepochs = 100\n\n#GPU settings\ncuda = not no_cuda and torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if cuda else \"cpu\")\nkwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}\n\n#Load SalinasA data\npca_image = PCA(image)\nfeat_vec = NonLocalMeans(pca_image, 2)\ndata = (feat_vec-feat_vec.min())/(feat_vec.max()-feat_vec.min()) #min-max scale into [0,1] for the BCE loss\ntarget = labels\n\n#Convert to torch dataloaders\ndata = torch.from_numpy(data).float()\ntarget = torch.from_numpy(target).long()\nsalinasA = MyDataset(data, target) \ndata_loader = DataLoader(salinasA, batch_size = batch_size, shuffle = True, **kwargs)\n\n#Put model on GPU and set up optimizer\nmodel = VAE().to(device)\noptimizer = optim.Adam(model.parameters(), lr=1e-3)\nscheduler = StepLR(optimizer, step_size=1, gamma=0.7)\n\n#Training epochs\nfor epoch in range(1, epochs + 1):\n train(epoch)\n scheduler.step()\n\n#Encode the dataset and save to npz file\nwith torch.no_grad():\n mu, logvar = model.encode(data.to(device).view(-1, 25*raw_pca_components))\n Y = mu.cpu().numpy()\n np.savez_compressed('SalinasA_vae.npz',data=Y,labels=target.numpy())\n\n# M = np.load('SalinasA_vae.npz')\n# data = M['data']\n# print(data.shape)\n\n# W_vae = WeightMatrix(data)\n# accuracy(W_vae, labels)\n\n\"\"\"Visualization of 2 Dimensions\"\"\"\n\n#PCA\npca = sklearn_pca(n_components=2)\npca_image = pca.fit_transform(image.reshape(num_pixels,image.shape[2]))\nprint(pca_image.shape)\nplt.title(\"pca\")\nplt.scatter(pca_image[:,0], pca_image[:,1], c=labels)\nplt.savefig('PCA_2dim.png')\nplt.show()\n\n#VAE\nM = np.load('SalinasA_vae.npz')\ndata = M['data']\nprint(data.shape)\nplt.title(\"vae\")\nplt.scatter(data[:,0], data[:,1], c=labels)\nplt.savefig('VAE_2dim.png')\nplt.show()\n\n", "sub_path": "Image Preprocessing/Salinas/salinas.py", "file_name": "salinas.py", "file_ext": "py", "file_size_in_byte": 13221, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "matplotlib.use", "line_number": 3, "usage_type": "call"}, {"api_name": "numpy.pad", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.image.extract_patches_2d", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.ndimage.gaussian_filter", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 29, "usage_type": "attribute"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 41, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 52, "usage_type": "call"}, {"api_name": "graphlearning.knnsearch_annoy", "line_number": 64, "usage_type": "call"}, {"api_name": "graphlearning.weight_matrix", "line_number": 69, "usage_type": "call"}, {"api_name": "graphlearning.isconnected", "line_number": 70, "usage_type": "call"}, {"api_name": "graphlearning.weight_matrix", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 84, "usage_type": "call"}, {"api_name": "graphlearning.graph_laplacian", "line_number": 85, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.eigs", "line_number": 86, "usage_type": "call"}, 
{"api_name": "scipy.sparse.linalg", "line_number": 86, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 86, "usage_type": "name"}, {"api_name": "numpy.sum", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 90, "usage_type": "attribute"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 103, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 104, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 104, "usage_type": "name"}, {"api_name": "graphlearning.clustering_accuracy", "line_number": 114, "usage_type": "call"}, {"api_name": "graphlearning.graph_ssl", "line_number": 122, "usage_type": "call"}, {"api_name": "graphlearning.accuracy", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "graphlearning.graph_ssl", "line_number": 137, "usage_type": "call"}, {"api_name": "graphlearning.accuracy", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 154, "usage_type": "call"}, {"api_name": 
"urllib.request.request", "line_number": 154, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 154, "usage_type": "name"}, {"api_name": "scipy.ndimage.io.loadmat", "line_number": 155, "usage_type": "call"}, {"api_name": "scipy.ndimage.io", "line_number": 155, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 161, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.title", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 168, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 168, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 168, "usage_type": "name"}, {"api_name": "scipy.ndimage.io.loadmat", "line_number": 169, "usage_type": "call"}, {"api_name": "scipy.ndimage.io", "line_number": 169, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 169, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 180, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 180, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 181, "usage_type": "call"}, {"api_name": "graphlearning.randomize_labels", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 188, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 272, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 290, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 290, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 294, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 294, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 295, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 
295, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 296, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 296, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 297, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 297, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 298, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 298, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 301, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 301, "usage_type": "name"}, {"api_name": "torch.exp", "line_number": 305, "usage_type": "call"}, {"api_name": "torch.randn_like", "line_number": 306, "usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 310, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 310, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.nn.functional.binary_cross_entropy", "line_number": 320, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 320, "usage_type": "name"}, {"api_name": "torch.sum", "line_number": 326, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 357, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 357, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 358, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 368, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 369, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 371, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 375, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 375, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.StepLR", "line_number": 376, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 384, "usage_type": "call"}, {"api_name": "numpy.savez_compressed", "line_number": 387, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 399, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 402, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 402, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 403, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 403, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 404, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 404, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 405, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 405, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 408, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 411, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 411, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 412, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 412, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 413, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 413, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 414, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 414, "usage_type": "name"}]} 
+{"seq_id": "303090996", "text": "import tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nimport os\n\ntf.set_random_seed(777) # reproducibility\n\nif \"DISPLAY\" not in os.environ:\n # remove Travis CI Error\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nflags.DEFINE_integer(\"seq_length\", 7, \"rnn sequence length\")\nflags.DEFINE_integer(\"data_dim\", 5, \"data dimension\")\nflags.DEFINE_integer(\"hidden_dim\", 10, \"hidden dimension\")\nflags.DEFINE_integer(\"output_dim\", 1, \"output dimension\")\nflags.DEFINE_integer(\"rnn_stack\", 3, \"stacked rnn depth\")\nflags.DEFINE_float(\"learning_rate\", 0.01, \"learning rate\")\nflags.DEFINE_integer(\"epoch\", 20, \"number of epoch\")\nflags.DEFINE_integer(\"batch_size\", 100, \"batch size\")\nflags.DEFINE_integer(\"num_models\", 1, \"number of models in ensemble\")\nflags.DEFINE_integer(\"distance\", 5, \"distance minutes between present and future\")\n\nflags.DEFINE_string(\"train_csv\", \"data/train/DAT_MT_EURUSD_M1_2016.csv\", \"train data directory\")\nflags.DEFINE_string(\"test_csv\", \"data/test/DAT_MT_EURUSD_M1_201701.csv\", \"train data directory\")\n\n\ndef scaler(data):\n \"\"\" Min Max Normalization\n\n Parameters\n ----------\n data : numpy.ndarray\n input data to be normalized\n shape: [Batch size, dimension]\n\n Returns\n ----------\n data : numpy.ndarry\n normalized data\n shape: [Batch size, dimension]\n\n References\n ----------\n .. [1] http://sebastianraschka.com/Articles/2014_about_feature_scaling.html\n\n \"\"\"\n numerator = data - np.min(data, 0)\n denominator = np.max(data, 0) - np.min(data, 0)\n # noise term prevents the zero division\n return numerator / (denominator + 1e-7)\n\n\ndef data_parser(file_directory):\n xy = pd.read_csv(file_directory, header=None)\n xy = scaler(xy[[2, 3, 4, 5]].values)\n\n x = xy\n y = xy[:, [-1]] # choose 'close price' as label\n\n # build a dataset\n data_x = []\n data_y = []\n for i in range(0, len(y) - FLAGS.seq_length - FLAGS.distance):\n _x = x[i:i + FLAGS.seq_length]\n _y = y[i - 1 + FLAGS.seq_length + FLAGS.distance]\n if i < 3:\n print(_x, \"->\", _y)\n data_x.append(_x)\n data_y.append(_y)\n\n return data_x, data_y\n\n\nclass NetModel(object):\n def __init__(self, sess, name):\n self.sess = sess\n self.name = name\n self._build_net()\n\n def _build_net(self):\n self.training = tf.placeholder(tf.bool)\n\n with tf.variable_scope(self.name):\n # input place holders\n self.X = tf.placeholder(tf.float32, [None, FLAGS.seq_length, FLAGS.data_dim])\n self.Y = tf.placeholder(tf.float32, [None, 1])\n\n # build a LSTM network\n cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=FLAGS.hidden_size)\n cell = tf.nn.rnn_cell.MultiRNNCell([cell] * FLAGS.rnn_stack, state_is_tuple=True)\n self.output, __states = tf.nn.dynamic_rnn(cell, inputs=self.X, dtype=tf.float32)\n\n # prediction (no activation) layer\n self.pred = tf.contrib.layers.fully_connected(input=self.output[:, -1], num_outputs=FLAGS.output_dim,\n activation_fn=None)\n\n # define cost/loss & optimizer\n self.cost = tf.reduce_sum(tf.square(self.pred - self.Y))\n # optimizer\n self.optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss=self.cost)\n\n # RMSE\n self.targets = tf.placeholder(tf.float32, [None, 1])\n self.predictions = tf.placeholder(tf.float32, [None, 1])\n self.rmse = tf.sqrt(tf.reduce_mean(tf.square(self.targets - self.predictions)))\n\n def predict(self, x_test, training=False):\n return self.sess.run(self.pred, 
feed_dict={self.X: x_test, self.training: training})\n\n def get_rmse(self, x_test, y_test, training=False):\n return self.sess.run(self.rmse, feed_dict={self.targets: y_test, self.predictions: self.predict(x_test), self.training: training})\n\n def train(self, x_data, y_data, training=True):\n return self.sess.run([self.cost, self.optimizer],\n feed_dict={self.X: x_data, self.Y: y_data, self.training: training})\n\n\ndef main(_):\n train_x, train_y = data_parser(FLAGS.train_csv)\n test_x, test_y = data_parser(FLAGS.test_csv)\n train_x, train_y = np.array(train_x), np.array(train_y) # slice numpy mini-batches below; tf.train.batch tensors cannot be fed through feed_dict\n\n sess = tf.Session()\n\n # build ensemble model\n rnn_models = []\n for m in range(FLAGS.num_models):\n rnn_models.append(NetModel(sess, \"forex\" + str(m)))\n\n sess.run(tf.global_variables_initializer())\n\n print('start learning')\n\n # train my model\n for epoch in range(FLAGS.epoch):\n avg_cost_list = np.zeros(len(rnn_models))\n total_batch = int(len(train_y) / FLAGS.batch_size)\n\n for step in range(total_batch):\n # train each model\n for m_idx, m in enumerate(rnn_models):\n c, _ = m.train(train_x[step * FLAGS.batch_size:(step + 1) * FLAGS.batch_size], train_y[step * FLAGS.batch_size:(step + 1) * FLAGS.batch_size])\n avg_cost_list[m_idx] += c / total_batch\n print('Epoch:', '%04d' % (epoch + 1), 'cost =', avg_cost_list)\n print('finish learning')\n\n # test model and check rmse\n test_size = len(test_y)\n predictions = np.zeros(test_size * FLAGS.output_dim).reshape(test_size, FLAGS.output_dim)\n # Test step\n for m_idx, m in enumerate(rnn_models):\n print(m_idx, 'RMSE:', m.get_rmse(test_x, test_y))\n p = m.predict(test_x)\n predictions += p\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n", "sub_path": "2.x/02_Projects/stock/rnn_ensemble.py", "file_name": "rnn_ensemble.py", "file_ext": "py", "file_size_in_byte": 5594, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "tensorflow.set_random_seed", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "matplotlib.use", "line_number": 11, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.min", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 53, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 59, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 86, "usage_type": "call"}, {"api_name": "tensorflow.bool", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tensorflow.variable_scope", "line_number": 88, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 90, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 90, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 91, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 91, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.rnn_cell.BasicLSTMCell", "line_number": 94, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 94, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.rnn_cell.MultiRNNCell", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.dynamic_rnn", "line_number": 96, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 96, "usage_type": "attribute"}, {"api_name": 
"tensorflow.float32", "line_number": 96, "usage_type": "attribute"}, {"api_name": "tensorflow.contrib.layers.fully_connected", "line_number": 99, "usage_type": "call"}, {"api_name": "tensorflow.contrib", "line_number": 99, "usage_type": "attribute"}, {"api_name": "tensorflow.reduce_sum", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 105, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 105, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 108, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 108, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 109, "usage_type": "call"}, {"api_name": "tensorflow.float32", "line_number": 109, "usage_type": "attribute"}, {"api_name": "tensorflow.sqrt", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.square", "line_number": 110, "usage_type": "call"}, {"api_name": "tensorflow.train.batch", "line_number": 126, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 126, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 154, "usage_type": "call"}, {"api_name": "tensorflow.app.run", "line_number": 163, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 163, "usage_type": "attribute"}]} +{"seq_id": "331891875", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.special import erf\n\n\ndef plot_trajectory_subset(timeseries, tmin, tmax):\n \"\"\"\n Plot timeseries TIMESERIES between TMIN and TMAX.\n\n Parameters\n ----------\n timeseries: numpy.array\n the timeseries to plot.\n tmin: float\n lower bound of the interval to plot.\n tmax: float\n upper bound of the interval to plot.\n\n Returns\n -------\n \"\"\"\n\n t = \"######\"\n if tmin < np.amin(t) or tmax > np.amax(t):\n raise ValueError(\"Interval is out of bounds\")\n\n # Rest of function goes here\n\n return fig, ax\n\n\ndef plot_histogram(timeseries, nbins=10, mean=None, std=None):\n \"\"\"\n Plot the histogram for the timeseries TIMESERIES.\n\n Parameters\n ----------\n timeseries: numpy.array\n the timeseries to plot.\n nbins: int, optional\n The number of bins for the histogram.\n mean: float, optional\n The empirical mean computed over the timeseries\n std: float, optional\n The empirical standard deviation computed over the timeseries\n \"\"\"\n\n def get_gaussian_histogram(fluct_min, fluct_max, std, mean, nbins=100):\n bin_edges = np.linspace(fluct_min, fluct_max, nbins + 1)\n nb_samples = len(timeseries[:, 1])\n rescaled_bin_edges = (bin_edges - mean) / (std * np.sqrt(2))\n hist = (\n 0.5\n * nb_samples\n * (erf(rescaled_bin_edges[1:]) - erf(rescaled_bin_edges[:-1]))\n )\n return hist, bin_edges\n\n hist, bin_edges = \"######\"\n bin_centers = \"######\"\n\n fig, ax = plt.subplots()\n ax.plot(\"######\")\n\n if (not std) or (not mean):\n mean, var = get_mean_and_var(timeseries)\n std = np.sqrt(var)\n hist, bin_edges = \"######\"\n ax.plot(\"######\")\n\n return fig, ax\n", "sub_path": ".infrastructure/pip_example_no_setup/tstools/vis.py", 
"file_name": "vis.py", "file_ext": "py", "file_size_in_byte": 1811, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.amin", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.special.erf", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "125425098", "text": "import keras.models as krm\nimport keras.layers as klc\nfrom keras.layers.merge import concatenate\nimport keras.layers.convolutional as kcv\nimport keras.layers.noise as kno\n\nimport keras.backend as K\nK.set_image_data_format('channels_first')\n\n\ndef model_main(nc, nf_l, nt_l, nf_h, nt_h, mw=False, weights=None,\n opti='adam', noise=False, noise_amp=0.2):\n\n mw_prep = () if not mw else (3,)\n\n i_x_l = klc.Input(shape=mw_prep + (nc, nf_l, nt_l), name='input.lo')\n i_x_h = klc.Input(shape=mw_prep + (nc, nf_h, nt_h), name='input.hi')\n\n conv_stack_h = \\\n [kcv.Conv2D(8, (3, 3), name='conv_h.0.0', activation='elu'),\n kcv.Conv2D(8, (3, 3), name='conv_h.0.1', activation='elu'),\n kcv.MaxPooling2D((2, 2)),\n kcv.Conv2D(8, (3, 3), name='conv_h.1.0', activation='elu'),\n kcv.Conv2D(8, (3, 3), name='conv_h.1.1', activation='elu'),\n kcv.MaxPooling2D((2, 2)),\n klc.Flatten(name='conv_h.flatten')]\n\n conv_stack_l = \\\n [kcv.Conv2D(8, (3, 3), name='conv_l.0.0', activation='elu'),\n kcv.Conv2D(8, (3, 3), name='conv_l.0.1', activation='elu'),\n kcv.MaxPooling2D((2, 2)),\n klc.Flatten(name='conv_l.flatten')]\n\n if mw:\n conv_stack_h = [\n klc.Permute((1, 4, 2, 3)),\n klc.Reshape((-1, nc, nf_h),\n input_shape=(3, nc, nf_h, nt_h)),\n klc.Permute((2, 1, 3)),\n ] + conv_stack_h\n\n conv_stack_l = [\n klc.Permute((1, 4, 2, 3)),\n klc.Reshape((-1, nc, nf_l),\n input_shape=(3, nc, nf_l, nt_l)),\n klc.Permute((2, 1, 3)),\n ] + conv_stack_l\n\n if noise:\n conv_stack_h = [kno.GaussianNoise(noise_amp, name='inoise_h')]\\\n + conv_stack_h\n conv_stack_l = [kno.GaussianNoise(noise_amp, name='inoise_l')]\\\n + conv_stack_l\n\n conv_l = i_x_l\n for layer in conv_stack_l:\n conv_l = layer(conv_l)\n\n conv_h = i_x_h\n for layer in conv_stack_h:\n conv_h = layer(conv_h)\n\n dn_suff = '.mw' if mw else ''\n merged_conv = concatenate([conv_l, conv_h])\n dense_stack = [\n klc.Dense(24, name='dense.0' + dn_suff, activation='elu'),\n klc.Dropout(0.5),\n klc.Dense(24, name='dense.1' + dn_suff, activation='elu'),\n ]\n\n y = merged_conv\n for layer in dense_stack:\n y = layer(y)\n\n y = klc.Dense(6, name='y' + dn_suff, activation='softmax')(y)\n m = krm.Model(inputs=[i_x_l, i_x_h], outputs=y)\n m.compile(optimizer=opti, loss='categorical_crossentropy')\n\n if weights and mw:\n m.load_weights(weights, by_name=True)\n\n return m\n\n\ndef model_multiw(nc, nf_l, nt_l, nf_h, nt_h, swwfn=None, **kwargs):\n\n # m, cstack_l, cstack_h, dstack =\\\n # model_singlew(nc, nf_l, nt_l, nf_h, nt_h, comp=False)\n m_sw = model_singlew(nc, nf_l, nt_l, nf_h, nt_h, comp=False, **kwargs)\n if swwfn:\n m_sw.load_weights(swwfn, by_name=True)\n\n # TODO: hacky, until keras pulls #3432 to allow klc.TimeDistributed\n # on sequences\n i_x_l_s = 
klc.Input(shape=(3, nc, nf_l, nt_l), name='lo_freq_seq_input')\n    i_x_h_s = klc.Input(shape=(3, nc, nf_h, nt_h), name='hi_freq_seq_input')\n\n    # csls = i_x_l_s\n    # for layer in cstack_l:\n    #     csls = klc.TimeDistributed(layer)(csls)\n\n    # cshs = i_x_h_s\n    # for layer in cstack_h:\n    #     cshs = klc.TimeDistributed(layer)(cshs)\n\n    # merged_conv_s = klc.merge([csls, cshs], mode='concat', concat_axis=-1)\n\n    # y_s = merged_conv_s\n    # for layer in dstack:\n    #     y_s = klc.TimeDistributed(layer)(y_s)\n    y_sw = klc.TimeDistributed(m_sw)([i_x_l_s, i_x_h_s])\n    y_flat = klc.Flatten()(y_sw)\n    y_s_merged = klc.Dense(16, activation='elu', name='d.merge')(y_flat)\n    y_out = klc.Dense(6, name='y', activation='softmax')(y_s_merged)\n\n    mr = krm.Model(inputs=[i_x_l_s, i_x_h_s], outputs=y_out)\n    mr.compile(optimizer='adam', loss='categorical_crossentropy')\n\n    return mr\n\n\nif __name__ == '__main__':\n    m = model_singlew(7, 20, 30, 50, 40)\n", "sub_path": "src/learners.py", "file_name": "learners.py", "file_ext": "py", "file_size_in_byte": 4028, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "keras.backend.set_image_data_format", "line_number": 8, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 8, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 16, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 16, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 17, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.layers.convolutional", "line_number": 20, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.layers.convolutional", "line_number": 21, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.MaxPooling2D", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.layers.convolutional", "line_number": 22, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 23, "usage_type": "call"}, {"api_name": "keras.layers.convolutional", "line_number": 23, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.layers.convolutional", "line_number": 24, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.MaxPooling2D", "line_number": 25, "usage_type": "call"}, {"api_name": "keras.layers.convolutional", "line_number": 25, "usage_type": "name"}, {"api_name": "keras.layers.Flatten", "line_number": 26, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 26, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 29, "usage_type": "call"}, {"api_name": "keras.layers.convolutional", "line_number": 29, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 30, "usage_type": "call"}, {"api_name": "keras.layers.convolutional", "line_number": 30, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.MaxPooling2D", "line_number": 31, "usage_type": "call"}, {"api_name": "keras.layers.convolutional", "line_number": 31, "usage_type": "name"}, {"api_name": "keras.layers.Flatten", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 32, "usage_type": "name"}, {"api_name": "keras.layers.Permute", 
"line_number": 36, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 36, "usage_type": "name"}, {"api_name": "keras.layers.Reshape", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 37, "usage_type": "name"}, {"api_name": "keras.layers.Permute", "line_number": 39, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 39, "usage_type": "name"}, {"api_name": "keras.layers.Permute", "line_number": 43, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 43, "usage_type": "name"}, {"api_name": "keras.layers.Reshape", "line_number": 44, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 44, "usage_type": "name"}, {"api_name": "keras.layers.Permute", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 46, "usage_type": "name"}, {"api_name": "keras.layers.noise.GaussianNoise", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.layers.noise", "line_number": 50, "usage_type": "name"}, {"api_name": "keras.layers.noise.GaussianNoise", "line_number": 52, "usage_type": "call"}, {"api_name": "keras.layers.noise", "line_number": 52, "usage_type": "name"}, {"api_name": "keras.layers.merge.concatenate", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 66, "usage_type": "name"}, {"api_name": "keras.layers.Dropout", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 67, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 68, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 75, "usage_type": "name"}, {"api_name": "keras.models.Model", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 76, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 95, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 95, "usage_type": "name"}, {"api_name": "keras.layers.Input", "line_number": 96, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 96, "usage_type": "name"}, {"api_name": "keras.layers.TimeDistributed", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 111, "usage_type": "name"}, {"api_name": "keras.layers.Flatten", "line_number": 112, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 112, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 113, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 113, "usage_type": "name"}, {"api_name": "keras.layers.Dense", "line_number": 114, "usage_type": "call"}, {"api_name": "keras.layers", "line_number": 114, "usage_type": "name"}, {"api_name": "keras.models.Model", "line_number": 116, "usage_type": "call"}, {"api_name": "keras.models", "line_number": 116, "usage_type": "name"}]} +{"seq_id": "472856654", "text": "from django.contrib import messages\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.core.urlresolvers import reverse\nfrom django.http import JsonResponse, Http404\nfrom django.views.generic.base import View\nfrom django.views.generic.detail import SingleObjectMixin, DetailView\nfrom django.views.generic.edit import FormMixin\nfrom django.shortcuts import render, get_object_or_404, redirect\n\n# Create 
your views here.\n\nfrom orders.forms import GuestCheckoutForm\nfrom orders.mixins import CartOrderMixin\nfrom orders.models import UserCheckout, UserAddress\nfrom products.models import Variation\nfrom .models import Cart, CartItem\n\nclass ItemCountView(View):\n\n def get(self, request, *args, **kwargs):\n if request.is_ajax():\n cart_id = self.request.session.get('cart_id')\n if cart_id:\n cart = Cart.objects.get(id=cart_id)\n cartitem_count = cart.cartitem_set.count()\n else:\n cartitem_count = 0\n self.request.session['cartitem_count'] = cartitem_count\n return JsonResponse({'cartitem_count': cartitem_count})\n else:\n raise Http404\n\nclass CartView(SingleObjectMixin, View):\n model = Cart\n template_name = 'carts/cart_view.html'\n\n def get_object(self, *args, **kwargs):\n self.request.session.set_expiry(900) # 900 secs\n cart_id = self.request.session.get('cart_id')\n if cart_id is None:\n cart = Cart()\n cart.save()\n cart_id = cart.id\n self.request.session['cart_id'] = cart_id\n\n cart = Cart.objects.get(id=cart_id)\n if self.request.user.is_authenticated(): # Login user\n # if the cart is not belong to the current login user,\n # start a new cart\n if cart.user is not None and cart.user != self.request.user:\n cart = Cart()\n cart.save()\n self.request.session['cart_id'] = cart.id\n cart.user = self.request.user\n cart.save()\n else: # Guest user\n if cart.user:\n pass # Required Login or remind user to start a new session\n return cart\n\n def get(self, request, *args, **kwargs):\n cart = self.get_object()\n item_id = request.GET.get('item_id')\n delete_item = request.GET.get('delete', False)\n qty = request.GET.get('qty')\n flash_message = ''\n # Check order:\n # 1. item_id -- determine the instance\n # 2. delete_item -- determine whether to delete the instance\n # 3. qty -- determine how many instance will be added to the cart\n # the 'qty' option would not work when 'delete_item' is set to something\n # By default, 'qty' is set to 1.\n if item_id:\n item_instance = get_object_or_404(Variation, id=item_id)\n cart_item, created = CartItem.objects.get_or_create(cart=cart, item=item_instance)\n item_updated = False # For Ajax\n if delete_item:\n cart_item.delete()\n cart.update_subtotal() # Recalculate subtotal\n elif qty:\n if qty.isdigit() and int(qty) > 0:\n if cart_item.quantity != qty:\n cart_item.quantity = qty\n cart_item.save()\n item_updated = True # For Ajax\n else:\n messages.error(request, \"The input quantity is not valid. 
Add to cart operation fails.\")\n if created:\n cart_item.delete()\n # Check operation status\n item_added = cart_item and created\n if item_added:\n flash_message = 'Item successfully added.'\n elif delete_item:\n flash_message = 'Item removed successfully.'\n elif item_updated:\n flash_message = 'Quantity has been update successfully.'\n if request.is_ajax():\n # Refresh data for Ajax request\n cart.update_subtotal()\n cartitem_count = cart.cartitem_set.count()\n\n jsondata = {\n 'flash_message': flash_message,\n # For cart detail view only\n 'line_item_total': cart_item.line_item_total,\n 'cart_subtotal': cart.subtotal,\n 'cart_tax_total': cart.tax_total,\n 'cart_total': cart.total,\n 'cartitem_count': cartitem_count,\n }\n return JsonResponse(jsondata)\n\n context = {\n 'object': cart,\n }\n template = self.template_name\n # if update quantity from self page, Reload page to recalculate the subtotal\n update_item = request.GET.get('update')\n if update_item:\n return redirect('cart')\n return render(request, template, context)\n\nclass CheckoutView(FormMixin, CartOrderMixin, DetailView):\n model = Cart\n template_name = 'carts/checkout_view.html'\n form_class = GuestCheckoutForm\n\n def get_object(self, *args, **kwargs):\n cart = self.get_cart()\n return cart\n\n def get_context_data(self, *args, **kwargs):\n context = super(CheckoutView, self).get_context_data(*args, **kwargs)\n user_checkout_id = self.request.session.get('user_checkout_id')\n if self.request.user.is_authenticated() or user_checkout_id:\n context['user_can_continue'] = True\n else:\n context['user_can_continue'] = False\n context['login_form'] = AuthenticationForm()\n context['next_url'] = self.request.build_absolute_uri()\n if self.request.user.is_authenticated():\n user_checkout, created = UserCheckout.objects.get_or_create(email=self.request.user.email)\n if created: # Do not validate if the user and the email match\n user_checkout.user = self.request.user\n user_checkout.save()\n self.request.session['user_checkout_id'] = user_checkout.id\n context['order'] = self.get_order()\n context['form'] = self.get_form()\n return context\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object() # Assign the object to the view\n form = self.get_form()\n if form.is_valid():\n email = form.cleaned_data.get('email')\n user_checkout, created = UserCheckout.objects.get_or_create(email=email)\n request.session['user_checkout_id'] = user_checkout.id\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n def get_success_url(self):\n return reverse('checkout')\n\n def get(self, request, *args, **kwargs):\n get_data = super(CheckoutView, self).get(request, *args, **kwargs)\n\n # 1. Get shopping cart\n cart = self.get_object()\n if cart.cartitem_set.count() == 0:\n return redirect('cart')\n\n # 2. Get order\n new_order = self.get_order()\n\n # 3. Get user_checkout\n user_checkout_id = request.session.get('user_checkout_id')\n if user_checkout_id:\n user_checkout = UserCheckout.objects.get(id=user_checkout_id)\n else:\n # If user_checkout_id is None, stop continuing gathering order data\n return get_data\n\n # 4. Confirm shipping and billing address\n if new_order.shipping_address is None or new_order.billing_address is None:\n return redirect('order_address')\n\n # 5. 
Save the order\n        new_order.user_checkout = user_checkout\n        new_order.save()\n        return get_data\n\nclass CheckoutFinalView(CartOrderMixin, View):\n    def post(self, request, *args, **kwargs):\n        order = self.get_order()\n        if order.cart.cartitem_set.count() == 0:\n            return redirect('cart')\n        # Validate payment\n        if request.POST.get('payment_token') == 'ABC':\n            order.mark_completed()\n            del request.session['cart_id']\n            del request.session['order_id']\n            messages.success(request, 'Your order has been completed. Thank you for your order.')\n        return redirect('order_detail', pk=order.pk)\n\n    def get(self, request, *args, **kwargs):\n        return redirect('orders')", "sub_path": "carts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 8322, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.views.generic.base.View", "line_number": 18, "usage_type": "name"}, {"api_name": "models.Cart.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 24, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 29, "usage_type": "call"}, {"api_name": "django.http.Http404", "line_number": 31, "usage_type": "name"}, {"api_name": "django.views.generic.detail.SingleObjectMixin", "line_number": 33, "usage_type": "name"}, {"api_name": "django.views.generic.base.View", "line_number": 33, "usage_type": "name"}, {"api_name": "models.Cart", "line_number": 34, "usage_type": "name"}, {"api_name": "models.Cart", "line_number": 41, "usage_type": "call"}, {"api_name": "models.Cart.objects.get", "line_number": 46, "usage_type": "call"}, {"api_name": "models.Cart.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.Cart", "line_number": 46, "usage_type": "name"}, {"api_name": "models.Cart", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 74, "usage_type": "call"}, {"api_name": "products.models.Variation", "line_number": 74, "usage_type": "argument"}, {"api_name": "models.CartItem.objects.get_or_create", "line_number": 75, "usage_type": "call"}, {"api_name": "models.CartItem.objects", "line_number": 75, "usage_type": "attribute"}, {"api_name": "models.CartItem", "line_number": 75, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 87, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 87, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 112, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 121, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 122, "usage_type": "call"}, {"api_name": "django.views.generic.edit.FormMixin", "line_number": 124, "usage_type": "name"}, {"api_name": "orders.mixins.CartOrderMixin", "line_number": 124, "usage_type": "name"}, {"api_name": "django.views.generic.detail.DetailView", "line_number": 124, "usage_type": "name"}, {"api_name": "models.Cart", "line_number": 125, "usage_type": "name"}, {"api_name": "orders.forms.GuestCheckoutForm", "line_number": 127, "usage_type": "name"}, {"api_name": "django.contrib.auth.forms.AuthenticationForm", "line_number": 140, "usage_type": "call"}, {"api_name": "orders.models.UserCheckout.objects.get_or_create", "line_number": 143, "usage_type": "call"}, {"api_name": "orders.models.UserCheckout.objects", 
"line_number": 143, "usage_type": "attribute"}, {"api_name": "orders.models.UserCheckout", "line_number": 143, "usage_type": "name"}, {"api_name": "orders.models.UserCheckout.objects.get_or_create", "line_number": 157, "usage_type": "call"}, {"api_name": "orders.models.UserCheckout.objects", "line_number": 157, "usage_type": "attribute"}, {"api_name": "orders.models.UserCheckout", "line_number": 157, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 164, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 172, "usage_type": "call"}, {"api_name": "orders.models.UserCheckout.objects.get", "line_number": 180, "usage_type": "call"}, {"api_name": "orders.models.UserCheckout.objects", "line_number": 180, "usage_type": "attribute"}, {"api_name": "orders.models.UserCheckout", "line_number": 180, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 187, "usage_type": "call"}, {"api_name": "orders.mixins.CartOrderMixin", "line_number": 194, "usage_type": "name"}, {"api_name": "django.views.generic.base.View", "line_number": 194, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 198, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 204, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 204, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 205, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 208, "usage_type": "call"}]} +{"seq_id": "277345590", "text": "from django.conf.urls import url\nfrom . import views\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom .forms import CustomAuthForm\nfrom django.conf.urls import include\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\n\nstart = True\n\nurlpatterns = [\n url(r'^tinymce/', include('tinymce.urls')),\n url(r'^home/', views.index, name='index'),\n url(r'^login/$', auth_views.login, name='login', kwargs={\"authentication_form\":CustomAuthForm}),\n url(r'^logout/$', auth_views.logout, {'next_page': 'login'}, name='logout'),\n url(r'^signup/$', views.signup, name='signup'),\n url(r'^admin/', admin.site.urls),\n url(r'^quiz/', views.show_word, name='show_word'),\n url(r'^quiz/continue', views.continue_quiz, name='continue_quiz'),\n url(r'^findclassroom', views.find_classroom, name='find_classroom'),\n url(r'^myclassrooms', views.my_classrooms, name='my_classrooms'),\n url(r'^teacherclassrooms', views.teacher_classrooms, name='teacher_classrooms'),\n url(r'^createclassroom', views.create_new_classroom, name='create_new_classroom'),\n url(r'^remove_classroom/(?P\\d+)/$', views.remove_classroom, name='remove_classroom'),\n url(r'^join_classroom/(?P\\d+)/$', views.join_classroom, name='join_classroom'),\n url(r'^check_classroom_pw/(?P\\d+)/$', views.check_classroom_pw, name='check_classroom_pw'),\n url(r'^enter_classroom/(?P\\d+)/$', views.enter_classroom, name='enter_classroom'),\n url(r'^unassign_classroom/(?P\\d+)/$', views.unassign_classroom, name='unassign_classroom'),\n url(r'^enter_lesson/(?P\\d+)/$', views.enter_lesson, name='enter_lesson'),\n url(r'^enter_lesson_teacher/(?P\\d+)/$', views.enter_lesson_teacher, name='enter_lesson_teacher'),\n url(r'^enter_vocab/(?P\\d+)/$', views.enter_vocab, name='enter_vocab'),\n url(r'^enter_vocab_teacher/(?P\\d+)/$', views.enter_vocab_teacher, name='enter_vocab_teacher'),\n 
url(r'^enter_more/(?P\\d+)/$', views.enter_more, name='enter_more'),\n url(r'^enter_more_teacher/(?P\\d+)/$', views.enter_more_teacher, name='enter_more_teacher'),\n url(r'^enter_section/(?P\\d+)/$', views.enter_section, name='enter_section'),\n url(r'^enter_section_teacher/(?P\\d+)/$', views.enter_section_teacher, name='enter_section_teacher'),\n url(r'^enter_exercises/(?P\\d+)/$', views.enter_exercises, name='enter_exercises'),\n url(r'^edit_carousel/(?P\\d+)/$', views.edit_carousel, name='edit_carousel'),\n url(r'^enter_exercises_teacher/(?P\\d+)/$', views.enter_exercises_teacher, name='enter_exercises_teacher'),\n url(r'^do_exercise/(?P\\d+)/(?P\\d+)/$', views.do_exercise, name='do_exercise'),\n url(r'^edit_classroom/(?P\\d+)/$', views.edit_classroom, name='edit_classroom'),\n url(r'^view_lesson/(?P\\d+)/$', views.view_lesson, name='view_lesson'),\n url(r'^enter_vocab_overview/(?P\\d+)/$', views.enter_vocab_overview, name='enter_vocab_overview'),\n url(r'^add_to_classroom/(?P\\d+)/(?P\\d+)/$', views.add_lesson_to_cl, name='add_lesson_to_cl'),\n url(r'^remove_lesson_from_classroom/(?P\\d+)/(?P\\d+)/$', views.remove_lesson_from_classroom, name='remove_lesson_from_classroom'),\n url(r'^student_performance/(?P\\d+)/(?P\\d+)/$', views.student_performance, name='student_performance'),\n url(r'^find_lesson/(?P\\d+)/$', views.find_lesson_in_classroom, name='find_lesson_in_classroom'),\n url(r'^add_new_lesson/(?P\\d+)/$', views.add_new_lesson, name='add_new_lesson'),\n url(r'^add_new_module/(?P\\d+)/$', views.add_new_module, name='add_new_module'),\n url(r'^prev_exercise/(?P\\d+)/$', views.prev_exercise, name='prev_exercise'),\n url(r'^remove_lesson/(?P\\d+)/$', views.remove_lesson, name='remove_lesson'),\n url(r'^add_new_word/(?P\\d+)/$', views.add_new_word, name='add_new_word'),\n url(r'^upload_csv/(?P\\d+)/$', views.upload_csv, name='upload_csv'),\n url(r'^add_word_to_lesson/(?P\\d+)/$', views.add_word_to_lesson, name='add_word_to_lesson'),\n url(r'^add_exercise_to_lesson/(?P\\d+)/$', views.add_exercise_to_lesson, name='add_exercise_to_lesson'),\n url(r'^remove_word_from_lesson/(?P\\d+)/(?P\\d+)/$', views.remove_word_from_lesson, name='remove_word_from_lesson'),\n url(r'^add_exe_to_lesson/(?P\\d+)/(?P\\d+)/$', views.add_exe_to_lesson, name='add_exe_to_lesson'),\n url(r'^remove_exe_from_lesson/(?P\\d+)/(?P\\d+)/$', views.remove_exe_from_lesson, name='remove_exe_from_lesson'),\n url(r'^evaluate_st_exercise/(?P\\d+)/(?P\\d+)/$', views.evaluate_st_exercise, name='evaluate_st_exercise'),\n url(r'^student_exercise_details/(?P\\d+)/(?P\\d+)/(?P\\d+)/$', views.student_exercise_details, name='student_exercise_details'),\n url(r'^findword/(?P\\d+)/$', views.find_word_for_lesson, name='find_word_for_lesson'),\n url(r'^add_new_text/(?P\\d+)/$', views.add_new_text, name='add_new_text'),\n url(r'^remove_exe_entirely/(?P\\d+)/$', views.remove_exe_entirely, name='remove_exe_entirely'),\n url(r'^add_new_excercise/(?P\\d+)/$', views.add_new_excercise, name='add_new_excercise'),\n url(r'^editor/$', views.editor, name='editor'),\n url(r'^save/$', views.save_new_excercise, name='save_new_excercise'),\n url(r'^uploadvideo/$', views.uploadvideo, name='uploadvideo'),\n url(r'^uploadaudio/$', views.uploadaudio, name='uploadaudio'),\n url(r'^uploadimage/$', views.uploadimage, name='uploadimage'),\n url(r'^upload_area/$', views.upload_area, name='upload_area'),\n url(r'^about/$', views.about, name='about'),\n url(r'^privacy/$', views.privacy, name='privacy'),\n url(r'^tinymce/', include('tinymce.urls')),\n]+ 
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "sub_path": "efluentapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 5978, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 14, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.login", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 16, "usage_type": "name"}, {"api_name": "forms.CustomAuthForm", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.logout", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 19, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 19, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 35, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 36, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 37, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 38, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 39, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 40, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 41, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 42, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 43, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 44, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 45, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 46, 
"usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 47, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 48, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 49, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 50, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 51, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 52, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 53, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 54, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 55, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 56, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 57, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 58, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 59, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 60, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 61, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 62, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 63, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 64, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 65, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 66, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 67, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 68, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 69, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 70, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 71, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 72, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 73, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 74, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 75, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 75, "usage_type": "call"}, {"api_name": "django.conf.urls.static.static", "line_number": 76, "usage_type": "call"}, {"api_name": "django.conf.settings.MEDIA_URL", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 76, "usage_type": "name"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 76, "usage_type": "attribute"}]} +{"seq_id": "468820781", "text": "# coding: utf-8\n\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\n\"\"\"\nFILE: ml_samples_authentication_sovereign_cloud.py\n\nDESCRIPTION:\n These samples demonstrate authenticating a client for multiple clouds.\n \nUSAGE:\n python ml_samples_authentication_sovereign_cloud.py\n\n Set the environment variables with your own values before running the sample:\n 1) AZURE_SUBSCRIPTION_ID - The subscription id.\n 2) RESOURCE_GROUP_NAME - Resource group name.\n\n\"\"\"\n\nimport os\n\n\nclass MLClientSamples(object):\n def ml_auth_azure_default_credential(self):\n # [START create_ml_client_default_credential]\n # Get a credential for authentication\n # Default Azure Credentials attempt a chained set of authentication methods, per documentation here: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/identity/azure-identity\n # Alternately, one can specify the AZURE_TENANT_ID, AZURE_CLIENT_ID, and AZURE_CLIENT_SECRET to use the EnvironmentCredentialClass.\n # The docs above specify all mechanisms which the defaultCredential internally support.\n # Enter details of your subscription\n subscription_id = os.environ[\"AZURE_SUBSCRIPTION_ID\"]\n resource_group = os.environ[\"RESOURCE_GROUP_NAME\"]\n\n # Instantiate a MLClient\n from azure.identity import AzureAuthorityHosts, DefaultAzureCredential\n\n from azure.ai.ml import MLClient\n\n # When using sovereign domains (that is, any cloud other than AZURE_PUBLIC_CLOUD),\n # you must use an authority with DefaultAzureCredential.\n # Default authority value : AzureAuthorityHosts.AZURE_PUBLIC_CLOUD\n # Expected values for authority for sovereign clouds:\n # AzureAuthorityHosts.AZURE_CHINA or AzureAuthorityHosts.AZURE_GOVERNMENT\n # credential = DefaultAzureCredential(authority=AzureAuthorityHosts.AZURE_CHINA)\n credential = DefaultAzureCredential(authority=AzureAuthorityHosts.AZURE_PUBLIC_CLOUD)\n\n # When using sovereign domains (that is, any cloud other than AZURE_PUBLIC_CLOUD),\n # you must pass in the cloud name in kwargs. 
Default cloud is AzureCloud\n kwargs = {\"cloud\": \"AzureCloud\"}\n # get a handle to the subscription\n ml_client = MLClient(credential, subscription_id, resource_group, **kwargs)\n # [END create_ml_client_default_credential]\n\n from azure.ai.ml.entities import Workspace\n\n # Get a list of workspaces in a resource group\n for ws in ml_client.workspaces.list():\n print(ws.name, \":\", ws.location, \":\", ws.description)\n\n\nif __name__ == \"__main__\":\n sample = MLClientSamples()\n sample.ml_auth_azure_default_credential()\n", "sub_path": "sdk/ml/azure-ai-ml/samples/ml_samples_authentication_sovereign_cloud.py", "file_name": "ml_samples_authentication_sovereign_cloud.py", "file_ext": "py", "file_size_in_byte": 2951, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 36, "usage_type": "attribute"}, {"api_name": "azure.identity.DefaultAzureCredential", "line_number": 49, "usage_type": "call"}, {"api_name": "azure.identity.AzureAuthorityHosts.AZURE_PUBLIC_CLOUD", "line_number": 49, "usage_type": "attribute"}, {"api_name": "azure.identity.AzureAuthorityHosts", "line_number": 49, "usage_type": "name"}, {"api_name": "azure.ai.ml.MLClient", "line_number": 55, "usage_type": "call"}, {"api_name": "{'AzureAuthorityHosts': 'azure.identity.AzureAuthorityHosts', 'DefaultAzureCredential': 'azure.identity.DefaultAzureCredential', 'MLClient': 'azure.ai.ml.MLClient', 'Workspace': 'azure.ai.ml.entities.Workspace'}", "line_number": 66, "usage_type": "call"}]} +{"seq_id": "109360115", "text": "# coding=gbk\n\nimport itchat\nimport os\nimport PIL.Image as Image\nfrom os import listdir\nimport math\nimport matplotlib.pyplot as plt\nimport random\nfrom wordcloud import WordCloud\nimport re\n\nif __name__ == '__main__':\n # 登录\n itchat.auto_login(hotReload=True)\n\n friends = itchat.get_friends(update=True)[0:]\n\n # # 发送信息\n # for item friends:\n # print(item)\n # print(item['NickName'])\n # number = 500\n # while number:\n # itchat.send_msg('', friends[20]['UserName'])\n # number -= 1\n\n # 爬取拼接图片\n user = friends[0][\"UserName\"]\n\n print(user)\n\n os.mkdir(user)\n\n num = 0\n\n for i in friends:\n img = itchat.get_head_img(userName=i[\"UserName\"])\n\n fileImage = open(user + \"/\" + str(num) + \".jpg\", 'wb')\n\n fileImage.write(img)\n\n fileImage.close()\n\n num += 1\n\n pics = listdir(user)\n\n numPic = len(pics)\n\n print(numPic)\n\n eachsize = int(math.sqrt(float(640 * 640) / numPic))\n\n print(eachsize)\n\n numline = int(640 / eachsize)\n\n toImage = Image.new('RGB', (640, 640))\n\n print(numline)\n\n x = 0\n\n y = 0\n\n for i in pics:\n\n try:\n\n # 打开图片\n\n img = Image.open(user + \"/\" + i)\n\n except IOError:\n\n print(\"Error: 没有找到文件或读取文件失败\")\n\n else:\n\n # 缩小图片\n\n img = img.resize((eachsize, eachsize), Image.ANTIALIAS)\n\n # 拼接图片\n\n toImage.paste(img, (x * eachsize, y * eachsize))\n\n x += 1\n\n if x == numline:\n x = 0\n\n y += 1\n\n toImage.save(user + \".jpg\")\n\n itchat.send_image(user + \".jpg\", 'filehelper')\n\n # 性别统计\n sex = dict()\n for f in friends:\n if f[\"Sex\"] == 1: # 男\n sex[\"man\"] = sex.get(\"man\", 0) + 1\n elif f[\"Sex\"] == 2: #女\n sex[\"women\"] = sex.get(\"women\", 0) + 1\n else: #未知\n sex[\"unknown\"] = sex.get(\"unknown\", 0) + 1\n # 柱状图展示\n for i, key in enumerate(sex):\n plt.bar(key, sex[key])\n plt.show()\n\n # 获取词云\n itchat.login()\n friends = itchat.get_friends(update=True)\n file = 
open('sign.txt', 'a', encoding='utf-8')\n    for f in friends:\n        signature = f[\"Signature\"].strip().replace(\"emoji\", \"\").replace(\"span\", \"\").replace(\"class\", \"\")\n        rec = re.compile(\"1f\\d+\\w*|[<>/=]\")\n        signature = rec.sub(\"\", signature)\n        file.write(signature + \"\\n\")\n\n\n    # generate the word-cloud image\n    def create_word_cloud(filename):\n        # read the file content\n        text = open(\"{}.txt\".format(filename), encoding='utf-8').read()\n\n        # the commented lines use jieba word segmentation\n        # wordlist = jieba.cut(text, cut_all=True)\n        # wl = \" \".join(wordlist)\n\n        # configure the word cloud\n        wc = WordCloud(\n            # background color\n            background_color=\"white\",\n            # maximum number of words to display\n            max_words=2000,\n            # these fonts ship with the OS: C:\\\\Windows\\\\Fonts\\\\ on Windows, /System/Library/Fonts/PingFang.ttc on mac\n            font_path='C:\\\\Windows\\\\Fonts\\\\simfang.ttf',\n            height=500,\n            width=500,\n            # maximum font size\n            max_font_size=60,\n            # number of random states, i.e. how many color schemes\n            random_state=30,\n        )\n\n        myword = wc.generate(text)  # generate the word cloud; with jieba segmentation, pass wl instead of text\n        # display the word-cloud image\n        plt.imshow(myword)\n        plt.axis(\"off\")\n        plt.show()\n        wc.to_file('signature.png')  # save the word cloud to a file\n\n\n    create_word_cloud(\"sign\")\n\n", "sub_path": "learnPython/test/mac/itchat.py", "file_name": "itchat.py", "file_ext": "py", "file_size_in_byte": 3724, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "itchat.auto_login", "line_number": 15, "usage_type": "call"}, {"api_name": "itchat.get_friends", "line_number": 17, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 33, "usage_type": "call"}, {"api_name": "itchat.get_head_img", "line_number": 38, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 48, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 54, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 60, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 60, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 74, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 74, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 84, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 84, "usage_type": "name"}, {"api_name": "itchat.send_image", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "itchat.login", "line_number": 116, "usage_type": "call"}, {"api_name": "itchat.get_friends", "line_number": 117, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 121, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}]} +{"seq_id": "384964164", "text": "#!/usr/bin/env python\n\n\"\"\"\nCopyright (c) UChicago Argonne, LLC. 
All rights reserved.\nSee LICENSE file.\n\"\"\"\n\nimport unittest\n\nfrom selenium import webdriver\n\nfrom cdbSeleniumModules.browse_by import BrowseBy\nfrom cdbSeleniumModules.cable_catalog import CableCatalog\nfrom cdbSeleniumModules.cable_design import CableDesign\nfrom cdbSeleniumModules.cable_inventory import CableInventory\nfrom cdbSeleniumModules.catalog import Catalog\nfrom cdbSeleniumModules.connector_type import ConnectorType\nfrom cdbSeleniumModules.inventory import Inventory\nfrom cdbSeleniumModules.location import Location\nfrom cdbSeleniumModules.machineDesign import MachineDesign\nfrom cdbSeleniumModules.portal import Portal\nfrom cdbSeleniumModules.propertyType import PropertyType\nfrom cdbSeleniumModules.source import Source\nfrom cdbSeleniumModules.user_info import UserInfo\n\n\nclass CdbPortalFunctionalTestSuite(unittest.TestCase):\n\n PORTAL_URL = 'http://localhost:8080/cdb'\n MACHINE_DESIGN_CSV_PATH = 'data/md-test.csv'\n MACHINE_DESIGN_PROJECT = 'APS-U'\n\n def setUp(self):\n self.driver = webdriver.Chrome()\n self.driver.set_window_position(0, 0)\n self.driver.maximize_window()\n self.driver.get(self.PORTAL_URL)\n\n self.portal = Portal(self.driver)\n self.property_type = PropertyType(self.driver)\n self.catalog = Catalog(self.driver)\n self.inventory = Inventory(self.driver)\n self.cable_inventory = CableInventory(self.driver)\n self.cable_catalog = CableCatalog(self.driver)\n self.cable_design = CableDesign(self.driver)\n self.machine_design = MachineDesign(self.driver)\n self.location = Location(self.driver)\n self.browse_by = BrowseBy(self.driver)\n self.source = Source(self.driver)\n self.user_info = UserInfo(self.driver)\n self.connector_type = ConnectorType(self.driver)\n\n self.portal.login()\n\n def tearDown(self):\n self.portal.logout()\n self.driver.close()\n\n def test_create_property_type(self):\n self.property_type.add_sample_test_property_type()\n self.property_type.delete_sample_test_property_type()\n\n def test_create_catalog_item(self):\n self.catalog.start_create_sample_catalog_item()\n self.catalog.finish_create_sample_catalog_item()\n\n self.catalog.display_more_columns()\n self.catalog.search_for_sample_catalog_item()\n self.catalog.add_log_to_catalog_item()\n\n self.catalog.delete_current_item()\n\n def test_machine_design(self):\n self.machine_design.navigate_to_machine_design()\n self.machine_design.input_hierarchy_from_sample_file(csvFile=self.MACHINE_DESIGN_CSV_PATH,\n project=self.MACHINE_DESIGN_PROJECT)\n\n def test_browse_by_function_pages(self):\n self.browse_by.navigate_to_browse_by_function()\n self.browse_by.test_browse_by_function(self)\n\n def test_browse_by_owner(self):\n self.browse_by.navigate_to_browse_by_owner()\n self.browse_by.test_browse_by_owner(self)\n\n def test_browse_by_location(self):\n self.browse_by.navigate_to_browse_by_location()\n self.browse_by.test_browse_by_location(self)\n\n def test_catalog_pages(self):\n self.catalog.navigate_to_catalog_list()\n self.catalog.test_catalog_pages()\n\n def test_cable_catalog_pages(self):\n self.cable_catalog.navigate_to_cable_catalog_list()\n self.cable_catalog.test_cable_catalog_pages()\n\n def test_inventory_pages(self):\n self.inventory.navigate_to_inventory_list()\n self.inventory.test_inventory_pages()\n\n def test_cable_inventory_pages(self):\n self.cable_inventory.navigate_to_cable_inventory_list()\n self.cable_inventory.test_cable_inventory_pages()\n\n def test_cable_design_pages(self):\n self.cable_design.navigate_to_cable_design_list()\n 
self.cable_design.test_cable_design_pages()\n\n    def test_source_pages(self):\n        self.source.navigate_to_source_list()\n        self.source.test_source_pages()\n\n    def test_user_info_pages(self):\n        self.user_info.navigate_to_user_info_list()\n        self.user_info.test_user_info_pages()\n\n    def test_connector_type_pages(self):\n        self.connector_type.navigate_to_connector_type_list()\n        self.connector_type.test_connector_type_pages()\n\n    def test_location_pages(self):\n        self.location.navigate_to_location_list()\n        self.location.test_location_pages()\n\n\nif __name__ == '__main__':\n    unittest.main()", "sub_path": "tools/developer_tools/portal_testing/PythonSeleniumTest/gui_test.py", "file_name": "gui_test.py", "file_ext": "py", "file_size_in_byte": 4551, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "unittest.TestCase", "line_number": 27, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 34, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 34, "usage_type": "name"}, {"api_name": "cdbSeleniumModules.portal.Portal", "line_number": 39, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.propertyType.PropertyType", "line_number": 40, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.catalog.Catalog", "line_number": 41, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.inventory.Inventory", "line_number": 42, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.cable_inventory.CableInventory", "line_number": 43, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.cable_catalog.CableCatalog", "line_number": 44, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.cable_design.CableDesign", "line_number": 45, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.machineDesign.MachineDesign", "line_number": 46, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.location.Location", "line_number": 47, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.browse_by.BrowseBy", "line_number": 48, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.source.Source", "line_number": 49, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.user_info.UserInfo", "line_number": 50, "usage_type": "call"}, {"api_name": "cdbSeleniumModules.connector_type.ConnectorType", "line_number": 51, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "331118280", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport json\nimport re\nimport xlwt\nimport os\n\n\ndef del_file(path):\n    for i in os.listdir(path):\n        path_file = os.path.join(path, i)  # join with the directory being walked, not the auction root\n        if os.path.isfile(path_file):\n            os.remove(path_file)\n        else:\n            del_file(path_file)\n\ndef create_folder():\n    if os.path.isdir(basepath + auctionname):\n        if len(os.listdir(basepath + auctionname)) != 0:\n            del_file(basepath + auctionname + '\\\\')\n            print('Files deleted!')\n    else:\n        os.makedirs(basepath + auctionname)\n        print('Folder created!')\n\ndef save_excel(itemdict):\n    excel_init_file = xlwt.Workbook(encoding='utf-8')\n    table = excel_init_file.add_sheet('auction', cell_overwrite_ok=True)\n    row_num = 0\n    for k, v in itemdict.items():\n        table.write(row_num, 0, k)\n        table.write(row_num, 1, str(v[0]))\n        table.write(row_num, 2, str(v[1]))\n        table.write(row_num, 3, str(v[2]))\n        table.write(row_num, 4, str(v[3]))\n        table.write(row_num, 5, str(v[4]))\n        row_num += 1\n    excel_init_file.save(basepath + auctionname + '\\\\' + 'auction.xls')\n\ndef 
download_img(item_dict):\n    for k, v in item_dict.items():\n        imgnum = 0\n        print('item_dict', v[5])\n        if v[5]:\n            for imgurl in v[5]:\n                if imgnum == 0:\n                    imgreq = requests.get(imgurl, verify=False)\n                    if imgreq.status_code < 400:\n                        with open(basepath + auctionname + '\\\\' + str(k) + '.jpg', 'wb') as f:\n                            f.write(imgreq.content)\n                        imgnum += 1\n                else:\n                    imgreq = requests.get(imgurl, verify=False)\n                    if imgreq.status_code < 400:\n                        with open(basepath + auctionname + '\\\\' + str(k) + '_' + str(imgnum) + '.jpg', 'wb') as f:\n                            f.write(imgreq.content)\n                        imgnum += 1\n\n\nbasepath = 'C:\\\\auctions\\\\catawiki\\\\'\nauctionname = '179995-exclusive-asian-art-object-auction'\ncreate_folder()\n\n\nurl = 'https://www.catawiki.com/a/179995-exclusive-asian-art-object-auction'\njson_url = 'https://www.catawiki.com/buyer/api/v1/lots/live?ids=24350827%2C24279059%2C24294193%2C24303379%2C24205195%2C24283417%2C24296847%2C24232591%2C24304837%2C24310437%2C23235533%2C24218021%2C24296399%2C24158205%2C24218077%2C24272347%2C24075667%2C24213779%2C24273951%2C24195181%2C24249779%2C24273905%2C24111451%2C24246059%2C24321771%2C24269679%2C24146239%2C24225199%2C22299979%2C24196993%2C24088467%2C24349577%2C24251971%2C24297737%2C24243923%2C24249029%2C24245807%2C24345053%2C24265961%2C24358305%2C24150977%2C24272363%2C24305591%2C24271353%2C24227277%2C24317425%2C24313973%2C24040563%2C24294017%2C24297273%2C24317571%2C24220931%2C24345323%2C24223677%2C24117147%2C24349691%2C24284955%2C24183199%2C24235011%2C24328983%2C24319497%2C24274057%2C24259805%2C24302093&tdsourcetag=s_pctim_aiomsg'\njson_req = requests.get(json_url)\nlots_json = json.loads(json_req.text)['lots']\n\nitem_dict = {}\n\nreq = requests.get(url)\n# print(req.text)\nsoup = BeautifulSoup(req.text,'html5lib')\n# print(soup)\nitems_div = soup.select_one('div.be-lot-list__loader.disabled')\nif items_div:\n    data = items_div.get('data-props')\n    # print(data)\n\n    json_data = json.loads(data)\n    for item in json_data['results']:\n        # print(item)\n        # lot id\n        lotno = item['id']\n        print(lotno)\n        # title\n        title = item['title']\n        print(title)\n\n        # lot link\n        item_url = item['url']\n        # request the lot detail page\n        req = requests.get(item_url)\n        soup = BeautifulSoup(req.text, 'html5lib')\n        # print(soup)\n        # description\n        des = soup.select_one('#cw-lot-description table.cw-spacious')\n        if des:\n            des = des.text.replace('\\n',' ').strip()\n        else:\n            des = ''\n        print(des)\n        # starting price\n        for lot in lots_json:\n            # print(lot)\n            lotid = lot['id']\n            # print('lotno',lotno)\n            # print('lotid',lotid)\n            if lotno == lotid:\n                startprice = lot['current_bid_amount']['USD']\n                break\n            else:\n                startprice=0\n        print(startprice)\n        # estimate\n        est = soup.select_one('span.cw-currency-amount.cw-currency-amount-usd')\n        if est:\n            est = re.sub('\\s','',est.text).replace(',','')\n            len_est = len(est.split('-'))\n            if len_est > 1:\n                est_low = est.split('-')[0]\n                est_high = est.split('-')[1]\n            else:\n                est_low = est.split('-')[0]\n                est_high = est_low\n        else:\n            est_low = ''\n            est_high = ''\n        print(est_low)\n        print(est_high)\n        # images\n        img_url_list = []\n        image_a_list = soup.select('div.cw-lot-images-thumbs.cw-lazy_images_collection a')\n        if image_a_list:\n            for image_a in image_a_list:\n                img_url = image_a.get('href')\n                img_url_list.append(img_url)\n            print(img_url_list)\n        item_dict[lotno] = [title,des,startprice,est_low,est_high,img_url_list]\n\n\n\nsave_excel(item_dict)\ndownload_img(item_dict)\n", "sub_path": "tools/Crawler/爬虫www.catawiki.com.py", "file_name": "爬虫www.catawiki.com.py", "file_ext": "py", "file_size_in_byte": 5303, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 23, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 47, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 53, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 67, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 68, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 72, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 74, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 81, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 94, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 95, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 119, "usage_type": "call"}]} +{"seq_id": "648358756", "text": "\"\"\"Tests for transform_columns.\"\"\"\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas.testing import assert_frame_equal\n\n\n@pytest.mark.functions\ndef test_transform_columns(dataframe):\n \"\"\"Checks in-place transformation of multiple columns is as expected.\"\"\"\n df = (\n dataframe.add_column(\"another\", 10)\n .add_column(\"column\", 100)\n .transform_columns([\"another\", \"column\"], np.log10)\n )\n expected = pd.DataFrame(\n {\"another\": np.ones(len(df)), \"column\": np.ones(len(df)) * 2}\n )\n assert_frame_equal(df[[\"another\", \"column\"]], expected)\n\n\n@pytest.mark.functions\ndef test_transform_column_with_suffix(dataframe):\n \"\"\"Checks `suffix` creates new columns as expected.\"\"\"\n df = (\n dataframe.add_column(\"another\", 10)\n .add_column(\"column\", 100)\n .transform_columns([\"another\", \"column\"], np.log10, suffix=\"_log\")\n )\n\n assert \"another_log\" in df.columns\n assert \"column_log\" in df.columns\n assert \"another\" in df.columns\n assert \"column\" in df.columns\n\n\n@pytest.mark.functions\ndef test_transform_column_with_new_names(dataframe):\n \"\"\"Checks `new_column_names` creates new columns as expected.\"\"\"\n df = (\n dataframe.add_column(\"another\", 10)\n .add_column(\"column\", 100)\n .transform_columns(\n [\"another\", \"column\"],\n np.log10,\n new_column_names={\"another\": \"hello\", \"column\": \"world\"},\n )\n )\n\n assert \"hello\" in df.columns\n assert \"world\" in df.columns\n assert \"another\" in df.columns\n assert \"column\" in df.columns\n\n\n@pytest.mark.functions\ndef test_transform_column_with_incomplete_new_names(dataframe):\n \"\"\"Use of `new_column_names` with additional columns (not in `column_names`\n should passthrough silently. 
Related to bug #1063.\n \"\"\"\n df = (\n dataframe.add_column(\"another\", 10)\n .add_column(\"column\", 100)\n .transform_columns(\n [\"another\", \"column\"],\n np.log10,\n new_column_names={\n \"another\": \"hello\",\n \"fakecol\": \"world\",\n },\n )\n )\n\n assert \"another\" in df.columns\n assert \"column\" in df.columns\n assert \"hello\" in df.columns\n assert \"world\" not in df.columns\n\n\n@pytest.mark.functions\ndef test_suffix_newname_validation(dataframe):\n \"\"\"Check ValueError is raised when both suffix and new_column_names are\n provided.\"\"\"\n with pytest.raises(\n ValueError,\n match=\"Only one of `suffix` or `new_column_names` should be specified\",\n ):\n _ = (\n dataframe.add_column(\"another\", 10)\n .add_column(\"column\", 100)\n .transform_columns(\n [\"another\", \"column\"],\n np.log10,\n new_column_names={\"another\": \"hello\", \"column\": \"world\"},\n suffix=\"_log\",\n )\n )\n", "sub_path": "tests/functions/test_transform_columns.py", "file_name": "test_transform_columns.py", "file_ext": "py", "file_size_in_byte": 2896, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.log10", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.testing.assert_frame_equal", "line_number": 19, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.log10", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.log10", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.log10", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 80, "usage_type": "attribute"}]} +{"seq_id": "162035383", "text": "from anki_sentence_maker.bases import ScrapeDataSource\nfrom anki_sentence_maker.headers import headers\nfrom bs4 import BeautifulSoup\nfrom exceptions import IncorrectlyTypedException\nfrom type import Data\nfrom utils import get_phonetic_notation_from_list, get_word_separated_by_delimiter\n\nimport os\nimport requests\n\nclass Oxford(ScrapeDataSource):\n def scrape(self):\n '''Scrape the oxford dictionary'''\n word_separated_by_hyphen = get_word_separated_by_delimiter(self.word, '-')\n\n response = requests.get(\n f'{os.getenv(\"OXFORD_URL\")}{word_separated_by_hyphen}',\n headers=headers\n )\n\n if 'Word not found in the dictionary' in response.text:\n raise IncorrectlyTypedException(Oxford.get_classname(), word_separated_by_hyphen)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n\n title = soup.find('h1', attrs={'class': 'headword'})\n phonetic_notation = soup.find('span', attrs={'class': 'phon'})\n definitions = soup.find_all('span', class_='def')\n examples = soup.select('ul.examples > li > span.x')\n\n name = title.text if title else ''\n phonetic_notation = phonetic_notation.text if phonetic_notation else ''\n\n if not phonetic_notation:\n word_to_list = self.word.split()\n phonetic_notation = get_phonetic_notation_from_list(word_to_list)\n\n 
definitions = [d.text.strip() for d in definitions]\n examples = [e.text.strip().capitalize() for e in examples]\n\n return Data(\n name=name,\n phonetic_notation=phonetic_notation,\n definitions=definitions,\n examples=examples,\n )\n\n def retrieve(self):\n return self.scrape()\n", "sub_path": "anki_sentence_maker/datasources/oxford.py", "file_name": "oxford.py", "file_ext": "py", "file_size_in_byte": 1741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "anki_sentence_maker.bases.ScrapeDataSource", "line_number": 11, "usage_type": "name"}, {"api_name": "utils.get_word_separated_by_delimiter", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 17, "usage_type": "call"}, {"api_name": "anki_sentence_maker.headers.headers", "line_number": 18, "usage_type": "name"}, {"api_name": "exceptions.IncorrectlyTypedException", "line_number": 22, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 24, "usage_type": "call"}, {"api_name": "utils.get_phonetic_notation_from_list", "line_number": 36, "usage_type": "call"}, {"api_name": "type.Data", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "541684943", "text": "from glob import glob\nimport shutil\nimport re\nimport os\n\nwhitespace = re.compile(r'\\s+')\n\nimport lxml.etree\n\n# class legend:\n# - inside: 0\n# - beginning of new sentence (and, thus, also beginning of word): 1\n# - beginning of new token: 2\n\ndata_dir = 'data/eltec'\ngt_dir = 'data/eltec_gt'\n\ntry:\n shutil.rmtree(gt_dir)\nexcept FileNotFoundError:\n pass\nos.mkdir(gt_dir)\n\nlanguages = sorted(set(os.listdir(f'{data_dir}/orig/')))\n#languages = ['eng']\n\nfor language in languages:\n os.mkdir(f'{gt_dir}/{language}')\n\n for fn in sorted(glob(f'{data_dir}/orig/{language}/*.xml')):\n orig_p = {}\n try:\n orig_tree = lxml.etree.parse(fn)\n except lxml.etree.XMLSyntaxError:\n print(f'invalid XML in: {fn}')\n continue\n for p in orig_tree.iterfind('//p'):\n n = p.attrib['n']\n t = ''.join(p.itertext())\n t = ' '.join(t.split())\n orig_p[n] = t\n\n segm_fn = f'{data_dir}/tokenized/{language}/xml/{os.path.basename(fn)}'\n try:\n segm_tree = lxml.etree.parse(segm_fn)\n except lxml.etree.XMLSyntaxError:\n print(f'invalid XML in: {fn}')\n\n\n all_chars, all_labels = [], []\n \n for p in segm_tree.iterfind('//p'):\n try:\n n = p.attrib['n']\n if n not in orig_p:\n print(f'Incompatible XML identifiers in: {fn}')\n continue\n \n segm_labels = []\n for s_idx, s in enumerate(p.iterfind('s')):\n if not len(s):\n continue\n segm_labels.append('')\n for w_idx, w in enumerate(s.iterfind('w')):\n if w_idx != 0:\n segm_labels.append('')\n chars = ''.join(w.itertext()).replace(' ', '')\n segm_labels.extend(chars)\n\n text, labels = list(orig_p[n]), []\n \n for char_idx, char in enumerate(text):\n if char == ' ':\n labels.append(0)\n elif segm_labels[0] == char:\n segm_labels.pop(0)\n labels.append(0)\n elif segm_labels[0] in ('', ''):\n if segm_labels[0] == '':\n labels.append(1)\n elif segm_labels[0] == '':\n labels.append(2)\n while segm_labels[0] in ('', ''):\n segm_labels.pop(0)\n segm_labels.pop(0)\n except IndexError:\n continue\n\n # mark paragraph breaks with spaces:\n text += ' '\n labels.append(0)\n\n if len(text) == len(labels):\n all_chars.extend(text)\n all_labels.extend(labels)\n else:\n print(f'-> issue parsing #{n} in {fn}')\n #for c, l in zip(text, labels):\n # print(c, ' ', l)\n 
#print(len(text), len(labels))\n \n new_fn = f'{gt_dir}/{language}/{os.path.basename(fn)}'.replace('.xml', '.tsv')\n with open(new_fn, 'w') as f:\n for c, l in zip(all_chars, all_labels):\n f.write('\\t'.join((c, str(l)))+'\\n')\n", "sub_path": "make_ground_truth.py", "file_name": "make_ground_truth.py", "file_ext": "py", "file_size_in_byte": 3432, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "re.compile", "line_number": 6, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 19, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 22, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.mkdir", "line_number": 28, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 30, "usage_type": "call"}, {"api_name": "lxml.etree.etree.parse", "line_number": 33, "usage_type": "call"}, {"api_name": "lxml.etree.etree", "line_number": 33, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 33, "usage_type": "name"}, {"api_name": "lxml.etree.etree", "line_number": 34, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 34, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "lxml.etree.etree.parse", "line_number": 45, "usage_type": "call"}, {"api_name": "lxml.etree.etree", "line_number": 45, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 45, "usage_type": "name"}, {"api_name": "lxml.etree.etree", "line_number": 46, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 46, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}]} +{"seq_id": "280195989", "text": "\"\"\" This is undocumented \"\"\"\nfrom django.shortcuts import render\nfrom django.template import loader\n\n# from django.http import HttpResponse\n# from django.http import Http404\nfrom .models import Professor, Semester_Class\n\n\n# Create your views here.\ndef index(request):\n \"\"\" This is undocumented \"\"\"\n professors = Professor.objects.all()\n classes = Semester_Class.objects.all()\n\n # pylint: disable=unused-variable\n template = loader.get_template(\"gatorgrouper/index.html\") # noqa: F841\n\n return render(\n request,\n \"gatorgrouper/index.html\",\n {\"all_professors\": professors, \"all_classes\": classes},\n )\n", "sub_path": "gatorgrouper/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 642, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "models.Professor.objects.all", "line_number": 13, "usage_type": "call"}, {"api_name": "models.Professor.objects", "line_number": 13, "usage_type": "attribute"}, {"api_name": "models.Professor", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Semester_Class.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Semester_Class.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.Semester_Class", "line_number": 14, "usage_type": "name"}, {"api_name": "django.template.loader.get_template", "line_number": 17, "usage_type": "call"}, {"api_name": "django.template.loader", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", 
"line_number": 19, "usage_type": "call"}]} +{"seq_id": "41885780", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom urllib.parse import urljoin\nfrom ..items import WeiboItem\n\n\nclass WeiboSpiderSpider(scrapy.Spider):\n name = 'meijiequan_weibo_spider'\n allowed_domains = ['www.meijiequan.com']\n start_urls = ['https://www.meijiequan.com/weibo.html']\n\n already_list = \"weibo_already_list.txt\"\n base_url = \"https://www.meijiequan.com\"\n\n custom_settings = {\n \"ITEM_PIPELINES\": {\n 'meijiequan_website.pipelines.WeiboSaveToMongodbPipeline': 301,\n }\n }\n\n def start_requests(self):\n yield scrapy.Request(url=self.start_urls[0], callback=self.get_url)\n\n # 获取个人主页链接\n def get_url(self, response):\n data_list = response.xpath('//*[@class=\"par\"]')\n for data in data_list:\n item = WeiboItem()\n item['nick_name'] = data.xpath('.//*[@class=\"title\"]/span/text()').extract()[0]\n item['kol_url'] = data.xpath('.//*[@class=\"tiaoDetail\"]/a[1]/@href').extract()[0]\n item['head_img_url'] = data.xpath('.//*[@class=\"wx_img fl\"]/img/@src').extract()[0]\n part_url = data.xpath('.//*[@class=\"tiaoDetail\"]/a[2]/@href').extract()[0]\n url = urljoin(self.base_url, part_url)\n item['collection_url'] = url\n item['service_type_id_1'] = '转发'\n item['price_1'] = data.xpath('.//*[@class=\"text_red text_item\"][2]/span[1]/text()').extract()[0].strip().strip('¥')\n item['service_type_id_2'] = '直发'\n item['price_2'] = data.xpath('.//*[@class=\"text_red text_item\"][3]/span[1]/text()').extract()[0].strip().strip('¥')\n item['service_type_id_3'] = '微任务转发'\n item['price_3'] = data.xpath('.//*[@class=\"text_red text_item\"][2]/span[2]/text()').extract()[0].strip().strip('¥')\n item['service_type_id_4'] = '微任务直发'\n item['price_4'] = data.xpath('.//*[@class=\"text_red text_item\"][3]/span[2]/text()').extract()[0].strip().strip('¥')\n item['fans_nums'] = data.xpath('.//td[5]/text()').extract()[0].strip()\n item['total_star_nums'] = data.xpath('.//td[6]/text()').extract()[0].strip()\n\n # 去重\n with open(self.already_list, 'r') as f:\n already_list = set(f.readlines())\n if url+'\\n' not in already_list:\n yield scrapy.Request(url=url, meta={'item':item}, callback=self.parse_page)\n else:\n print('url已抓取')\n # 下一页\n next_page_list = response.xpath(\".//*[@class='pagination']/li/a/@href\").extract()\n for next_page in next_page_list:\n yield scrapy.Request(url=urljoin(self.base_url, next_page), callback=self.get_url)\n\n # 解析个人详情\n def parse_page(self, response):\n item = response.meta['item']\n item['platform_id'] = '微博'\n try:\n item['huxiao_type_id'] = response.xpath('//*[@class=\"classfiy-item-con\"][1]/span[2]/text()').extract()[0].strip()\n except:\n item['huxiao_type_id'] = ''\n item['website_id'] = '媒介圈'\n item['total_post_nums'] = response.xpath('//*[@class=\"classfiy\"]//tr[2]/td[3]/text()').extract()[0].strip()\n item['following_follower_nums'] = response.xpath('//*[@class=\"mwrapper-content-group\"][2]//*[@class=\"mwt-info-tab clearfix\"]/li[3]/span/text()').extract()[0].strip()\n\n # 保存已经抓取过的url\n with open(self.already_list, \"a+\")as f:\n f.writelines(response.url+\"\\n\")\n yield item\n\n", "sub_path": "kol_platform/meijiequan_website/meijiequan_website/spiders/weibo_spider.py", "file_name": "weibo_spider.py", "file_ext": "py", "file_size_in_byte": 3553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "scrapy.Spider", "line_number": 7, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 22, 
"usage_type": "call"}, {"api_name": "items.WeiboItem", "line_number": 28, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 33, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 50, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 56, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "629706438", "text": "import config\nimport requests\nimport re\nimport urllib\n\nbot = config.bot\nbot_username = config.bot_username\n\n\ndef treatTitle(title):\n title = title.replace(\"_\", \" \")\n title = title.replace(\"[\", \"(\")\n title = title.replace(\"]\", \")\")\n title = title.replace(\"(\", \"(\")\n title = title.replace(\")\", \")\")\n return title\n\n\ndef reddit(msg):\n if msg.get('text'):\n if msg['text'].startswith('/r ') or msg['text'].startswith('!r '):\n sub = msg['text'][3:]\n if sub:\n sub = re.findall(r'\\S*', sub)\n sub = \"r/\" + sub[0] if sub[0:2] != \"r/\" else sub[0]\n url = \"http://www.reddit.com/\" + sub + \"/.json?limit=6\"\n subreddit = \"http://www.reddit.com/\" + sub\n request = requests.get(url, headers={'User-agent': 'testscript by /u/fakebot3'})\n data = request.json()\n posts = \"\"\n if request.status_code == 200:\n for post in data['data']['children']:\n domain = post['data']['domain']\n title = treatTitle(post['data']['title'])\n pUrl = urllib.parse.quote_plus(post['data']['url'])\n isNsfw_bool = post['data']['over_18']\n permalink = \"http://www.reddit.com\" + post['data']['permalink']\n if isNsfw_bool:\n isNsfw = \"nsfw\"\n else:\n isNsfw = \"sfw\"\n post = u\"`> `[{title}]({pUrl})` <{nsfw}> - `[comments]({permalink})\\n\".format(title=title,\n permalink=permalink,\n nsfw=isNsfw,\n pUrl=pUrl,\n domain=domain)\n posts += post\n if posts:\n bot.sendMessage(msg['chat']['id'],\n u\"[{sub}]({subreddit})`:`\\n\\n\".format(sub=sub, subreddit=subreddit) + posts,\n reply_to_message_id=msg['message_id'], parse_mode=\"Markdown\",\n disable_web_page_preview=True)\n else:\n bot.sendMessage(msg['chat']['id'], u\"`I couldnt find {sub}, please try again`\".format(sub=sub),\n reply_to_message_id=msg['message_id'], parse_mode=\"Markdown\",\n disable_web_page_preview=True)\n elif request.status_code == 403:\n bot.sendMessage(msg['chat']['id'], \"`Subreddit not found, please verify your input.`\",\n reply_to_message_id=msg['message_id'], parse_mode=\"Markdown\")\n else:\n bot.sendMessage(msg['chat']['id'],\n \"`There has been an error, the number {error} to be specific.`\".format(\n error=request.status_code), reply_to_message_id=msg['message_id'],\n parse_mode=\"Markdown\")\n else:\n bot.sendMessage(msg['chat']['id'],\n \"`Follow this command with the name of a subreddit to see the top 6 posts.\\nExample: /r Awww`\",\n reply_to_message_id=msg['message_id'], parse_mode=\"Markdown\")\n", "sub_path": "plugins/reddit.py", "file_name": "reddit.py", "file_ext": "py", "file_size_in_byte": 3733, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "config.bot", "line_number": 6, "usage_type": "attribute"}, {"api_name": "config.bot_username", "line_number": 7, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 28, "usage_type": "call"}, {"api_name": "urllib.parse.quote_plus", "line_number": 35, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": 
"375048542", "text": "import csv\r\nfrom datetime import datetime,timedelta\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n#result: get 316 medicines\r\ndef read_wholeyizhu(f_in='wholeyizhu_check.csv',f_out='medicine_list.dat',col=2):\r\n csv_reader =csv.reader(open(f_in))\r\n medicine =[]\r\n for i in csv_reader:\r\n if csv_reader.line_num ==1:\r\n continue\r\n medicine_temp = i[col]\r\n if medicine_temp in medicine:\r\n continue\r\n else:\r\n medicine.append(medicine_temp)\r\n medicine_list =open(f_out,'w')\r\n # medicine_list.write(\"[none]none\\n\")\r\n for k in range(0,len(medicine)):\r\n medicine_list.write(medicine[k]+'\\n')\r\n medicine_list.close()\r\n\r\ndef tiwen_file_to_list(f_temp='wholetiwen.csv'):\r\n read_temp =csv.reader(open(f_temp))\r\n tiwen_list =[]\r\n for i in read_temp:\r\n temp_list=[]\r\n temp_list.append(i[0])\r\n temp_list.append(i[1])\r\n temp_list.append(i[2])\r\n tiwen_list.append(temp_list)\r\n return tiwen_list\r\n\r\ndef yizhu_file_to_list(f_medicine='wholeyizhu_check.csv'):\r\n read_medicine =csv.reader(open(f_medicine))\r\n medicine_list=[]\r\n for i in read_medicine:\r\n if read_medicine.line_num ==1:\r\n continue\r\n temp_list=[]\r\n temp_list.append(i[0])\r\n temp_list.append(i[2])\r\n temp_list.append(i[3])\r\n temp_list.append(i[4])\r\n temp_list.append(i[5])\r\n medicine_list.append(temp_list)\r\n return medicine_list\r\n\r\ndef read_medicine_list(f_in='medicine_list.dat'):\r\n f=open(f_in)\r\n medicine_name_list=[]\r\n for i,line in enumerate(f):\r\n medicine_name = line.strip()\r\n medicine_name_list.append(medicine_name)\r\n return medicine_name_list\r\n\r\n\r\ndef function_a():\r\n temp_list =tiwen_file_to_list()\r\n medicine_list =yizhu_file_to_list()\r\n medicine_name_list = read_medicine_list()\r\n # print(medicine_list[0][3])\r\n\r\n for i in range(0,len(temp_list)):\r\n #get temp time\r\n temp_time = datetime.strptime(temp_list[i][2], \"%Y/%m/%d %H:%M\")\r\n for j in range(0,len(medicine_list)):\r\n if medicine_list[j][0]!=temp_list[i][0]:\r\n continue\r\n #get medicine_begin_time\r\n medicine_begin_time =datetime.strptime(medicine_list[j][2],\"%Y/%m/%d %H:%M\")\r\n\r\n #get medicine_end_time\r\n if medicine_list[j][3]!='':\r\n medicine_end_time =datetime.strptime(medicine_list[j][3],\"%Y/%m/%d %H:%M\")\r\n else:\r\n medicine_end_time=medicine_begin_time\r\n\r\n #get frequence\r\n gap_sequence =medicine_list[j][4]\r\n if gap_sequence=='每天1次'or '每晚1次':\r\n gap_time=24\r\n elif gap_sequence=='每天2次' or '每12小时1次' or '每12小时一次':\r\n gap_time=12\r\n elif gap_sequence=='每8小时1次'or'每天3次' or '每8小时一次':\r\n gap_time=8\r\n elif gap_sequence=='每6小时1次'or'每6小时一次'or'每天4次':\r\n gap_time=6\r\n elif gap_sequence=='每5小时1次':\r\n gap_time=5\r\n elif gap_sequence=='每4小时1次'or'每8小时2次':\r\n gap_time=4\r\n elif gap_sequence=='每3小时一次':\r\n gap_time=3\r\n elif gap_sequence=='每2小时1次':\r\n gap_time=2\r\n elif gap_sequence=='立即':\r\n gap_time=1\r\n elif gap_sequence=='每周2次':\r\n gap_time=84\r\n elif gap_sequence=='间隔'or'每隔1天':\r\n gap_time=48\r\n elif gap_sequence=='每周1次':\r\n gap_time=168\r\n\r\n while medicine_begin_time<=medicine_end_time:\r\n # if i == 0 or temp_list[i - 1][0] != temp_list[i][0]:\r\n # if medicine_begin_time < temp_time and medicine_name_list.index(medicine_list[j][1]) + 1 not in temp_list[i]:\r\n # temp_list[i].append(medicine_name_list.index(medicine_list[j][1]) + 1)\r\n # else:\r\n if temp_time-timedelta(minutes=60) <= medicine_begin_time <= temp_time:\r\n if medicine_name_list.index(medicine_list[j][1]) + 1 not in 
temp_list[i]:\r\n temp_list[i].append(medicine_name_list.index(medicine_list[j][1]) + 1)\r\n medicine_begin_time=medicine_begin_time+timedelta(hours=gap_time)\r\n print(temp_list[i])\r\n\r\n\r\ndef function_b():\r\n temp_list = tiwen_file_to_list()\r\n medicine_list = yizhu_file_to_list()\r\n medicine_name_list = read_medicine_list()\r\n\r\n for i in range(0,len(medicine_list)):\r\n # for i in range(0,50):\r\n if i%1000==0:\r\n print(i)\r\n\r\n # get medicine_begin_time\r\n medicine_begin_time = datetime.strptime(medicine_list[i][2], \"%Y/%m/%d %H:%M\")\r\n\r\n # get medicine_end_time\r\n if medicine_list[i][3] != '':\r\n medicine_end_time = datetime.strptime(medicine_list[i][3], \"%Y/%m/%d %H:%M\")\r\n else:\r\n medicine_end_time = medicine_begin_time\r\n\r\n # get frequence\r\n gap_sequence = medicine_list[i][4]\r\n if gap_sequence == '每天1次' or '每晚1次':\r\n gap_time = 24\r\n elif gap_sequence == '每天2次' or '每12小时1次' or '每12小时一次':\r\n gap_time = 12\r\n elif gap_sequence == '每8小时1次' or '每天3次' or '每8小时一次':\r\n gap_time = 8\r\n elif gap_sequence == '每6小时1次' or '每6小时一次' or '每天4次':\r\n gap_time = 6\r\n elif gap_sequence == '每5小时1次':\r\n gap_time = 5\r\n elif gap_sequence == '每4小时1次' or '每8小时2次':\r\n gap_time = 4\r\n elif gap_sequence == '每3小时一次':\r\n gap_time = 3\r\n elif gap_sequence == '每2小时1次':\r\n gap_time = 2\r\n elif gap_sequence == '立即':\r\n gap_time = 1\r\n elif gap_sequence == '每周2次':\r\n gap_time = 84\r\n elif gap_sequence == '间隔' or '每隔1天':\r\n gap_time = 48\r\n elif gap_sequence == '每周1次':\r\n gap_time = 168\r\n\r\n while medicine_begin_time <= medicine_end_time:\r\n for j in range(0, len(temp_list)):\r\n if medicine_list[i][0] != temp_list[j][0]:\r\n continue\r\n temp_time = datetime.strptime(temp_list[j][2], \"%Y/%m/%d %H:%M\")\r\n if temp_time-timedelta(minutes=180) <= medicine_begin_time <= temp_time :\r\n if medicine_name_list.index(medicine_list[i][1]) + 1 not in temp_list[j]:\r\n temp_list[j].append(medicine_name_list.index(medicine_list[i][1]) + 1)\r\n break\r\n medicine_begin_time=medicine_begin_time+timedelta(hours=gap_time)\r\n\r\n f_out=open('tiwen_yongyao_infor.dat','w')\r\n for i in range(0, len(temp_list)):\r\n for k in range(0,len(temp_list[i])):\r\n f_out.write('%s '%str(temp_list[i][k]))\r\n f_out.write('\\n')\r\n f_out.close()\r\n\r\n\r\ndef period_time():\r\n temp_list=tiwen_file_to_list()\r\n f_out=open('tiwen_period_time.dat','w')\r\n for i in range(0,len(temp_list)-1):\r\n if temp_list[i][0]==temp_list[i+1][0]:\r\n temp_time = datetime.strptime(temp_list[i][2], \"%Y/%m/%d %H:%M\")\r\n temp_time2 =datetime.strptime(temp_list[i+1][2],\"%Y/%m/%d %H:%M\")\r\n f_out.write(\"%s\\n\"%(temp_time2-temp_time))\r\n\r\n\r\ndef analyse_period_time(f_in='tiwen_period_time.dat'):\r\n f_in=open(f_in)\r\n array =np.zeros((24))\r\n for i,lines in enumerate(f_in):\r\n line =lines.strip()\r\n try:\r\n temp_time = datetime.strptime(line, \"%H:%M:%S\")\r\n except ValueError:\r\n continue\r\n\r\n array[temp_time.hour]+=1\r\n x =range(0,24)\r\n y =sum(array)\r\n # for i in range(1,24):\r\n # array[i]+=array[i-1]\r\n\r\n print(array)\r\n print(y)\r\n plt.plot(x,array[x]/y)\r\n plt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n print(\"main\")\r\n # function_a()\r\n function_b()\r\n # read_wholeyizhu()\r\n # read_wholeyizhu(f_out='medicine_frequent.dat',col =5)\r\n", "sub_path": "Medicine v2/medicine_process.py", "file_name": "medicine_process.py", "file_ext": "py", "file_size_in_byte": 8233, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "69", "api": [{"api_name": "csv.reader", "line_number": 7, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 24, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 35, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 71, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 129, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 168, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 168, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 169, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 173, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 188, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 188, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 189, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 189, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 195, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 199, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}]} +{"seq_id": "142842743", "text": "# Copyright (c) 2012 Web Notes Technologies Pvt Ltd (http://erpnext.com)\n# \n# MIT License (MIT)\n# \n# Permission is hereby granted, free of charge, to any person obtaining a \n# copy of this software and associated documentation files (the \"Software\"), \n# to deal in the Software without restriction, including without limitation \n# the rights to use, copy, modify, merge, publish, distribute, sublicense, \n# and/or sell copies of the Software, and to permit persons to whom the \n# Software is furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in \n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, \n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A \n# PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF \n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \n# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n# \n\n\"\"\"\nGenerate index.cgi html\n\nLoads index.html from the template with:\n\n1. bootinfo\n2. static html of home / if _escaped_fragment_ is given\n3. top menus and bottom menus\n\n\"\"\"\n\nimport webnotes\n\nbody_html = \"\"\"\n
\n\n\tLoading...\n\n\n\n\n\n %s\n\n
\n\"\"\"\n\ndef get():\n\t\"\"\"get index html\"\"\"\n\timport webnotes\n\tfrom jinja2 import Template\n\t\n\twith open('lib/conf/index.html', 'r') as f:\n\t\ttemplate = Template(f.read())\n\n\t# google crawler\n\tif '_escaped_fragment_' in webnotes.form:\n\t\tpage = webnotes.form_dict['_escaped_fragment_']\n\t\tif not page:\n\t\t\tpage = webnotes.user.get_home_page()\n\t\t\t\n\t\treturn template.render(bootinfo = '', style_tag='', version='0', analytics_code = '',\\\n\t\t\tscript_tag = '', body_html=html_snapshot(page), ajax_meta_tag = '')\n\t\n\t# home page\n\telse:\n\t\timport webnotes.session_cache\n\t\tfrom build.project import get_version\n\t\timport json\n\n\t\tbootdict = webnotes.session_cache.get()\n\t\tbootinfo = \"\"\"var wn = {}; wn.boot = %s;\"\"\" % json.dumps(bootdict)\n\n\t\tif webnotes.session['user'] == 'Guest':\n\t\t\tscript_tag = ''\n\t\t\tstyle_tag = ''\n\t\telse:\n\t\t\tscript_tag = ''\n\t\t\tstyle_tag = ''\n\n\t\treturn template.render(bootinfo = bootinfo, version = get_version(),\n\t\t\tscript_tag = script_tag, style_tag = style_tag, body_html=body_html % '',\n\t\t\tajax_meta_tag = '', \n\t\t\tanalytics_code = bootdict.get('analytics_code', '') or '')\n\t\t\t\ndef html_snapshot(page):\n\t\"\"\"get html snapshot for search bot\"\"\"\n\tfrom webnotes.widgets.page import get_page_html\t\n\tfrom webnotes.model.doc import Document\n\n\tdoc = Document('Website Settings', 'Website Settings')\n\tdoc.content = get_page_html(page)\n\tdoc.header_menu = doc.footer_menu = ''\n\tdoc.page_name = page\n\t\n\tfor m in webnotes.conn.sql(\"\"\"select parentfield, label, url, custom_page\n\t\tfrom `tabTop Bar Item` where parent='Top Bar Settings' order by idx\"\"\", as_dict=1):\n\t\n\t\tm['std_page'] = m.get('url') or m('custom_page')\n\n\t\tif m['parentfield']=='top_bar_items':\t\t\t\t\n\t\t\tdoc.header_menu += '
• %(label)s • ' % m\n\t\telse:\n\t\t\tdoc.footer_menu += '• %(label)s • ' % m\n\t\n\treturn """\n\t\n\t\t%(brand_html)s\n\t\t\n\t\t\t%(header_menu)s\n\t\t\n\t\n\t%(content)s\n\t\n\t\t\n\t\t\t%(footer_menu)s\n\t\t\n\t\tAddress: %(address)s\n\t\t© %(copyright)s\n\t\tPowered by erpnext.com\n\t\t\n\t\t\tThis page is for search engines, for standard browsers click \n\t\t\there\n\t\t\n\t
    \n\t\"\"\" % doc.fields\n\t\n\t\n", "sub_path": "py/webnotes/cms/index.py", "file_name": "index.py", "file_ext": "py", "file_size_in_byte": 4285, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "jinja2.Template", "line_number": 58, "usage_type": "call"}, {"api_name": "webnotes.form", "line_number": 61, "usage_type": "attribute"}, {"api_name": "webnotes.form_dict", "line_number": 62, "usage_type": "attribute"}, {"api_name": "webnotes.user.get_home_page", "line_number": 64, "usage_type": "call"}, {"api_name": "webnotes.user", "line_number": 64, "usage_type": "attribute"}, {"api_name": "webnotes.session_cache.get", "line_number": 75, "usage_type": "call"}, {"api_name": "webnotes.session_cache", "line_number": 75, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 76, "usage_type": "call"}, {"api_name": "webnotes.session", "line_number": 78, "usage_type": "attribute"}, {"api_name": "build.project.get_version", "line_number": 85, "usage_type": "call"}, {"api_name": "webnotes.model.doc.Document", "line_number": 95, "usage_type": "call"}, {"api_name": "webnotes.widgets.page.get_page_html", "line_number": 96, "usage_type": "call"}, {"api_name": "webnotes.conn.sql", "line_number": 100, "usage_type": "call"}, {"api_name": "webnotes.conn", "line_number": 100, "usage_type": "attribute"}]} +{"seq_id": "54523015", "text": "# -*- coding: utf-8 -*-\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass OptionDialog(QtWidgets.QDialog):\n def __init__(self, parent, name, options = None):\n super(QtWidgets.QDialog, self).__init__()\n self.setObjectName(name + \" Dialog\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.groupBox = QtWidgets.QGroupBox(self)\n self.groupBox.setObjectName(\"groupBox\")\n self.groupBox.setTitle(name)\n self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.options = QtWidgets.QComboBox(self.groupBox)\n self.options.setObjectName(\"options\")\n self.set_options(options)\n self.verticalLayout.addWidget(self.options)\n self.verticalLayout_2.addWidget(self.groupBox)\n self.buttonBox = QtWidgets.QDialogButtonBox(self)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)\n self.buttonBox.setCenterButtons(True)\n self.buttonBox.setObjectName(\"buttonBox\")\n self.verticalLayout_2.addWidget(self.buttonBox)\n\n self.buttonBox.accepted.connect(self.accept)\n QtCore.QMetaObject.connectSlotsByName(self)\n \n def current_option(self):\n return self.options.currentText()\n \n def set_options(self, options):\n for option in options:\n self.options.addItem(option)\n self.options.setCurrentIndex(0)\n \n\n", "sub_path": "dataset_tools/src/ui/OptionDialog.py", "file_name": "OptionDialog.py", "file_ext": "py", "file_size_in_byte": 1641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 6, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialog", "line_number": 8, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 10, "usage_type": "call"}, 
{"api_name": "PyQt5.QtWidgets", "line_number": 10, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QGroupBox", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 15, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 15, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QComboBox", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 18, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox", "line_number": 23, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 23, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 24, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QDialogButtonBox", "line_number": 25, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 25, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QMetaObject.connectSlotsByName", "line_number": 31, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.QMetaObject", "line_number": 31, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "278991215", "text": "import json\n\nfrom GeradorDeAnuncios.CriadorDeAnunciosMagento.manipulateSku.jsonKitControl import jsonKitControl\n\n\nclass ControlSku:\n\n def __init__(self, json='', test=True):\n self.__jsonType = json.upper()\n self.__caminho = f'/home/bertho/Documents/AutParts/DadosUniversais/Json/dados_sap_kit/json{self.__jsonType}.json'\n self.__estadoTestando = test\n self._dicionarioAtual = self.__abreJsonRetornaDict()\n self._jsonKit = jsonKitControl(self.dictDeConsultaRapida())\n\n def __abreJsonRetornaDict(self):\n caminho = self.__caminho\n try:\n config = json.loads(open(caminho, encoding=\"utf8\").read())\n return config\n except FileNotFoundError:\n raise ValueError(\"Arquivo \" + str(caminho) + \" Não encontrado - STATUS - test=\" +\n str(self.__estadoTestando))\n\n def _skuModelWithValue(self, sku, value):\n return str(sku) + '-' + str((4 - len(str(value))) * '0') + str(value)\n\n def dictDeConsultaRapida(self):\n \"\"\"\n key = 'SKU' obs: sem traço\n return data {}\n \"\"\"\n dic = self._dicionarioAtual\n if dic is None:\n raise ValueError(\"Dicionario nao pode ser aberto\")\n fastDic = {}\n for test in dic:\n if test == {}:\n continue\n fastDic[str(test['id'])] = test['data']\n\n return fastDic\n\n def updateSkuValue(self, sku, add=1):\n sku_atual = ''\n value_atual = ''\n verificado = False\n if add <= 0:\n raise ValueError(f'Não se pode adicionar valores 0 ou menores, como {add} - arq - openSkuControl')\n dic = self._dicionarioAtual\n for di in dic:\n if di['id'] == sku:\n di['data']['value'] += add\n sku_atual = f'{sku}'\n value_atual = f'{di[\"data\"][\"value\"]}'\n verificado = True\n break\n if verificado:\n self._dicionarioAtual = dic\n return self._skuModelWithValue(sku_atual, value_atual)\n\n def createNewKit(self, linha):\n self._dicionarioAtual.append(linha)\n\n def saveJson(self, direto=False):\n senha = str(input('insira a senha para salvar json ' + self.__jsonType + ': \\n'))\n\n if self.__estadoTestando and not direto:\n print(f'Json {self.__colorir(self.__jsonType, cor=\"amarelo\")} não salvo')\n elif not self.__estadoTestando or direto:\n if senha == 'idkfa123':\n with open(self.__caminho, \"w\", encoding='utf8') as jsonFile:\n json.dump(self._dicionarioAtual, 
jsonFile, ensure_ascii=False)\n print(f'Json {self.__colorir(self.__jsonType, cor=\"azul\")} salvo com sucesso')\n else:\n print(f'Json {self.__colorir(self.__jsonType, cor=\"amarelo\")} '\n f'não salvo\\nTEST:' + str(self.__estadoTestando))\n else:\n print('sei lá qq deu')\n\n def getSkuKitFromSap(self, dic):\n skuLimpa = self._jsonKit.getSkuKitFromSap(dic)\n return skuLimpa\n\n def __colorir(self, texto, cor='amarelo'):\n if cor == 'amarelo':\n return f'\\033[33m{texto}\\033[0;0m'\n elif cor == 'azul':\n return f'\\033[34m{texto}\\033[0;0m'\n else:\n return texto\n\n# dum = ControlSku(json='sap')\n#\n# print('AUAB001: ', end='')\n# print(dum.dictDeConsultaRapida()['AUAB001']['value'])\n# dum.saveJson()\n", "sub_path": "GeradorDeAnuncios/CriadorDeAnunciosMagento/manipulateSku/ControlSku.py", "file_name": "ControlSku.py", "file_ext": "py", "file_size_in_byte": 3475, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "json.upper", "line_number": 9, "usage_type": "call"}, {"api_name": "GeradorDeAnuncios.CriadorDeAnunciosMagento.manipulateSku.jsonKitControl.jsonKitControl", "line_number": 13, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "636526318", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Dimitrios Paraschas\n# 1562\n# Dimitrios Greasidis\n# 1624\n# Stefanos Papanastasiou\n# 1608\n\n\nfrom __future__ import print_function\nimport sys\nimport logging\nimport json\nimport socket\nfrom tqdm.auto import tqdm\n\nclass TqdmLoggingHandler(logging.StreamHandler):\n def __init__(self, level=logging.NOTSET):\n super().__init__(level)\n\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record) \n\n\ndef sigint_handler(signal, frame):\n \"\"\"\n handle keyboard interrupts (CTRL-C)\n \"\"\"\n\n # cli_output\n print()\n logging.info(\"CTRL-C received, exiting\")\n sys.exit(0)\n\n\ndef send_message(connection, message):\n try:\n connection.sendall(message.encode(\"utf8\"))\n except socket.error:\n logging.error(\"error, send_message\")\n sys.exit(-1)\n\n logging.info(\"message sent: \" + message)\n\n\ndef json_load(json_file):\n with open(json_file, \"r\") as file_:\n json_ = json.load(file_)\n\n return json_\n\n\ndef json_save(json_file, json_):\n with open(json_file, \"w+\") as file_:\n json.dump(json_, file_, sort_keys=True, indent=4, separators=(\",\", \": \"))\n\n\nif __name__ == \"__main__\":\n print(\"This file is meant to be imported, not run.\")\n", "sub_path": "client6/library.py", "file_name": "library.py", "file_ext": "py", "file_size_in_byte": 1423, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.StreamHandler", "line_number": 19, "usage_type": "attribute"}, {"api_name": "logging.NOTSET", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tqdm.auto.tqdm.write", "line_number": 26, "usage_type": "call"}, {"api_name": "tqdm.auto.tqdm", "line_number": 26, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 41, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 42, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 48, "usage_type": "attribute"}, {"api_name": "logging.error", "line_number": 49, "usage_type": "call"}, 
{"api_name": "sys.exit", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 52, "usage_type": "call"}, {"api_name": "json.load", "line_number": 57, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 64, "usage_type": "call"}]} +{"seq_id": "350038671", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.http import HttpResponseForbidden, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom kexie_game.views import change_count\nfrom kexie_game.models import User\nfrom battle.models import Level\nimport hashlib\nimport redis\nimport json\n\nTOKEN = \"laoganbunvzhuang\"\nr_belonging = redis.Redis(db=0) # OpenID-{item_id: count}\nr_expire = redis.Redis(db=2) # PortalID-LastMinerTime\n\n\n@csrf_exempt\ndef join(request):\n # 用户验证\n if request.method == \"GET\":\n return HttpResponseForbidden()\n data = json.loads(request.body.decode('utf-8'))\n openid = data.get(\"openid\")\n sign = data.get(\"sign\")\n nonce = data.get(\"nonce\")\n if sign != hashlib.md5(openid + TOKEN + nonce).hexdigest():\n return HttpResponseForbidden()\n\n # 进入战斗\n level = int(data.get(\"level\"))\n if r_belonging.exists(openid):\n items = [{\"id\": key, \"count\": value} for key, value in r_belonging.hgetall(openid).items()]\n else:\n items = []\n u = User.objects.get(openid=openid)\n if level > u.level + 1:\n return JsonResponse({\"status\": \"Error\", \"Error\": \"LevelWrong\"})\n return JsonResponse({\"status\": \"Success\", \"items\": items})\n\n\ndef end(request):\n # 用户验证\n if request.method == \"GET\":\n return HttpResponseForbidden()\n data = json.loads(request.body.decode('utf-8'))\n openid = data.get(\"openid\")\n sign = data.get(\"sign\")\n nonce = data.get(\"nonce\")\n if sign != hashlib.md5(openid + TOKEN + nonce).hexdigest():\n return HttpResponseForbidden()\n\n # 战斗结算\n level = data.get(\"level\")\n l = Level.objects.get(level=level)\n level_token = data.get(\"token\")\n if level_token != l.token:\n return JsonResponse({\"status\": \"Error\", \"Error\": \"LevelWrong\"})\n items = json.loads(l.items)\n for item_id, count in items.items():\n change_count(int(count), item_id, openid)\n return JsonResponse({\"status\": \"Success\", \"items\": [{\"id\": key, \"count\": value} for key, value in items.items()]})\n", "sub_path": "battle/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2084, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "redis.Redis", "line_number": 13, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 14, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 21, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 26, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 27, "usage_type": "call"}, {"api_name": "kexie_game.models.User.objects.get", "line_number": 35, "usage_type": "call"}, {"api_name": "kexie_game.models.User.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "kexie_game.models.User", "line_number": 35, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 37, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 38, "usage_type": "call"}, {"api_name": "django.views.decorators.csrf.csrf_exempt", "line_number": 17, "usage_type": "name"}, {"api_name": 
"django.http.HttpResponseForbidden", "line_number": 44, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 45, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 49, "usage_type": "call"}, {"api_name": "django.http.HttpResponseForbidden", "line_number": 50, "usage_type": "call"}, {"api_name": "battle.models.Level.objects.get", "line_number": 54, "usage_type": "call"}, {"api_name": "battle.models.Level.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "battle.models.Level", "line_number": 54, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 57, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 58, "usage_type": "call"}, {"api_name": "kexie_game.views.change_count", "line_number": 60, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "221700402", "text": "#\n# Questions 1 through 3.\n#\nimport requests\nimport requests_cache\nimport json\n\n# Install requests_cache with no expiration.\nrequests_cache.install_cache('requests_cache', expire_after=None)\n\n#\n# Question 3\n#\n\ndef get_movies_from_tastedive(movie):\n baseurl = \"https://tastedive.com/api/similar\"\n params_diction = {} # Set up an empty dictionary for query parameters\n params_diction[\"q\"] = movie\n # The type=movies is not consistent with the api docs.\n # It specifies singular, (i.e. type=movie)\n params_diction[\"type\"] = \"movies\"\n params_diction[\"limit\"] = 5 # get at most 5 results\n params_diction[\"k\"] = '350179-umichpyt-IXUEPCY9'\n resp = requests.get(baseurl, params=params_diction) \n return resp.json() \n\ndef extract_movie_titles(dict):\n movies = []\n for movie in dict['Similar']['Results']:\n movies.append(movie['Name'])\n return movies\n\ndef get_related_titles(movieList):\n relatedTitles = []\n for movie in movieList:\n m = get_movies_from_tastedive(movie)\n relatedTitleList = extract_movie_titles(m)\n for title in relatedTitleList:\n if title not in relatedTitles:\n relatedTitles.append(title) \n return relatedTitles\n\n# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages\n# extract_movie_titles(get_movies_from_tastedive(\"Tony Bennett\")) \n# extract_movie_titles(get_movies_from_tastedive(\"Black Panther\"))\n\n\n# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages\n# print(get_related_titles([\"Black Panther\"]))\n# print(get_related_titles([\"Black Panther\", \"Captain Marvel\"]))\n# get_related_titles([])\n\n# \n# Question 4\n#\ndef get_movie_data(title):\n#http://www.omdbapi.com/?apikey=68415c82&type=movie&t=John+Wick\n baseurl = \"http://www.omdbapi.com\"\n params_diction = {} # Set up an empty dictionary for query parameters\n params_diction['t'] = title\n params_diction['r'] = 'json'\n params_diction['apikey'] = '68415c82'\n resp = requests.get(baseurl, params=params_diction) \n return resp.json() \n\n \n# some invocations that we use in the automated tests; uncomment these if you are getting errors and want better error messages\n# print('\\nQuestion 4 -----------------------------------------------\\n')\n# print(get_movie_data(\"Black Panther\"))\n# get_movie_data(\"Baby Mama\")\n\n#\n# Question 5\n#\n# Write a function called get_movie_rating. It takes an OMDB dictionary result for \n# one movie and extracts the Rotten Tomatoes rating as an integer. 
For example, \n# if given the OMDB dictionary for “Black Panther”, it would return 97. If there \n# is no Rotten Tomatoes rating, return 0.\ndef get_movie_rating(dict):\n # print(json.dumps(dict, indent = 2))\n rating = 0\n for source in dict['Ratings']:\n if source['Source'] == 'Rotten Tomatoes':\n rating = int(source['Value'][:-1]) \n return rating\n\nprint('\\nQuestion 5 -----------------------------------------------\\n')\n#print(get_movie_rating(get_movie_data(\"Black Panther\")))\n#print(get_movie_rating(get_movie_data(\"Sherlock Holmes\")))\n#print(get_movie_rating(get_movie_data(\"Finding Nemo\")))\n\n\n#\n# Question 6\n#\n# Define a function get_sorted_recommendations. It takes a list of movie titles \n# as an input. It returns a sorted list of related movie titles as output, up \n# to five related movies for each input movie title. The movies should be sorted \n# in descending order by their Rotten Tomatoes rating, as returned by the \n# get_movie_rating function. Break ties in reverse alphabetic order, so that \n# ‘Yahşi Batı’ comes before ‘Eyyvah Eyvah’.\n\ndef get_sorted_recommendations(titleList):\n ratedDict = {}\n #for title in titleList:\n relatedTitlesList = get_related_titles(titleList)\n for relatedTitle in relatedTitlesList:\n movieData = get_movie_data(relatedTitle)\n if movieData['Response'] == 'True':\n movieRating = get_movie_rating(movieData)\n ratedDict[relatedTitle] = movieRating\n \n kys = ratedDict.keys()\n sortedTitles = sorted(kys, reverse = True, key = lambda x: ratedDict[x])\n \n return sortedTitles\n\nprint('\\nQuestion 6 -----------------------------------------------\\n')\n#print(get_movies_from_tastedive('Black Panther'))\n# print(get_movie_data(\"Death Race\"))\n\n#print(get_sorted_recommendations([\"Black Panther\", \"Bridesmaids\"]))\n#print(get_sorted_recommendations([\"Black Panther\"]))\n#print(get_sorted_recommendations([\"Sherlock Holmes\"]))\nprint(get_sorted_recommendations([\"Bridesmaids\", \"Sherlock Holmes\"]))", "sub_path": "c3/c3project-cache.py", "file_name": "c3project-cache.py", "file_ext": "py", "file_size_in_byte": 4654, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "requests_cache.install_cache", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "433723381", "text": "#!/usr/bin/python3\n\"\"\"\nDisplays a continuously-updating ticker for statistics related to Ethereum mining.\n\"\"\"\n\nimport requests\nimport json\nimport sys, os\nfrom time import sleep\n\nELECTRIC_COST_PER_KWH=0.029\nWATTS_CONSUMED=140\nMEGAHASH_RATE=46\nFEES=.02\n\nif True:\n\ttry:\n\t\tr = requests.get(\"https://api.minerstat.com/v2/coins\", params={'list':'ETH'})\n\t\tapi_fetch = r.json()[0]\n\n\t\t\"\"\"\n\t\tAvailable keys in 'api_fetch':\n\t\tid\t\t\tstring\t\tUnique identifier of the coin.\n\t\tcoin\t\t\tstring\t\tCoin's ticker.\n\t\tname\t\t\tstring\t\tCoin's name.\n\t\ttype\t\t\tstring\t\tCoin's type. It can be coin or pool, where pool is multi pool, such as NiceHash, Zpool, etc.\n\t\talgorithm\t\tstring\t\tCoin's algorithm.\n\t\tnetwork_hashrate\tinteger\tCoin's network hashrate in H/s. If coin has no data on network hashrate, the network hashrate is -1.\n\t\tdifficulty\t\treal\t\tCoin's difficulty. 
If coin has no data on difficulty, the difficulty is -1.\n\t\treward\t\t\treal\t\tCoin's reward for 1 H/s for 1 hour of mining based on the most current difficulty. If coin has no data on reward, the reward is -1.\n\t\treward_unit\t\tstring\t\tCoin's reward unit. If a coin is multi pool, the reward unit can be BTC or XMR or whichever reward is provided by the multi pool.\n\t\treward_block\t\treal\t\tCoin's block reward. If coin has no data on the block's reward, the block's reward is -1.\n\t\tprice\t\t\treal\t\tCoin's price in USD. If coin has no data on price, the price is -1.\n\t\tvolume\t\t\treal\t\tCoin's last 24h volume in USD. If coin has no data on volume, the volume is -1.\n\t\tupdated\t\t\tinteger\t\tThe UNIX timestamp of the last time the coin was updated.\n\t\t\"\"\"\n\n\t\tnetwork_hashrate = api_fetch.get(\"network_hashrate\")\n\t\tdifficulty = api_fetch.get(\"difficulty\")\n\t\treward = api_fetch.get(\"reward\")\n\t\treward_block = api_fetch.get(\"reward_block\")\n\t\tprice = api_fetch.get(\"price\")\n\t\tvolume = api_fetch.get(\"volume\")\n\n\texcept Exception as e:\n\t\tprint(f\"Exception occurred while requesting current ETH price: {e}\")\n\n\teth_reward_per_hour = reward * MEGAHASH_RATE * 1000 * 1000\n\tusd_reward_per_hour = eth_reward_per_hour*price\n\tusd_reward_per_month = usd_reward_per_hour * 24 * 30.5\n\telectric_cost_per_month = ELECTRIC_COST_PER_KWH*(WATTS_CONSUMED/1000)*24*30.5\n\tusd_reward_per_month_less_costs = usd_reward_per_month - (usd_reward_per_month*FEES) - electric_cost_per_month\n\n\tprint(f\"${usd_reward_per_month:.2f}/mo\\n---\")\n\tprint(\"Start Mining | bash='sudo /usr/bin/systemctl start cryptomining.service' terminal=false\")\n\tprint(\"Stop Mining | bash='sudo /usr/bin/systemctl stop cryptomining.service' terminal=false\")\n\tprint(\"---\")\n\tprint(f\"ETH Price:\\t\\t${price:.2f}\")\n\tprint(f\"Reward:\\t\\t{reward_block:.2f} ETH\")\n\tprint(f\"Net Profit:\\t\\t${usd_reward_per_month_less_costs:.2f}/mo\")\n\n\t# We divide the reward by the percentage of the block which are transaction fees.\n\t# e.g. A 4ETH block would be a 2ETH block after London, so we would slash our monthly profit by /2\n\t# e.g. A 6ETH block would be a 2ETH block after London, so we would slash our monthly profit by /3\n\n\tprint(f\"After London:\\t${((usd_reward_per_month*(1-FEES))/(reward_block/2)) - electric_cost_per_month:.2f}/mo\") \n\tprint(\"---\")\n\n\twith open('/sys/class/drm/card0/device/hwmon/hwmon1/temp3_input','r') as gpu_temp_file:\n\t\tgpu_temp = int(gpu_temp_file.read())/1000\n\n\twith open('/sys/class/drm/card0/device/hwmon/hwmon1/fan1_input','r') as fan_speed_file:\n\t\tfan_speed = int(fan_speed_file.read())\n\n\tprint(f\"GPU Temp:\\t\\t{gpu_temp}C\")\n\tprint(f\"Fan Speed:\\t\\t{fan_speed}rpm\")\n", "sub_path": "argos/ethprofit.r.2m.py", "file_name": "ethprofit.r.2m.py", "file_ext": "py", "file_size_in_byte": 3438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "614447398", "text": "import tensorflow as tf\nimport tensorflow_probability as tfp\n\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense, Reshape, Lambda, Dropout\n\nimport numpy as np\nimport os\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\n# Dimensions\nD = 32\nbatch_size = 10\n\nN = 9\nT = 5\nY = 4\n\nclass Generator(Model) :\n \"\"\"\n Cf. 
section 3.1 and 5 (\"Generator architecure\") of the paper\n - input of 32 neurons, 3 layer MLP (128, 256, 512) with tanh as activation functions\n - projection of the output into the shapes of A (N, N, Y) and X (N, T) + gumbel softmax \n \"\"\"\n def __init__(self) :\n super(Generator, self).__init__()\n\n self.dropout_rate = .3\n self.dense1 = Dense(128, activation='tanh')\n self.dropout1 = Dropout(self.dropout_rate)\n self.dense2 = Dense(256, activation='tanh')\n self.dropout2 = Dropout(self.dropout_rate)\n self.dense3 = Dense(512, activation='tanh')\n self.dropout3 = Dropout(self.dropout_rate)\n\n self.denseA = Dense(N*N*Y, activation='relu')\n self.dropoutA = Dropout(self.dropout_rate)\n self.reshapeA = Reshape((N,N,Y))\n\n self.denseX = Dense(N*T)\n self.dropoutX = Dropout(self.dropout_rate)\n self.reshapeX = Reshape((N,T))\n\n self.argmax = Lambda(lambda x: tf.keras.backend.argmax(x))\n \n\n def call(self, z) :\n # MLP\n x = self.dense1(z)\n x = self.dropout1(x)\n x = self.dense2(x)\n x = self.dropout2(x)\n x = self.dense3(x)\n x = self.dropout3(x)\n\n # Projection\n A = self.denseA(x)\n A = self.dropoutA(A)\n A = self.reshapeA(A)\n\n X = self.denseX(x)\n X = self.dropoutX(X)\n X = self.reshapeX(X)\n\n # Gumbel softmax\n gumbel_dist_A = tfp.distributions.RelaxedOneHotCategorical(.15, A)\n generated_A = gumbel_dist_A.sample(1)\n\n gumbel_dist_X = tfp.distributions.RelaxedOneHotCategorical(0.15, X)\n generated_X = gumbel_dist_X.sample(1)\n\n return generated_A, generated_X\n", "sub_path": "generator.py", "file_name": "generator.py", "file_ext": "py", "file_size_in_byte": 2051, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.environ", "line_number": 10, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Model", "line_number": 21, "usage_type": "name"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 39, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Reshape", "line_number": 40, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 42, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dropout", "line_number": 43, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Reshape", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Lambda", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.keras.backend.argmax", "line_number": 46, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 46, "usage_type": "attribute"}, {"api_name": "tensorflow_probability.distributions.RelaxedOneHotCategorical", "line_number": 68, "usage_type": "call"}, {"api_name": "tensorflow_probability.distributions", "line_number": 68, "usage_type": "attribute"}, {"api_name": "tensorflow_probability.distributions.RelaxedOneHotCategorical", "line_number": 71, "usage_type": "call"}, 
{"api_name": "tensorflow_probability.distributions", "line_number": 71, "usage_type": "attribute"}]} +{"seq_id": "612199746", "text": "# -*- coding:utf-8 -*-\n'''\nCreated on 2014-7-7\n\n@author: baoguodong.kevin\n'''\nimport urllib2\nimport urllib\nimport json\nfrom business.actionlog.dbman import ActionLog\nfrom django.conf import settings\n\nclass RequestClient():\n def __init__(self, base_url=None):\n self.base_url = base_url if base_url else settings.BILLING_BASE_URL\n # self.base_url = 'http://127.0.0.1:8080/v1.0/'\n\n def request(self, url, method='GET', headers={}, data=None, isJson=True):\n if isJson:\n headers['Content-Type'] = 'application/json'\n else:\n data = urllib.urlencode(data)\n req = urllib2.Request(url, headers=headers)\n if method in ['PUT', 'DELETE']:\n req.get_method = lambda: method\n response = urllib2.urlopen(req, data)\n return response\n\n\ndef request(url, method='POST', headers={}, data=None, isJson=True):\n request_Client = RequestClient()\n return json.loads(request_Client.request(request_Client.base_url+url, method, headers, data, isJson=True).read())\n\n\nif __name__ == \"__main__\":\n pass\n # # ActionLog().insert_log({\n # # \"user_id\": 'cvv',\n # # \"user_name\": '323',\n # # \"resource_name\":'323',\n # # \"resource_type\":\"客户\",\n # # \"action_id\":\"customer_create\",\n # # \"action_name\":\"创建客户\",\n # # \"detail\": \"cvv\",\n # # \"status\": 'mamamiya'\n # # })\n # # pass\n # data={'user_id': 4\n # }\n # data = json.dumps(data)\n # response = request('customer/getuserrole', method='POST', data=data)\n # print response['role']\n\n", "sub_path": "business/RequestClient.py", "file_name": "RequestClient.py", "file_ext": "py", "file_size_in_byte": 1658, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.conf.settings.BILLING_BASE_URL", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 15, "usage_type": "name"}, {"api_name": "urllib.urlencode", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 26, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "161147566", "text": "from timeit import default_timer as timer\n\nimport numpy as np\nimport pygame\n\nimport helpers\nimport gameloop\nimport imagehandler\nimport inputhandler\nimport menu\nimport optionhandler\nimport soundhandler\n\n\nclass Main:\n def __init__(self):\n # init mixer first to prevent audio delay\n pygame.mixer.pre_init(44100, -16, 2, 512)\n pygame.mixer.init()\n\n pygame.mixer.set_num_channels(16)\n\n pygame.init()\n pygame.display.set_caption('FIGHTBATTLE')\n\n self.option_handler = optionhandler.OptionHandler()\n\n if self.option_handler.fullscreen:\n self.screen = pygame.display.set_mode(self.option_handler.resolution,\n flags=(pygame.FULLSCREEN | pygame.HWSURFACE))\n else:\n self.screen = pygame.display.set_mode(self.option_handler.resolution)\n\n self.image_handler = imagehandler.ImageHandler()\n self.sound_handler = soundhandler.SoundHandler()\n self.input_handler = inputhandler.InputHandler()\n\n self.loop = gameloop.GameLoop(self.option_handler)\n\n self.clock = pygame.time.Clock()\n\n self.time_step = 15.0 / self.option_handler.fps\n\n self.font = pygame.font.Font(None, 30)\n\n self.sound_handler.set_volume(self.option_handler.sfx_volume)\n 
self.sound_handler.set_music_volume(self.option_handler.music_volume)\n self.sound_handler.set_music('line')\n\n def main_loop(self):\n while self.loop.state != menu.State.QUIT:\n fps = self.clock.get_fps()\n\n if self.option_handler.fps == 999:\n if fps != 0:\n self.time_step = min(15.0 / fps, 15.0 / 60.0)\n\n self.loop.input(self.input_handler)\n self.loop.update(self.time_step)\n self.loop.draw(self.screen, self.image_handler)\n self.loop.play_sounds(self.sound_handler)\n\n fps_str = self.font.render(str(int(fps)), True, self.image_handler.debug_color)\n self.screen.blit(fps_str, (50, 50))\n\n pygame.display.update()\n self.clock.tick(self.option_handler.fps)\n\n\ndef main():\n main_window = Main()\n main_window.main_loop()\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2239, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pygame.mixer.pre_init", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.mixer.init", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.mixer.set_num_channels", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 21, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.display.set_caption", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 24, "usage_type": "attribute"}, {"api_name": "optionhandler.OptionHandler", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 29, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pygame.FULLSCREEN", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.HWSURFACE", "line_number": 30, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 32, "usage_type": "attribute"}, {"api_name": "imagehandler.ImageHandler", "line_number": 34, "usage_type": "call"}, {"api_name": "soundhandler.SoundHandler", "line_number": 35, "usage_type": "call"}, {"api_name": "inputhandler.InputHandler", "line_number": 36, "usage_type": "call"}, {"api_name": "gameloop.GameLoop", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pygame.font.Font", "line_number": 44, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 44, "usage_type": "attribute"}, {"api_name": "menu.State", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 66, "usage_type": "attribute"}]} +{"seq_id": "445001187", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 17 23:16:23 2017\n\n@author: svd\n\"\"\"\nimport sys\nsys.path.append('/auto/users/hellerc/nems')\nsys.path.append('/auto/users/hellerc/nems/nems/utilities')\nfrom baphy import load_baphy_file\nimport imp\nimport scipy.io\nimport pkgutil as pk\nimport os\n\n\nimport nems.modules as nm\nimport nems.main as main\nimport 
nems.fitters as nf\nimport nems.keyword as nk\n\nimport nems.utilities as ut\nimport nems.stack as ns\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal\n\nimp.reload(nm)\nimp.reload(main)\nimp.reload(nf)\nimp.reload(nk)\nimp.reload(ut)\nimp.reload(ns)\n\nsys.path.append('/auto/users/hellerc/nems/charlie_population_coding')\nfrom baphy_charlie import load_baphy_file2\n\nuser = 'david'\npasswd = 'nine1997'\nhost = 'neuralprediction.org'\ndatabase = 'cell'\nfrom sqlalchemy import create_engine\ndb_uri = 'mysql+pymysql://{0}:{1}@{2}/{3}'.format(user, passwd, host, database)\nengine = create_engine(db_uri)\nf = '/auto/data/code/nems_in_cache/batch299/BOL005c-04-1_b299_none_fs100.mat'\n#f = '/auto/data/code/nems_in_cache/batch299/BOL006b-02-1_b299_none_fs100.mat'\ndata = load_baphy_file2(f)\ncid = data['cellids'][0]\nrespfile = os.path.basename(data['resp_fn'][0])\nrid = engine.execute('SELECT rawid FROM sCellFile WHERE respfile = \"'+respfile+'\" and cellid = %s', (cid,))\nfor obj in rid:\n rawid = str(obj[0])\nisolation = 84\nchan_unit_cellid = engine.execute('SELECT channum, unit, cellid FROM gSingleRaw WHERE isolation > %s AND rawid = %s', (isolation,rawid)).fetchall()\nchan_unit_cellid = sorted(chan_unit_cellid, key=lambda x: x[0])\n\nkeep_ind = []\nfor i in range(0, len(chan_unit_cellid)):\n keep_ind.append(np.argwhere(data['cellids'] == np.array(chan_unit_cellid)[:,2][i]))\n\nkeep_ind = [int(s) for s in keep_ind]\n\nr = data['resp'][:,:,:,keep_ind]\ncellids = data['cellids'][keep_ind]\n\n# above code is only used to get list of cell ids. Just using mat files for \n# familiarity and convenience\n\n\nrvals = []\nfor i, cellid in enumerate(cellids):\n #cellid='BOL005c-18-1'\n cellid = cellid[0]\n batch=293\n modelname= \"parm50_wcg02_fir10_dexp_fit01_nested5\" #\"parm50_wcg02_fir10_pupgainctl_fit01_nested5\"\n \n stack=ut.io.load_single_model(cellid, batch, modelname)\n \n #sys.exit('loaded stack for first cell')\n \n '''\n weight_module_idx=ut.utils.find_modules(stack,'filters.weight_channels')\n fir_module_idx=ut.utils.find_modules(stack,'filters.fir')\n val_file_idx=1 #entry in data stack that contains validation set\n \n m_input=stack.modules[1] # stack that generates the crossval sets\n stim_in=m_input.d_out[val_file_idx]['stim'].copy()\n \n wgt_coefs=stack.modules[weight_module_idx[0]].coefs\n \n m=stack.modules[fir_module_idx[0]]\n '''\n \n #p=m.d_out[val_file_idx]['pred'].copy()\n #r=m.d_out[val_file_idx]['resp'].copy()\n \n p = stack.data[-1][0]['pred'].copy()\n r = stack.data[-1][0]['resp'].copy()\n pup = stack.data[-1][0]['pupil'].copy()\n pup = pup.T\n \n if i == 0:\n pred = np.empty((r.shape[-1], r.shape[-2], len(cellids)))\n resp = np.empty((r.shape[-1], r.shape[-2], len(cellids)))\n \n #pup=m.d_out[val_file_idx]['pupil'].copy().T\n #fir_coefs=m.coefs\n #strf_baseline=m.baseline # h0 in Taylor series expansion\n \n #strf_coefs=np.matmul(wgt_coefs.T,fir_coefs) # h1 in Taylor series\n \n pred[:,:,i] = np.squeeze(p).T\n resp[:,:,i] = np.squeeze(r).T\n \n r_val=stack.meta['r_val'] # test set prediction accuracy\n rvals.append(r_val)\n \nsys.path.append('/auto/users/hellerc/nems/charlie_population_coding') \nfrom NRF_tools import NRF_fit, eval_fit \nrN = NRF_fit(resp[:,:,np.newaxis,:], r0_strf = pred, model='NRF_only', spontonly=0,shuffle=True)\nfullModel = np.squeeze(rN) \n\ncc_rN = np.empty((resp.shape[1], resp.shape[-1]))\ncc_r0 = np.empty((resp.shape[1], resp.shape[-1]))\nfor i in range(0, resp.shape[1]):\n for cell in range(0, resp.shape[-1]):\n 
cc_rN[i, cell]=np.corrcoef(fullModel[:,i,cell], resp[:,i,cell])[0][1]\n cc_r0[i,cell]=np.corrcoef(pred[:,i,cell], resp[:,i,cell])[0][1]\n\nplt.subplot(211)\nplt.plot(np.nanmean(cc_rN,1), '-o', color='r')\nplt.plot(np.nanmean(cc_r0,1), '-o', color='b')\nplt.ylabel('pearsons corr coef')\nplt.xlabel('pip trial')\nplt.legend(['rN', 'r0 (strf)'])\npup_m = np.squeeze(np.mean(pup,0)/2)\nplt.title('rN vs. pupil: %s, r0 vs. pupil: %s' \n %(np.corrcoef(np.nanmean(cc_rN,1), pup_m)[0][1], np.corrcoef(np.nanmean(cc_r0,1), pup_m)[0][1]))\n\ndiff = np.nanmean(cc_rN,1)-np.nanmean(cc_r0,1)\nplt.subplot(212)\nplt.plot(pup_m,'-o', color='k', alpha=0.5, lw=2)\nplt.plot(diff, '-o', color='g')\nplt.legend(['pup', 'rN-r0'])\nplt.xlabel('pip trials')\nplt.title('corr coef btwn rN-r0 and pupil: %s' \n %(np.corrcoef(diff, pup_m)[0][1]))\n\n\n\nprint('Comparing different models...')\ndef onpick3(event):\n ind = event.ind\n print('onpick3 scatter:', cellids[ind][0])\n \nrN_perf = eval_fit(resp, fullModel)\nr0_perf = eval_fit(resp, pred)\n\nx = np.linspace(-1,1,3)\nncols = 8\nnrows = 10\ncellcount = resp.shape[-1]\nrepcount = resp.shape[1]\nbincount = resp.shape[0]\nspontonly=0\nfs=100\nstim=0\ncolor = np.arange(cellcount)\nfig = plt.figure()\nfor rep in range(0, repcount):\n ax = fig.add_subplot(nrows,ncols,rep+1)\n ax.scatter(r0_perf['bytrial'][rep,stim,:], rN_perf['bytrial'][rep,stim,:], c=color,s=10,picker=True)\n ax.plot(x, x, '-k',lw=2)\n ax.axis([-.1,1,-.1,1])\n ax.set_title(rep+1, fontsize=7)\n fig.canvas.mpl_connect('pick_event',onpick3)\n if rep != (ncols*nrows - ncols):\n ax.set_xticks([])\n ax.set_yticks([])\n else:\n ax.set_ylabel('rN')\n ax.set_xlabel('r0')\nfig.suptitle('r0 vs. rN for each cell \\n stim: %s, rawid: %s, spontonly: %s, fs: %s' %(stim+1, rawid, spontonly, fs))\n\n\n", "sub_path": "misc/Test_Files/pps_strf_pred_demo.py", "file_name": "pps_strf_pred_demo.py", "file_ext": "py", "file_size_in_byte": 5812, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.path.append", "line_number": 9, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "imp.reload", "line_number": 30, "usage_type": "call"}, {"api_name": "nems.modules", "line_number": 30, "usage_type": "argument"}, {"api_name": "imp.reload", "line_number": 31, "usage_type": "call"}, {"api_name": "nems.main", "line_number": 31, "usage_type": "argument"}, {"api_name": "imp.reload", "line_number": 32, "usage_type": "call"}, {"api_name": "nems.fitters", "line_number": 32, "usage_type": "argument"}, {"api_name": "imp.reload", "line_number": 33, "usage_type": "call"}, {"api_name": "nems.keyword", "line_number": 33, "usage_type": "argument"}, {"api_name": "imp.reload", "line_number": 34, "usage_type": "call"}, {"api_name": "nems.utilities", "line_number": 34, "usage_type": "argument"}, {"api_name": "imp.reload", "line_number": 35, "usage_type": "call"}, {"api_name": "nems.stack", "line_number": 35, "usage_type": "argument"}, {"api_name": "sys.path.append", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sqlalchemy.create_engine", "line_number": 46, "usage_type": "call"}, {"api_name": "baphy_charlie.load_baphy_file2", "line_number": 49, "usage_type": "call"}, {"api_name": "os.path.basename", 
"line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "numpy.argwhere", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "nems.utilities.io.load_single_model", "line_number": 79, "usage_type": "call"}, {"api_name": "nems.utilities.io", "line_number": 79, "usage_type": "attribute"}, {"api_name": "nems.utilities", "line_number": 79, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 115, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 120, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "NRF_tools.NRF_fit", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.squeeze", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.corrcoef", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.corrcoef", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "numpy.nanmean", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.nanmean", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 136, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "numpy.squeeze", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "numpy.corrcoef", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.nanmean", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "numpy.corrcoef", "line_number": 149, "usage_type": "call"}, {"api_name": "NRF_tools.eval_fit", "line_number": 158, "usage_type": "call"}, {"api_name": "NRF_tools.eval_fit", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}]} +{"seq_id": "103643937", "text": "import logging\nfrom PySide2 import QtCore\nfrom .sensors_connector_bsp import SensorsSerialConnector\nimport configurations.static_app_configurations as app_config\n\nmodule_logger = logging.getLogger(app_config.LOGGER_NAME)\n\nclass SensorConnector(QtCore.QObject):\n weightChangedSignal = QtCore.Signal(float)\n newWidthSensorsSignal = QtCore.Signal(list)\n autoLeftRightWidthChanged = QtCore.Signal(float, float)\n physicalStartSignal = QtCore.Signal(str) # this simulate the left and right btn\n physicalErrorSignal = QtCore.Signal(str)\n def __init__(self):\n super(SensorConnector, self).__init__()\n self.sensor_vals = {\n 0: {\"state\": False, \"msg\": \"start2\"},\n 1: {\"state\": False, \"msg\": \"y-axis error\"},\n 2: {\"state\": False, \"msg\": \"z-axis error\"},\n 3: {\"state\": False, \"msg\": \"emergency status\"},\n 4: {\"state\": False, \"msg\": \"emergency stop\"},\n 5: {\"state\": False, \"msg\": \"start\"},\n }\n self.auto_right_width = 0\n self.auto_left_width = 0\n self.__width_sensor_readings = list()\n self.__current_measured_weight = 0.0\n self.__serial_interface_thread = SensorsSerialConnector()\n self.__serial_interface_thread.weightChanged.connect(self.handle_weight_changed)\n self.__serial_interface_thread.newReading.connect(self.handle_new_sensor_readings_received)\n\n def start(self):\n self.__serial_interface_thread.start()\n\n def handle_weight_changed(self, new_weight):\n pass\n\n def get_weight(self):\n return self.__current_measured_weight\n\n def handle_new_sensor_readings_received(self, readings):\n for i in range(6):\n if readings[i] != self.sensor_vals[i]['state']:\n self.sensor_vals[i]['state'] = readings[i]\n if readings[i]:\n if i == 5:\n self.physicalStartSignal.emit(\"left\")\n module_logger.debug(\"left physical button clicked\")\n elif i == 0:\n self.physicalStartSignal.emit(\"right\")\n module_logger.debug(\"right physical button clicked\")\n else:\n self.physicalErrorSignal.emit(self.sensor_vals[i]['msg'])\n sensor_readings = readings[6:16]\n if not self._lists_equal(sensor_readings, self.__width_sensor_readings):\n self.__width_sensor_readings = sensor_readings\n # calculate the width\n self.get_width_from_sensors(sensor_readings)\n self.autoLeftRightWidthChanged.emit(self.auto_left_width, self.auto_right_width)\n self.newWidthSensorsSignal.emit(sensor_readings)\n\n def _lists_equal(self, l1, l2):\n if len(l1) != len(l2):\n return False\n for i in range(len(l1)):\n if l1[i] != l2[i]:\n 
return False\n return True\n\n def get_width_from_sensors(self, array_of_sensor):\n array_of_sensor = array_of_sensor.copy()\n sensors_counts = len(array_of_sensor)\n if array_of_sensor[0] and array_of_sensor[sensors_counts - 1]:\n return \"error\"\n elif array_of_sensor[0] == 0 and array_of_sensor[sensors_counts - 1] == 0:\n self.auto_left_width = 0\n self.auto_right_width = 0\n return \"no reading\"\n else:\n active_dir = \"left\"\n if array_of_sensor[sensors_counts - 1]:\n array_of_sensor.reverse()\n active_dir = \"right\"\n last_one = 0\n for i in range(sensors_counts):\n if array_of_sensor[i] == 1:\n last_one = i\n if active_dir == \"right\":\n self.auto_right_width = app_config.SENSOR_MAP[last_one]\n else:\n self.auto_left_width = app_config.SENSOR_MAP[last_one]\n return app_config.SENSOR_MAP[last_one], active_dir\n\n def close_service(self):\n self.__serial_interface_thread.requestInterruption()\n\n def control_servo_state(self, is_on):\n if is_on is True:\n self.__serial_interface_thread.turn_on_servo()\n else:\n self.__serial_interface_thread.turn_off_servo()", "sub_path": "models/sensors_connector_hal.py", "file_name": "sensors_connector_hal.py", "file_ext": "py", "file_size_in_byte": 4242, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "configurations.static_app_configurations.LOGGER_NAME", "line_number": 6, "usage_type": "attribute"}, {"api_name": "configurations.static_app_configurations", "line_number": 6, "usage_type": "name"}, {"api_name": "PySide2.QtCore.QObject", "line_number": 8, "usage_type": "attribute"}, {"api_name": "PySide2.QtCore", "line_number": 8, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 9, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 9, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 10, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 10, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 11, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 11, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 12, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 12, "usage_type": "name"}, {"api_name": "PySide2.QtCore.Signal", "line_number": 13, "usage_type": "call"}, {"api_name": "PySide2.QtCore", "line_number": 13, "usage_type": "name"}, {"api_name": "sensors_connector_bsp.SensorsSerialConnector", "line_number": 28, "usage_type": "call"}, {"api_name": "configurations.static_app_configurations.SENSOR_MAP", "line_number": 89, "usage_type": "attribute"}, {"api_name": "configurations.static_app_configurations", "line_number": 89, "usage_type": "name"}, {"api_name": "configurations.static_app_configurations.SENSOR_MAP", "line_number": 91, "usage_type": "attribute"}, {"api_name": "configurations.static_app_configurations", "line_number": 91, "usage_type": "name"}, {"api_name": "configurations.static_app_configurations.SENSOR_MAP", "line_number": 92, "usage_type": "attribute"}, {"api_name": "configurations.static_app_configurations", "line_number": 92, "usage_type": "name"}]} +{"seq_id": "489745006", "text": "# OpenCV Camera Calibration (correcting camera distortion)\nimport cv2\nimport glob\n\nimages = glob.glob('img/*.jpg')\ntotal_images = len(images)\n\nidx = 0\n\nwhile True:\n fname = images[idx]\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, 
cv2.COLOR_BGR2GRAY)\n\n out_str = f'{idx}/{total_images}'\n cv2.putText(img, out_str, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 1)\n\n cv2.imshow('img', img)\n\n key = cv2.waitKey(0)\n\n if key == 27: # ESC\n break\n\n elif key == 0x61:\n idx -= 1\n\n elif key == 0x64:\n idx += 1\n\n if idx < 0:\n idx = 0\n\n elif idx >= total_images:\n idx = total_images - 1\n\ncv2.destroyAllWindows()", "sub_path": "OpenCV 09일차/실습/opencv_60.py", "file_name": "opencv_60.py", "file_ext": "py", "file_size_in_byte": 613, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "glob.glob", "line_number": 5, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 13, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 16, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "285116995", "text": "import django\nimport time\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites.models import Site\nfrom django.test import TestCase, Client\nfrom django.utils import timezone\nfrom fluent_comments import get_model as get_comment_model\nfrom fluent_comments.compat import CommentForm\nfrom article.models import Article\n\n\nclass CommentsTests(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(CommentsTests, cls).setUpClass()\n Comment = get_comment_model()\n\n now = timezone.now()\n cls.site = Site.objects.get(pk=1)\n cls.admin = User.objects.create_superuser('superuser', 'myemail@test.com', 'secret')\n cls.article = Article.objects.create(\n title=\"Testing article\",\n slug=\"testing-article\",\n content=\"This is testing article\",\n publication_date=now,\n enable_comments=True,\n )\n cls.article_ctype = ContentType.objects.get_for_model(cls.article)\n cls.comment = Comment.objects.create(\n content_type=cls.article_ctype,\n object_pk=cls.article.pk,\n user=cls.admin,\n user_name=\"Test-Name\",\n user_email=\"test@example.com\",\n user_url=\"http://example.com\",\n comment=\"Test-Comment\",\n submit_date=now,\n site=cls.site,\n is_public=True,\n is_removed=False,\n )\n\n def test_admin_comments_access(self):\n self.client.login(username=self.admin.username, password='secret')\n response = self.client.get(reverse('admin:fluent_comments_fluentcomment_changelist'))\n self.assertContains(response, \">Test-Name<\", status_code=200)\n\n def test_get_article_with_comment(self):\n response = self.client.get(reverse('article-details', kwargs={\"slug\": \"testing-article\"}))\n self.assertContains(response, \"Comment\", status_code=200)\n\n def test_comment_post(self):\n content_type = \"article.article\"\n object_pk = \"1\"\n timestamp = str(int(time.time()))\n form = CommentForm(Article())\n security_hash = form.generate_security_hash(content_type, object_pk, timestamp)\n post_data = {\n \"content_type\": 
content_type,\n \"object_pk\": object_pk,\n \"name\": \"Testing name\",\n \"email\": \"test@email.com\",\n \"comment\": \"Testing comment\",\n \"timestamp\": timestamp,\n \"security_hash\": security_hash,\n }\n response = self.client.post(reverse(\"comments-post-comment-ajax\"), post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertContains(response, \"Testing comment\", status_code=200)\n self.assertEqual(response.status_code, 200, response.content.decode(\"utf-8\"))\n", "sub_path": "example/article/tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 3081, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.test.TestCase", "line_number": 14, "usage_type": "name"}, {"api_name": "fluent_comments.get_model", "line_number": 19, "usage_type": "call"}, {"api_name": "django.utils.timezone.now", "line_number": 21, "usage_type": "call"}, {"api_name": "django.utils.timezone", "line_number": 21, "usage_type": "name"}, {"api_name": "django.contrib.sites.models.Site.objects.get", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.sites.models.Site.objects", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.sites.models.Site", "line_number": 22, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.create_superuser", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "name"}, {"api_name": "article.models.Article.objects.create", "line_number": 24, "usage_type": "call"}, {"api_name": "article.models.Article.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "article.models.Article", "line_number": 24, "usage_type": "name"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model", "line_number": 31, "usage_type": "call"}, {"api_name": "django.contrib.contenttypes.models.ContentType.objects", "line_number": 31, "usage_type": "attribute"}, {"api_name": "django.contrib.contenttypes.models.ContentType", "line_number": 31, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 48, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}, {"api_name": "fluent_comments.compat.CommentForm", "line_number": 63, "usage_type": "call"}, {"api_name": "article.models.Article", "line_number": 63, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "556420934", "text": "from vnpy.trader.constant import (Exchange, Interval)\nimport pandas as pd\nfrom vnpy.trader.database import database_manager\nfrom vnpy.trader.object import (BarData,TickData)\nfrom datetime import datetime, timedelta, timezone\nimport sys\n\n# Helper function\ndef move_df_to_mongodb(imported_data:pd.DataFrame,collection_name:str):\n ticks = []\n start = None\n count = 0\n utc_8 = timezone(timedelta(hours=8))\n for row in imported_data.itertuples():\n\n tick = TickData(\n symbol = row.symbol,\n exchange = row.exchange,\n datetime = row.datetime.replace(tzinfo=utc_8),\n #datetime = row.datetime,\n name = \"TickDataName\",\n volume = row.volume,\n 
open_interest = row.open_interest,\n turnover = row.turnover,\n last_price = row.last_price,\n last_volume = row.last_volume,\n last_amount = row.last_amount,\n limit_up = row.limit_up,\n limit_down = row.limit_down,\n open_price = row.open_price,\n high_price = row.high_price,\n low_price = row.low_price,\n pre_close = row.pre_close,\n bid_price_1 = row.bid_price_1,\n bid_price_2 = row.bid_price_2,\n bid_price_3 = row.bid_price_3,\n bid_price_4 = row.bid_price_4,\n bid_price_5 = row.bid_price_5,\n ask_price_1 = row.ask_price_1,\n ask_price_2 = row.ask_price_2,\n ask_price_3 = row.ask_price_3,\n ask_price_4 = row.ask_price_4,\n ask_price_5 = row.ask_price_5,\n bid_volume_1 = row.bid_volume_1,\n bid_volume_2 = row.bid_volume_2,\n bid_volume_3 = row.bid_volume_3,\n bid_volume_4 = row.bid_volume_4,\n bid_volume_5 = row.bid_volume_5,\n ask_volume_1 = row.ask_volume_1,\n ask_volume_2 = row.ask_volume_2,\n ask_volume_3 = row.ask_volume_3,\n ask_volume_4 = row.ask_volume_4,\n ask_volume_5 = row.ask_volume_5,\n gateway_name=\"DB\",\n )\n ticks.append(tick)\n\n # do some statistics\n count += 1\n if not start:\n start = tick.datetime\n end = tick.datetime\n\n # insert into database\n database_manager.save_tick_data(ticks, collection_name)\n print(f'Insert Tick: {count} from {start} - {end}')\n\nif __name__ == \"__main__\":\n #imported_data = pd.read_csv('D:\\Study\\数据\\PoboForVnpy\\cu7777\\cu7777_20200907-20200911.csv',encoding='utf-8')\n #imported_data = pd.read_csv('D:\\Study\\数据\\PoboForVnpy\\cu6666\\cu6666_20200907-20200911.csv',encoding='utf-8')\n #imported_data = pd.read_csv('D:/Study/数据/PoboForVnpy/al6666/al6666_20200907-20200911.csv',encoding='utf-8')\n #imported_data = pd.read_csv('D:/Study/数据/PoboForVnpy/al7777/al7777_20200907-20200911.csv',encoding='utf-8')\n \n sys_collection_name = sys.argv[1]\n sys_data_path = sys.argv[2]\n \n imported_data = pd.read_csv(sys_data_path,encoding='utf-8')\n \n \n # Replace the SC values in the csv file's market-code column with Exchange.SHFE (SHFE)\n imported_data['exchange'] = Exchange.SHFE\n # Columns that must be of float data type\n float_columns = ['volume','open_interest','last_price','last_volume','limit_up','limit_down','open_price','high_price','low_price','pre_close','bid_price_1','bid_price_2','bid_price_3','bid_price_4','bid_price_5','ask_price_1','ask_price_2','ask_price_3','ask_price_4','ask_price_5','bid_volume_1','bid_volume_2','bid_volume_3','bid_volume_4','bid_volume_5','ask_volume_1','ask_volume_2','ask_volume_3','ask_volume_4','ask_volume_5']\n for col in float_columns:\n imported_data[col] = imported_data[col].astype('float')\n # Specify the timestamp format\n # %Y-%m-%d %H:%M:%S.%f means the timestamps in your csv data must use the format 2020-05-01 08:32:30.500000\n datetime_format = '%Y-%m-%d %H:%M:%S.%f'\n imported_data['datetime'] = pd.to_datetime(imported_data['datetime'],format=datetime_format)\n\n\n #!!!!!!!!!!! 
remember to rename\n #move_df_to_mongodb(imported_data,'cu7777')\n #move_df_to_mongodb(imported_data,'cu6666')\n #move_df_to_mongodb(imported_data,'al6666')\n #move_df_to_mongodb(imported_data,'al7777')\n \n move_df_to_mongodb(imported_data, sys_collection_name)\n", "sub_path": "examples/import_csv.py", "file_name": "import_csv.py", "file_ext": "py", "file_size_in_byte": 4378, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.DataFrame", "line_number": 9, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 13, "usage_type": "call"}, {"api_name": "vnpy.trader.object.TickData", "line_number": 16, "usage_type": "call"}, {"api_name": "vnpy.trader.database.database_manager.save_tick_data", "line_number": 65, "usage_type": "call"}, {"api_name": "vnpy.trader.database.database_manager", "line_number": 65, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 74, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 77, "usage_type": "call"}, {"api_name": "vnpy.trader.constant.Exchange.SHFE", "line_number": 81, "usage_type": "attribute"}, {"api_name": "vnpy.trader.constant.Exchange", "line_number": 81, "usage_type": "name"}, {"api_name": "pandas.to_datetime", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "154848042", "text": "import ray\n\nfrom ray_examples.streaming.shared.kafka_actors import KafkaProducer\nfrom ray_examples.streaming.shared.kafka_actors import BaseKafkaConsumer\nfrom ray_examples.streaming.shared.controller import BaseTemperatureController\n\n@ray.remote\nclass TemperatureController(BaseTemperatureController):\n def __init__(self, producer: KafkaProducer, id: str):\n super().__init__(id)\n self.producer = producer\n\n # Process new measurements\n def process_sensor_data(self, sensor: dict):\n if super().process_sensor_data(sensor):\n # publish new action to kafka\n self.producer.produce.remote({'control': self.previousCommand})\n\n@ray.remote\nclass TemperatureControllerManager:\n def __init__(self, producer: KafkaProducer):\n self.controllers = {}\n self.producer = producer\n\n def process_controller_message(self, key: str, value: dict):\n controller_id = value['id']\n if not controller_id in self.controllers: # create a new controller\n print(f'Creating a new controller {controller_id}')\n controller = TemperatureController.remote(producer=self.producer, id=controller_id)\n self.controllers[controller_id] = controller\n self.controllers[controller_id].process_new_message.remote(value)\n\n@ray.remote\nclass KafkaConsumer(BaseKafkaConsumer):\n def __init__(self, callback, group: str = 'ray', server: str = 'localhost:9092',\n topic: str = 'sensor', restart: str = 'earliest'):\n super().__init__(group=group, server = server, topic = topic, restart = restart)\n self.callback = callback\n\n# Start Ray\nray.init()\n\n# Start actors\nproducer = KafkaProducer.remote()\ncontroller = TemperatureControllerManager.remote(producer)\nn_consumers = 5 # Number of consumers\nconsumers = [KafkaConsumer.remote(controller.process_controller_message.remote) for _ in range(n_consumers)]\nrefs = [c.start.remote() for c in consumers]\n\ntry:\n ray.get(refs)\n\n# end gracefully\nexcept KeyboardInterrupt:\n for c in consumers:\n c.stop.remote()\nfinally:\n for c in consumers:\n c.destroy.remote()\n producer.destroy.remote()\n 
ray.kill(producer)\n ray.kill(controller)\n", "sub_path": "ray_examples/streaming/stateful_streaming/round_robin/ray_kafka_stateful.py", "file_name": "ray_kafka_stateful.py", "file_ext": "py", "file_size_in_byte": 2212, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "ray_examples.streaming.shared.controller.BaseTemperatureController", "line_number": 8, "usage_type": "name"}, {"api_name": "ray_examples.streaming.shared.kafka_actors.KafkaProducer", "line_number": 9, "usage_type": "name"}, {"api_name": "ray.remote", "line_number": 7, "usage_type": "attribute"}, {"api_name": "ray_examples.streaming.shared.kafka_actors.KafkaProducer", "line_number": 21, "usage_type": "name"}, {"api_name": "ray.remote", "line_number": 19, "usage_type": "attribute"}, {"api_name": "ray_examples.streaming.shared.kafka_actors.BaseKafkaConsumer", "line_number": 34, "usage_type": "name"}, {"api_name": "ray.remote", "line_number": 33, "usage_type": "attribute"}, {"api_name": "ray.init", "line_number": 41, "usage_type": "call"}, {"api_name": "ray_examples.streaming.shared.kafka_actors.KafkaProducer.remote", "line_number": 44, "usage_type": "call"}, {"api_name": "ray_examples.streaming.shared.kafka_actors.KafkaProducer", "line_number": 44, "usage_type": "name"}, {"api_name": "ray.get", "line_number": 51, "usage_type": "call"}, {"api_name": "ray.kill", "line_number": 61, "usage_type": "call"}, {"api_name": "ray.kill", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "373081455", "text": "# -- Libraries --\n# Google API Libraries\nfrom apiclient.discovery import build\nfrom apiclient.errors import HttpError\nfrom oauth2client.tools import argparser\n\n# General-Purpose Libraries\nimport datetime as dt\nimport pickle\nfrom math import ceil, floor\n\n# -- User-Defined Functions (UDFs) --\n\n\n# Specifying the directory to write out pickles\ndirectory = '/Users/chuamelia/Google Drive/Fall 2019/\\\nIntroduction to Data Science/Term Project/\\\nGroup Term Project/YouTube Video View Analysis/'\n\n\ndef save_obj(obj, fname):\n # This writes out a python object as a pickle.\n with open(directory + fname + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef load_obj(fname):\n # This loads the pickled object.\n with open(directory + fname + '.pkl', 'rb') as f:\n return pickle.load(f)\n\n\ndef connectToYouTubeAPI(DEVELOPER_KEY):\n # Specifying API Service\n YOUTUBE_API_SERVICE_NAME = \"youtube\"\n YOUTUBE_API_VERSION = \"v3\"\n\n # Creating connection to the Youtube Data API Service\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\n developerKey=DEVELOPER_KEY)\n return youtube\n\n\ndef getPlaylistItemsSnippet(uploads_playlist_id, page_token):\n \"\"\"\n This UDF retrieves all items from the playlistItems.snippet\n section of the YouTube Data API.\n \"\"\"\n playlistItems = youtube.playlistItems().list(\n playlistId=uploads_playlist_id,\n pageToken=page_token,\n part=\"snippet\",\n maxResults=50).execute()\n return playlistItems\n\n\ndef getVideoIdsAndTokens(playlist_result):\n \"\"\"\n This UDF takes the result generated from getPlaylistItemsSnippet.\n It returns the videoIds and the nextPageToken needed\n to get the next batch of videoIds.\n \"\"\"\n # Get List of Videos in Playlist\n playlist_items = playlist_result['items']\n num_items = len(playlist_items)\n videosIds = [playlist_items[i]['snippet']['resourceId']['videoId']\n for i in range(num_items)]\n\n # Get the nextPageToken to find the next 50 Video 
Ids.\n # If it's the last page, there is no nextPageToken\n nextPageToken = None\n if 'nextPageToken' in playlist_result.keys():\n # Attempt to retrieve the token, only if it exists\n nextPageToken = playlist_result['nextPageToken']\n return videosIds, nextPageToken\n\n\n# -- RETRIEVING CREDENTIALS --\nKEY_CHAIN = load_obj('key_chain')\n\n# -- CONNECTION TO YOUTUBE DATA API --\nyoutube = connectToYouTubeAPI(KEY_CHAIN[5][1])\n\n# -- FIND UPLOADS PLAYLIST ID --\n# This retrieves the playlistId for the playlist of all uploads.\n# Method learned from:\n# https://stackoverflow.com/questions/50199490/youtube-api-v3-get-every-video-id-from-given-channel\n\n# ChannelId for the BuzzFeedVideo YouTube Channel\nCHANNEL_ID = 'UCpko_-a4wgz2u_DgDgd9fqA'\n\n# Querying the channels.contentDetails resource\nresults = youtube.channels().list(\n id=CHANNEL_ID,\n pageToken=None,\n part=\"contentDetails\",\n maxResults=50).execute()\n\n# Pulling out the playlistId of the Uploads Playlist\nchannel_content_details = results['items'][0]['contentDetails']\nuploads_playlist_id = channel_content_details['relatedPlaylists']['uploads']\n# uploads_playlist_id = 'UUpko_-a4wgz2u_DgDgd9fqA'\n\n# -- RETRIEVING ALL VIDEO IDs IN UPLOADS PLAYLIST --\n# This retrieves all videoIds for the channel.\n\n# Creating connection to the Youtube Data API Service\nyoutube = connectToYouTubeAPI(KEY_CHAIN[5][1])\n\n# Create lists to store nextPageTokens and videoIds\npage_tokens = []\nvideo_ids = []\n\n# Retrieving snippet part of the playlistItems resource\nplaylist_result = getPlaylistItemsSnippet(uploads_playlist_id, None)\n\n# Getting the number of times we need to iterate through\n\n# PlaylistItems.pageInfo.totalResults provides\n# the number of videos in a playlist\ntotal_videos = playlist_result['pageInfo']['totalResults']\n\n# Dividing by 50 because we can retrieve a maximum of 50 videos at a time\n# Using floor because the first batch won't have a nextPageToken as input\nnum_batches = floor(total_videos / 50)\nprint(total_videos)\n\n# Retrieving the first batch\nvideosIds, nextPageToken = getVideoIdsAndTokens(playlist_result)\nvideo_ids += videosIds\npage_tokens.append(nextPageToken)\n\n# Retrieving subsequent batches\nfor batch_num in range(num_batches):\n playlist_result = getPlaylistItemsSnippet(uploads_playlist_id, nextPageToken)\n\n videosIds, nextPageToken = getVideoIdsAndTokens(playlist_result)\n\n video_ids += videosIds\n page_tokens.append(nextPageToken)\n\n# Saving list of videoIds in case we need to refer back to it.\nts = dt.datetime.now().strftime(\"%Y%m%d-%M%S\")\nname = 'data/video_ids_list_' + ts\nsave_obj(video_ids, name)\n", "sub_path": "prod/py/getPlaylistIdAndVideoIds.py", "file_name": "getPlaylistIdAndVideoIds.py", "file_ext": "py", "file_size_in_byte": 4670, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pickle.dump", "line_number": 24, "usage_type": "call"}, {"api_name": "pickle.HIGHEST_PROTOCOL", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 30, "usage_type": "call"}, {"api_name": "apiclient.discovery.build", "line_number": 39, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 143, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 143, "usage_type": "attribute"}]} +{"seq_id": "18014430", "text": "#!/usr/bin/env python3\nimport pytest\n\n\ndef is_line_valid(mini, maxi, 
char, pwd):\n if mini <= pwd.count(char) <= maxi:\n return True\n else:\n return False\n\n\n@pytest.mark.parametrize(\"input_line, valid\", [\n (\"1-3 a: abcde\", True),\n (\"1-3 b: cdefg\", False),\n (\"2-9 c: ccccccccc\", True)])\ndef test_is_line_valid(input_line, valid):\n\n # * is for tuple expansion\n assert is_line_valid(*parse_line(input_line)) == valid\n\n\ndef parse_line(line):\n mini = 0\n maxi = 0\n char = \"\"\n pwd = \"\"\n\n mini = int(line.split(\"-\", 1)[0])\n remainder = line.split(\"-\", 1)[1]\n\n maxi = int(remainder.split(\" \", 1)[0])\n remainder = str(remainder.split(\" \", 1)[1])\n\n char = remainder.split(\":\", 1)[0]\n remainder = remainder.split(\":\", 1)[1]\n pwd = remainder.strip()\n\n return mini, maxi, char, pwd\n\n\n@pytest.mark.parametrize(\"input_line, mini, maxi, char, pwd\", [\n (\"1-3 a: abcde\", 1, 3, \"a\", \"abcde\"),\n (\"1-3 b: cdefg\", 1, 3, \"b\", \"cdefg\"),\n (\"2-9 c: ccccccccc\", 2, 9, \"c\", \"ccccccccc\")])\ndef test_parse_line(input_line, mini, maxi, char, pwd):\n\n o_mini, o_maxi, o_char, o_pwd = parse_line(input_line)\n\n assert o_mini == mini\n assert o_maxi == maxi\n assert o_char == char\n assert o_pwd == pwd\n\n\n@pytest.mark.parametrize(\"input_line, valid\", [\n (\"1-3 a: abcde\", True),\n (\"1-3 b: cdefg\", False),\n (\"2-9 c: ccccccccc\", False)])\ndef test_is_line_valid_part2(input_line, valid):\n\n # * is for tuple expansion\n assert is_line_valid_part2(*parse_line(input_line)) == valid\n\n\ndef is_line_valid_part2(pos1, pos2, char, pwd):\n '''pos1 and pos2 are known as mini and maxi in Part 1'''\n\n # offset pos1 and pos2 by 1, to account for lack of zero-index\n pos1 = pos1 - 1\n pos2 = pos2 - 1\n\n # valid if only one position contains char\n # XOR => bool(a) != bool(b)\n if (pwd[pos1] == char) != (pwd[pos2] == char):\n return True\n else:\n return False\n\n\nlines = []\nwith open('day2.txt', 'r') as f:\n strings = f.read().splitlines()\n for item in strings:\n if item:\n lines.append(item)\nprint(len(lines))\n\n# PART 1\ncount = 0\nfor line in lines:\n if is_line_valid(*parse_line(line)):\n count += 1\nprint(count)\n\n# PART 2\ncount = 0\nfor line in lines:\n if is_line_valid_part2(*parse_line(line)):\n count += 1\nprint(count)\n", "sub_path": "day2.py", "file_name": "day2.py", "file_ext": "py", "file_size_in_byte": 2537, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pytest.mark.parametrize", "line_number": 12, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 41, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 41, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 55, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 55, "usage_type": "attribute"}]} +{"seq_id": "499980229", "text": "from flask_restful import Resource, reqparse\nfrom nltk.tokenize import word_tokenize\nimport nltk\n\nclass TemSentenceTokenizer(Resource):\n\n    parser = reqparse.RequestParser()\n    parser.add_argument('sentence',\n                        type=str,\n                        required=True,\n                        help=\"Please enter a valid sentence\")\n\n    def get(self):\n        try:\n            data = TemSentenceTokenizer.parser.parse_args()\n            sentence = data['sentence']\n\n            tokenized_words = word_tokenize(sentence)\n\n        except Exception as error:\n            return {'error' : error}\n\n        else:\n            return {'result': tokenized_words}, 200\n\n\nclass PosTagging(Resource):\n\n    parser = 
reqparse.RequestParser()\n parser.add_argument('text',\n type=str,\n required=True,\n help=\"Please enter a valid sentence\")\n\n def get(self):\n data = PosTagging.parser.parse_args()\n text = data['text']\n try:\n tokenized_words = word_tokenize(text)\n tags = nltk.pos_tag(tokenized_words)\n\n except Exception as error:\n return {'error': error}\n\n else:\n return {'result': tags}, 200\n\n", "sub_path": "resources/methods.py", "file_name": "methods.py", "file_ext": "py", "file_size_in_byte": 1371, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "flask_restful.Resource", "line_number": 5, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 7, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 7, "usage_type": "name"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 18, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 27, "usage_type": "name"}, {"api_name": "flask_restful.reqparse.RequestParser", "line_number": 29, "usage_type": "call"}, {"api_name": "flask_restful.reqparse", "line_number": 29, "usage_type": "name"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 39, "usage_type": "call"}, {"api_name": "nltk.pos_tag", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "109033819", "text": "#!/usr/bin/python\n# switch column and row\n\nimport argparse\nimport os, sys\nfrom string import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--input\", help=\"original file\")\nparser.add_argument(\"-o\", \"--out\", help=\"converted file\")\nargs = parser.parse_args()\n\nfp=open(args.input)\nfw=open(args.out,\"w\")\nhd=[]\nn=0\ncnt={}\nfor it in fp:\n its=it.strip().split()\n hd.append(its[0])\n cnt[its[0]]=its[1:len(its)]\n if n==0:\n n=len(its)-1\nfp.close()\nfw.write(\"\\t\".join(hd)+\"\\n\")\nfor j in range(0,n):\n ln=\"\"\n for f in hd:\n if len(ln)==0:\n ln=cnt[f][j]\n else:\n ln=ln+\"\\t\"+cnt[f][j]\n fw.write(ln+\"\\n\")\nfw.close()\n", "sub_path": "switchColRow.py", "file_name": "switchColRow.py", "file_ext": "py", "file_size_in_byte": 682, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}]} +{"seq_id": "181401791", "text": "# -*- coding: utf-8 -*-\nfrom openerp import api, models, fields, registry\nimport json\nimport base64\nimport logging\nimport time\n\n_logger = logging.getLogger(__name__)\n\nclass pos_big_data(models.Model):\n\n _name = \"pos.big.data\"\n\n model = fields.Char('Model', required=1)\n fields_load = fields.Text('Field load', required=1)\n domain = fields.Text('Domains load', required=1)\n log = fields.Binary('Log', required=1)\n\n @api.model\n def api_get_data(self, model_datas):\n _logger.info('start api_get_data')\n start_time = time.time()\n results = {}\n vals = model_datas['model_datas']\n for val in vals:\n domain_pos = val['domain']\n domain = []\n for d in domain_pos:\n domain.append((d[0], d[1], d[2]))\n fields_load = val['fields']\n model = val['model']\n datas = self.search([('model', '=', model)])\n if datas:\n results[datas[0].model] = json.loads(base64.decodestring(datas[0].log).decode('utf-8'))\n else:\n records = self.env[model].search(domain)\n values = records.read(fields_load)\n self.create({\n 'model': model,\n 'fields_load': fields_load,\n 'domain': domain,\n 'log': 
base64.encodestring(json.dumps(values).encode('utf-8')),\n })\n results[model] = values\n _logger.info('end api_get_data %s' % (time.time() - start_time))\n return results", "sub_path": "pos_fast/models/pos_big_data.py", "file_name": "pos_big_data.py", "file_ext": "py", "file_size_in_byte": 1567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "openerp.models.Model", "line_number": 10, "usage_type": "attribute"}, {"api_name": "openerp.models", "line_number": 10, "usage_type": "name"}, {"api_name": "openerp.fields.Char", "line_number": 14, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 14, "usage_type": "name"}, {"api_name": "openerp.fields.Text", "line_number": 15, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 15, "usage_type": "name"}, {"api_name": "openerp.fields.Text", "line_number": 16, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 16, "usage_type": "name"}, {"api_name": "openerp.fields.Binary", "line_number": 17, "usage_type": "call"}, {"api_name": "openerp.fields", "line_number": 17, "usage_type": "name"}, {"api_name": "time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, {"api_name": "base64.decodestring", "line_number": 34, "usage_type": "call"}, {"api_name": "base64.encodestring", "line_number": 42, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 45, "usage_type": "call"}, {"api_name": "openerp.api.model", "line_number": 19, "usage_type": "attribute"}, {"api_name": "openerp.api", "line_number": 19, "usage_type": "name"}]} +{"seq_id": "349988134", "text": "# Could use a data store of some sort, but let's just use files\nimport gmusicapi, urllib.parse, os, sys, datetime, time, oauth2client, argparse\nfrom pathlib import Path\nfrom yaml import safe_load\nfrom pushover import init, Client\n\n_CREDS_FILE = \"./creds\"\n_STORED_TRACKS_FILE = \"./stored_tracks\"\n_MISSING_LOG = \"./missing_tracks_log\"\n_NEW_LOG = \"./new_tracks_log\"\n_SLEEP = 86400 # One day\n\n# Log a message with a timestamp\ndef log(message, file=sys.stdout):\n print(f\"{datetime.datetime.now()} {message}\", file=file, flush=True)\n\n\n# Check each environment variable in a list is set\ndef check_env_vars(var_list):\n\n _missing_vars = 0\n for _env_var in var_list:\n if not os.environ.get(_env_var):\n _missing_vars = _missing_vars + 1\n log (f\"ERROR: Unable to get environment variable {_env_var}\", file=sys.stderr)\n\n if _missing_vars:\n log(\"One or more environment variables not set... 
exiting\", file=sys.stderr)\n exit(1)\n\n\n# Send pushover alert with details of missing tracks\ndef send_alert(missing_tracks, new_tracks):\n\n log(\"Sending track change alert\")\n\n # Construct a message\n _message = f\"Google Play Music Library - Track Changes:\\n\"\n for _track in missing_tracks:\n _message = _message + f\" MISSING: {_track['title']} - {_track['artist']} - {_track['album']}\\n\"\n\n for _track in new_tracks:\n _message = _message + f\" NEW: {_track['title']} - {_track['artist']} - {_track['album']}\\n\"\n\n Client(os.environ[\"_PUSHOVER_USER_KEY\"]).send_message(_message, title=\"Google Play Music Library - Track Changes\")\n\n\n# Read stored tracks, url-decode them\ndef read_stored_tracks(file):\n\n _stored_tracks = list()\n\n # If we can find the file given, read in track info from it\n if Path(file).is_file():\n log(f\"Found existing list at {file}\")\n with open(file, 'r') as _stored_tracks_file:\n _stored_tracks = safe_load(_stored_tracks_file)\n else:\n log(f\"Stored tracks file {file} not found\")\n\n # URL-decode stored track details\n for _track in _stored_tracks:\n _track[\"artist\"] = urllib.parse.unquote(_track[\"artist\"])\n _track[\"title\"] = urllib.parse.unquote(_track[\"title\"])\n _track[\"album\"] = urllib.parse.unquote(_track[\"album\"])\n\n return _stored_tracks\n\n\n# Get a list of tracks currently in your Google Play Music library\ndef get_google_play_tracks():\n\n # Try creating oauth2creds from the file manually\n _oa2_creds = oauth2client.client.OAuth2Credentials.from_json(os.environ[\"_CREDS\"])\n\n # Create mobile client\n _mobile_client = gmusicapi.Mobileclient()\n\n # This login attempt will fail, but we can get a valid device ID from the execption thrown\n try:\n _mobile_client.oauth_login(\"1234567890\", _oa2_creds, \"en_GB\")\n except gmusicapi.exceptions.InvalidDeviceId as e:\n _device_id = e.valid_device_ids[0]\n\n # Attempt login\n if not _mobile_client.oauth_login(_device_id, oauth_credentials=_oa2_creds, locale=\"en_GB\"):\n log(f\"FATAL: Unable to login using device ID {_device_id} and supplied credentials\", file=sys.stderr)\n log(\" Credentials need renewing?\", file=sys.stderr)\n exit(1)\n\n # Get raw track list\n _library_tracks_raw = _mobile_client.get_all_songs()\n \n # We're only interested in title, artist and album\n _library_tracks = list()\n for track in _library_tracks_raw:\n _library_tracks.append({\"artist\": track[\"artist\"], \"album\": track[\"album\"], \"title\": track[\"title\"]})\n\n return _library_tracks\n\n\n# Log missing tracks to a file\ndef write_track_file(track_list, file, mode):\n\n try:\n with open(file, mode) as _tracks_file:\n\n # Write each track out, URL-encoded everything\n for _track in sorted(track_list, key = lambda i: i['title']):\n _tracks_file.write(f\"- title: \\\"{urllib.parse.quote(_track['title'])}\\\"\\n\")\n _tracks_file.write(f\" artist: \\\"{urllib.parse.quote(_track['artist'])}\\\"\\n\")\n _tracks_file.write(f\" album: \\\"{urllib.parse.quote(_track['album'])}\\\"\\n\")\n\n except IOError:\n log(\"FATAL: Error opening tracks file {file} for writing\")\n exit(1)\n\n\ndef main():\n\n # Check required env vars are set\n check_env_vars([\"_PUSHOVER_API_KEY\", \"_PUSHOVER_USER_KEY\", \"_CREDS\"])\n\n # Parse arguments - there may be a -d\n _parser = argparse.ArgumentParser()\n _parser.add_argument(\"-d\", \"--daemon\", help=\"Run in daemon mode\", action=\"store_true\", default=False)\n _args = _parser.parse_args()\n\n if _args.daemon:\n init(os.environ[\"_PUSHOVER_API_KEY\"])\n 
Client(os.environ[\"_PUSHOVER_USER_KEY\"]).send_message(\"Starting Google Play Music Monitor\")\n\n\n while True:\n\n _missing_tracks = list()\n _new_tracks = list()\n \n # Get current tracks in google play library. Requires a device ID\n _current_tracks = get_google_play_tracks()\n\n # Read stored tracks from a previous run\n _stored_tracks = read_stored_tracks(_STORED_TRACKS_FILE)\n\n # Look for tracks removed from our library since we last retrieved them\n for _stored_track in _stored_tracks:\n _found = False\n \n for _current_track in _current_tracks:\n if _current_track == _stored_track:\n # We've found it\n _found = True\n\n if not _found:\n log(f\"MISSING TRACK: {_stored_track['title']} - {_stored_track['artist']} - {_stored_track['album']}\")\n _missing_tracks.append(_stored_track)\n\n # While we're at it, look for tracks added to the library since we last stored it, but only if we \n # found some stored tracks\n if _stored_tracks:\n for _current_track in _current_tracks:\n _found = False\n\n for _stored_track in _stored_tracks:\n if _stored_track == _current_track:\n _found = True\n\n if not _found:\n log(f\"NEW TRACK: {_current_track['title']} - {_current_track['artist']} - {_current_track['album']}\")\n _new_tracks.append(_current_track)\n\n\n # If we have missing or new tracks send a pushover alert\n if _missing_tracks or _new_tracks:\n send_alert(_missing_tracks, _new_tracks)\n else:\n log(\"No changes\")\n\n # Write tracks to logs of missing and new, and update the stored tracks file\n write_track_file(_missing_tracks, _MISSING_LOG, 'a')\n write_track_file(_new_tracks, _NEW_LOG, \"a\")\n write_track_file(_current_tracks, _STORED_TRACKS_FILE, 'w')\n\n # If we're not in daemon mode we're done\n if not _args.daemon:\n break\n\n log(f\"Sleeping for {_SLEEP} seconds...\")\n time.sleep(_SLEEP)\n\nif __name__ == \"__main__\":\n main()", "sub_path": "g-monitor.py", "file_name": "g-monitor.py", "file_ext": "py", "file_size_in_byte": 6875, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.stdout", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 25, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pushover.Client", "line_number": 45, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 54, "usage_type": "call"}, {"api_name": "yaml.safe_load", "line_number": 57, "usage_type": "call"}, {"api_name": "urllib.parse.parse.unquote", "line_number": 63, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 63, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 63, "usage_type": "name"}, {"api_name": "urllib.parse.parse.unquote", "line_number": 64, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 64, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 64, "usage_type": "name"}, {"api_name": "urllib.parse.parse.unquote", "line_number": 65, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 65, "usage_type": "attribute"}, 
{"api_name": "urllib.parse", "line_number": 65, "usage_type": "name"}, {"api_name": "oauth2client.client.OAuth2Credentials.from_json", "line_number": 74, "usage_type": "call"}, {"api_name": "oauth2client.client", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 74, "usage_type": "attribute"}, {"api_name": "gmusicapi.Mobileclient", "line_number": 77, "usage_type": "call"}, {"api_name": "gmusicapi.exceptions", "line_number": 82, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 87, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 88, "usage_type": "attribute"}, {"api_name": "urllib.parse.parse.quote", "line_number": 110, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 110, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 110, "usage_type": "name"}, {"api_name": "urllib.parse.parse.quote", "line_number": 111, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 111, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 111, "usage_type": "name"}, {"api_name": "urllib.parse.parse.quote", "line_number": 112, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 112, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 112, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 125, "usage_type": "call"}, {"api_name": "pushover.init", "line_number": 130, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pushover.Client", "line_number": 131, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 131, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 189, "usage_type": "call"}]} +{"seq_id": "183531318", "text": "#-*-coding:utf-8-*-\n#首先本来就有ip池,然后测试这些给出的ip是否可用。\n#代理的时候,被访问的网址是http,代理也要对应http,https对应https\nfrom bs4 import BeautifulSoup\nimport requests\nimport random\n\ndef get_random_ip(ip_list):#构造ip\n proxy_ip = random.choice(ip_list)\n proxies = {'http': 'http://'+proxy_ip,'https': 'https://'+proxy_ip,}#最好这样构造,这样http,https都可以访问\n #proxies = {'http': 'http://' + proxy_ip }\n return proxies\n\nif __name__ == '__main__':\n url = 'http://httpbin.org/get'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n ip_list = ['110.52.235.184:9999']\n proxies = get_random_ip(ip_list)\n print(proxies)\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n res=requests.get(url,headers=headers,proxies=proxies)\n print(res.text)\n if res.status_code==200:\n print('这是有效的ip:',proxies)\n else:\n print('这不是有效的ip!',proxies)\n\n", "sub_path": "proxies/test_ip.py", "file_name": "test_ip.py", "file_ext": "py", "file_size_in_byte": 1227, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "random.choice", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "333006596", "text": "#!/usr/bin/env python\n\"\"\"\nTODO\n\"\"\"\nimport argparse\nimport cPickle as pickle\nimport dtw\nimport matplotlib.pyplot as plt\nimport librosa\nimport logging\nimport mir_eval\nimport numpy as np\nimport os\nimport pandas as pd\nimport scipy\nimport sklearn\nimport 
+{"seq_id": "333006596", "text": "#!/usr/bin/env python\n\"\"\"\nTODO\n\"\"\"\nimport argparse\nimport cPickle as pickle\nimport dtw\nimport matplotlib.pyplot as plt\nimport librosa\nimport logging\nimport mir_eval\nimport numpy as np\nimport os\nimport pandas as pd\nimport scipy\nimport sklearn\nimport time\n\nfrom joblib import Parallel, delayed\nimport msaf\nfrom msaf import jams2\n\n\n# Directory to store the features\nfeatures_dir = \"../features_beats\"\n\n# Distances to use for the DTW\ndist_dict = {\n    \"L1\": scipy.spatial.distance.cityblock,\n    \"L2\": scipy.spatial.distance.euclidean,\n    \"correlation\": scipy.spatial.distance.correlation\n}\n\n# Normalization techniques for the threshold and f-measure computation\nnorms = [\"none\", \"min\", \"max\", \"hmean\"]\n\n\ndef compute_threshold(intervals=None, labels=None, scores=None, norm=None):\n    \"\"\"Computes the thresholds for the given inputs.\n\n    Parameters\n    ----------\n    intervals : np.array\n        Estimated segment boundary intervals.\n    labels : np.array\n        Estimated segment labels.\n    scores : np.array\n        DTW scores.\n    norm : str\n        Normalization method.\n\n    Returns\n    -------\n    thr : float > 0\n        Threshold for which to optimally cut the DTW score matrix.\n    fmeasure : float > 0\n        F-measure resulting using threshold.\n    \"\"\"\n\n    label_agreement = np.zeros((len(labels), len(labels)), dtype=bool)\n\n    for i in range(len(labels)):\n        for j in range(i, len(labels)):\n            label_agreement[i, j] = (labels[i] == labels[j])\n            label_agreement[j, i] = label_agreement[i, j]\n\n    time_norm = 1\n\n    durations = np.diff(intervals, axis=1).ravel()\n\n    if norm == 'min':\n        time_norm = np.minimum.outer(durations, durations)\n\n    elif norm == 'max':\n        time_norm = np.maximum.outer(durations, durations)\n\n    elif norm == 'hmean':\n        time_norm = 2./np.add.outer(durations, durations)\n        time_norm *= np.multiply.outer(durations, durations)\n\n    # TODO: have the label agreement index out nan-valued scores\n\n    scores = scores / time_norm\n\n    label_agreement[np.tril_indices_from(label_agreement, k=0)] = False\n\n    label_agreement[~np.isfinite(scores)] = False\n\n    label_disagreement = ~label_agreement\n\n    label_disagreement[np.tril_indices_from(label_disagreement, k=0)] = False\n\n    label_disagreement[~np.isfinite(scores)] = False\n\n    tp_scores = scores[label_agreement]\n    fp_scores = scores[label_disagreement]\n\n    num_pos = np.sum(label_agreement)\n    num_neg = np.sum(label_disagreement)\n\n    if num_pos == 0 or num_neg == 0:\n        return 0.0, 0.0\n\n    y_true = np.concatenate([np.zeros(len(tp_scores)), np.ones(len(fp_scores))])\n    y_score = np.concatenate([tp_scores, fp_scores])\n\n    fpr, tpr, thr = sklearn.metrics.roc_curve(y_true, y_score)\n\n    tp = num_pos * tpr\n    fp = num_neg * fpr\n\n    precision = tp / (tp + fp)\n    recall = tpr\n\n    fmeasure = np.asarray([mir_eval.util.f_measure(p, r)\n                           for p, r in zip(precision, recall)])\n\n    k = np.argmax(fmeasure)\n\n    return thr[k], fmeasure[k]\n\n\ndef read_features(features_file):\n    \"\"\"Reads the features from the pickle file.\n    Parameters\n    ----------\n    features_file : str\n        Path to the features file.\n\n    Returns\n    -------\n    cqgram : np.array\n        Subseg-sync constant-Q power spectrogram.\n    intframes : np.array\n        The frame indeces.\n    \"\"\"\n    with open(features_file, \"r\") as f:\n        features = pickle.load(f)\n    return features[\"cqgram\"], features[\"intframes\"]\n\n\ndef save_features(cqgram, intframes, subseg, features_file):\n    \"\"\"Reads the features from the pickle file.\n    Parameters\n    ----------\n    cqgram : np.array\n        Subseg-sync constant-Q power spectrogram.\n    intframes : np.array\n        The frame indeces.\n    subseg : np.array\n        Subseq-index times.\n    features_file : str\n        Path to the output features file.\n    \"\"\"\n    features = {}\n    features[\"cqgram\"] = cqgram\n    features[\"intframes\"] = intframes\n    features[\"subseg\"] = subseg\n    with open(features_file, \"w\") as f:\n        pickle.dump(features, f, protocol=-1)\n\n\ndef compute_features(audio_file, intervals, level):\n    \"\"\"Computes the subseg-sync cqt features from the given audio file, if\n    they are not previously computed. Saves the results in the feat_dir folder.\n\n    Parameters\n    ----------\n    audio_file : str\n        Path to the audio file.\n    intervals : np.array\n        Intervals containing the estimated boundaries.\n    level : str\n        Level in the hierarchy.\n\n    Returns\n    -------\n    cqgram : np.array\n        Subseg-sync constant-Q power spectrogram.\n    intframes : np.array\n        The frame indeces.\n    \"\"\"\n    # Check if features have already been computed\n    if level == \"small_scale\":\n        features_file = os.path.join(features_dir, os.path.basename(audio_file).split('.')[0] +\n                                     \"_small_scale.mp3.pk\")\n    else:\n        features_file = os.path.join(features_dir, os.path.basename(audio_file) +\n                                     \".pk\")\n    if os.path.isfile(features_file):\n        return read_features(features_file)\n\n    y, sr = librosa.load(audio_file, sr=11025)\n\n    # Default hopsize is 512\n    hopsize = 512\n    cqgram = librosa.logamplitude(librosa.cqt(y, sr=sr, hop_length=hopsize)**2, ref_power=np.max)\n\n    # Track beats\n    y_harmonic, y_percussive = librosa.effects.hpss(y)\n    tempo, beats = librosa.beat.beat_track(y=y_percussive, sr=sr,\n                                           hop_length=hopsize)\n\n    # Synchronize\n    cqgram = librosa.feature.sync(cqgram, beats, aggregate=np.median)\n\n    intframes = None\n    if intervals is not None:\n        # convert intervals to frames\n        intframes = librosa.time_to_frames(intervals, sr=sr, hop_length=hopsize)\n\n        # Match intervals to subseg points\n        intframes = librosa.util.match_events(intframes, beats)\n\n    # Save the features\n    save_features(cqgram, intframes, beats, features_file)\n\n    return cqgram, intframes\n\n\ndef make_cost_matrix(audio_file, intervals, labels, dist, level):\n    \"\"\"Computes the cost matrix of the DTW from the given audio file.\n\n    Parameters\n    ----------\n    audio_file : str\n        Path to the audio file.\n    intervals : np.array\n        Intervals containing the estimated boundaries.\n    labels : np.array\n        Estimated segment labels.\n    dist : fun\n        Distance function to be used for the DTW\n    level : str\n        Level in the hierarchy.\n\n    Returns\n    -------\n    D : np.array\n        DTW scores.\n    P : list\n        List containing np.arrays() representing the DTW paths.\n    \"\"\"\n    # Computes the features (return existing ones if already computed)\n    cqgram, intframes = compute_features(audio_file, intervals, level)\n\n    # Score matrix\n    D = np.nan * np.zeros((len(labels), len(labels)), dtype=np.float32)\n    np.fill_diagonal(D, 0)\n\n    # Path matrix\n    P = []\n    for i in range(len(labels)):\n        P.append([np.nan] * len(labels))\n    for i in range(len(labels)):\n        P[i][i] = 0\n\n    for i in range(len(labels)):\n        x_slice = cqgram[:, intframes[i, 0]:intframes[i, 1]].T\n        if intframes[i, 1] - intframes[i, 0] < 2:\n            continue\n        for j in range(i+1, len(labels)):\n            if intframes[j, 1] - intframes[j, 0] < 2:\n                continue\n            y_slice = cqgram[:, intframes[j, 0]:intframes[j, 1]].T\n\n            dtw_cost, distance, path = dtw.dtw(x_slice, y_slice, dist=dist)\n            D[i, j] = dtw_cost\n            D[j, i] = D[i, j]\n            path = list(path)\n            path[0] = np.asarray(path[0], dtype=np.int32)\n            path[1] = np.asarray(path[1], dtype=np.int32)\n            P[i][j] = path\n\n    return D, P\n\n\ndef compute_score(file_struct, level, dist_key):\n    \"\"\"Computes the DTW scores for the given file.\n\n    Parameters\n    ----------\n    file_struct : FileStruct (msaf)\n        Object containing the struct.\n    level : str\n        Level of the hierarchy to be considered.\n    dist_key : str\n        Distance measure identifier.\n\n    Returns\n    -------\n    ret : dict\n        Dictionary with the results, including the following keys:\n            intervals : reference boundary intervals,\n            labels : reference segment labels,\n            scores : DTW scores,\n            paths : DTW paths,\n            thresholds : thresholds found for the different normalizations,\n            fmeasures : fmeasures computes for the different normalizations,\n            file_name : name of the file\n    \"\"\"\n    try:\n        ref_inter, ref_labels = jams2.converters.load_jams_range(\n            file_struct.ref_file, \"sections\", annotator=0, context=level)\n\n        assert len(ref_labels) > 0\n\n        D, P = make_cost_matrix(file_struct.audio_file, ref_inter, ref_labels,\n                                dist=dist_dict[dist_key], level=level)\n        thresholds = {}\n        fmeasures = {}\n        for norm in norms:\n            thresholds[norm], fmeasures[norm] = compute_threshold(\n                intervals=ref_inter, labels=ref_labels, scores=D, norm=norm)\n    except IndexError as e:\n        logging.warning(\"warning: problem computing threshold %s at level %s\" %\n                        (file_struct.audio_file, level))\n        ref_inter = None\n        ref_labels = None\n        D = None\n        P = None\n        thresholds = None\n        fmeasures = None\n    except (AssertionError, IOError) as e:\n        logging.warning(\"warning: no annotations for %s\" %\n                        file_struct.audio_file)\n        ref_inter = None\n        ref_labels = None\n        D = None\n        P = None\n        thresholds = None\n        fmeasures = None\n    finally:\n        cqgram, intframes = compute_features(file_struct.audio_file, None, level)\n        ret = {\n            \"intervals\": ref_inter,\n            \"labels\": ref_labels,\n            \"scores\": D,\n            \"paths\": P,\n            \"thresholds\": thresholds,\n            \"fmeasures\": fmeasures,\n            \"file_name\": os.path.basename(file_struct.audio_file)\n        }\n        return ret\n\n\ndef save_results(dataset, level, dist_key, scores):\n    \"\"\"Saves the results.\n\n    Parameters\n    ----------\n    dataset : str\n        Name of the dataset.\n    level : str\n        Level of dataset being considered.\n    dist_key : str\n        Type of distance\n    scores : dict\n        Dictionary containing the scores for all the files in the dataset.\n    \"\"\"\n    result = {\n        \"level\": level,\n        \"dist\": dist_key,\n        \"file_scores\": scores\n    }\n    out_file = \"scores_datasetE%s_levelE%s_distE%s.pk\" % (dataset, level,\n                                                          dist_key)\n    with open(out_file, \"w\") as f:\n        pickle.dump(result, f, protocol=-1)\n\n\ndef main(ds_path, n_jobs):\n    \"\"\"Main function to compute DTW scores for a given root dataset and\n    number of processors.\n\n    Parameters\n    ----------\n    ds_path : str\n        Path to the root of the dataset.\n    n_jobs : int > 0\n        Number of processes to use.\n    \"\"\"\n\n    # Datasets from which to compute the DTWs\n    datasets = [\"SALAMI\", \"Isophonics\"]\n\n    # Different levels for the datasets\n    dataset_levels = {\n        \"Isophonics\": [\"function\"],\n        #\"SALAMI\": [\"function\", \"large_scale\", \"small_scale\"]\n        #\"SALAMI\": [\"function\", \"large_scale\"]\n        \"SALAMI\": [\"function\"]\n    }\n\n    # Make sure the features folder exists\n    msaf.utils.ensure_dir(features_dir)\n\n    # Main loop\n    for dataset in datasets:\n        # Obtain all the files for the given dataset\n        files = msaf.io.get_dataset_files(ds_path, ds_name=dataset)\n\n        # Compute results for the specific level and distance\n        for level in dataset_levels[dataset]:\n            for dist_key in dist_dict.keys():\n                if dataset != \"SALAMI\" and level != \"function\":\n                    continue\n                logging.info(\"Computing: %s, %s, %s\" %\n                             (dataset, level, dist_key))\n\n                # Compute scores using multiple cpus\n                scores = Parallel(n_jobs=n_jobs)(delayed(compute_score)(\n                    file_struct, level, dist_key)\n                    for file_struct in files[:])\n\n                # Save all results\n                save_results(dataset, level, dist_key, scores)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description=\"Computes the DTW scores, paths, thresholds, and f-measures\"\n        \" for multiple collections.\",\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument(\"ds_path\",\n                        action=\"store\",\n                        help=\"Input path to dataset\")\n    parser.add_argument(\"-j\",\n                        action=\"store\",\n                        dest=\"n_jobs\",\n                        default=1,\n                        type=int,\n                        help=\"The number of threads to use\")\n\n    args = parser.parse_args()\n    start_time = time.time()\n\n    # Setup the logger\n    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',\n                        level=logging.INFO)\n\n    # Call main function\n    main(args.ds_path, args.n_jobs)\n\n    # Done!\n    logging.info(\"Done! Took %.2f seconds.\" % (time.time() - start_time))\n", "sub_path": "code/compute_dtw.py", "file_name": "compute_dtw.py", "file_ext": "py", "file_size_in_byte": 13207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "scipy.spatial", "line_number": 29, "usage_type": "attribute"}, {"api_name": "scipy.spatial", "line_number": 30, "usage_type": "attribute"}, {"api_name": "scipy.spatial", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.minimum.outer", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.maximum.outer", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.add.outer", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.multiply.outer", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.tril_indices_from", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.tril_indices_from", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 105, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 115, "usage_type": "call"}, {"api_name": "mir_eval.util.f_measure", "line_number": 115, "usage_type": "call"}, {"api_name": "mir_eval.util", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 118, "usage_type": "call"}, {"api_name": "cPickle.load", "line_number": 138, "usage_type": "call"}, {"api_name": "cPickle.dump", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "librosa.load", "line_number": 193, "usage_type": "call"}, {"api_name": "librosa.logamplitude", "line_number": 197, "usage_type": "call"}, {"api_name": "librosa.cqt", "line_number": 197, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 197, "usage_type": "attribute"}, {"api_name": "librosa.effects.hpss", "line_number": 200, "usage_type": "call"}, {"api_name": "librosa.effects", "line_number": 200, "usage_type": "attribute"}, {"api_name": "librosa.beat.beat_track", "line_number": 201, "usage_type": "call"}, {"api_name": "librosa.beat", "line_number": 201, "usage_type": "attribute"}, {"api_name": "librosa.feature.sync", "line_number": 205, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.median", "line_number": 205, "usage_type": "attribute"}, {"api_name": "librosa.time_to_frames", "line_number": 210, "usage_type": "call"}, {"api_name": "librosa.util.match_events", "line_number": 213, "usage_type": "call"}, {"api_name": "librosa.util", "line_number": 213, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 248, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 248, "usage_type": "attribute"}, {"api_name": "numpy.fill_diagonal", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 254, "usage_type": "attribute"}, {"api_name": "dtw.dtw", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 271, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 272, "usage_type": "attribute"}, {"api_name": "msaf.jams2.converters.load_jams_range", "line_number": 303, "usage_type": "call"}, {"api_name": "msaf.jams2.converters", "line_number": 303, "usage_type": "attribute"}, {"api_name": "msaf.jams2", "line_number": 303, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 316, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 325, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 342, "usage_type": "call"}, {"api_name": "os.path", "line_number": 342, "usage_type": "attribute"}, {"api_name": "cPickle.dump", "line_number": 369, "usage_type": "call"}, {"api_name": "msaf.utils.ensure_dir", "line_number": 396, "usage_type": "call"}, {"api_name": "msaf.utils", "line_number": 396, "usage_type": "attribute"}, {"api_name": "msaf.io.get_dataset_files", "line_number": 401, "usage_type": "call"}, {"api_name": "msaf.io", "line_number": 401, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 408, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 412, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 412, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 421, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 424, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 436, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 439, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 440, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 446, "usage_type": "call"}, {"api_name": "time.time", "line_number": 446, "usage_type": "call"}]}
+{"seq_id": "183518312", "text": "from factory import DjangoModelFactory, LazyAttribute, post_generation\n\nfrom demo_models.models import Bar\n\nfrom .factory_faker import Faker\nfrom .demo_models_foo import FooFactory\n\n__all__ = (\n    'BarFactory',\n)\n\n\nclass BaseBarFactory(DjangoModelFactory):\n    \"\"\"Base Bar factory.\"\"\"\n\n    char_field = Faker('text', max_nb_chars=50)\n\n    class Meta(object):\n        \"\"\"Meta class.\"\"\"\n\n        model = Bar\n        abstract = True\n\n    @post_generation\n    def foos(obj, created, extracted, **kwargs):\n        \"\"\"Create `Foo` objects for the created `Bar` instance.\"\"\"\n        if created:\n            # Create 4 `Foo` objects.\n            foos = FooFactory.create_batch(4, **kwargs)\n            obj.foos.add(*foos)\n\n\nclass BarFactory(BaseBarFactory):\n    \"\"\"Bar factory.\"\"\"\n", "sub_path": "report_builder_demo/factories/demo_models_bar.py", "file_name": "demo_models_bar.py", "file_ext": "py", "file_size_in_byte": 774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "factory.DjangoModelFactory", "line_number": 13, "usage_type": "name"}, {"api_name": "factory_faker.Faker", "line_number": 16, "usage_type": "call"}, {"api_name": "demo_models.models.Bar", "line_number": 21, "usage_type": "name"}, {"api_name": "demo_models_foo.FooFactory.create_batch", "line_number": 29, "usage_type": "call"}, {"api_name": "demo_models_foo.FooFactory", "line_number": 29, "usage_type": "name"}, {"api_name": "factory.post_generation", "line_number": 24, "usage_type": "name"}]}
+{"seq_id": "105187528", "text": "import warnings\nfrom datetime import datetime\n\nimport pytz\n\nfrom HardCode.scripts.cheque_bounce_analysis.Cheque_Bounce import cheque_user\nfrom HardCode.scripts.parameters_for_bl0.loan_app.loan_app_count_validate import loan_app_percentage\nfrom HardCode.scripts.parameters_for_bl0.rejection_msgs.rejecting_apps_count import get_app_rejection_count\nfrom HardCode.scripts.parameters_for_bl0.relative_verification.relative_validation import rel_validate\nfrom HardCode.scripts.loan_analysis.overdue_details import get_overdue_details\nfrom HardCode.scripts.update_analysis import update\nfrom HardCode.scripts.parameters_for_bl0.salary.last_5_salary import latest_salary\nfrom HardCode.scripts.parameters_for_bl0.available_balance.last_month_avbl_bal import average_balance\nfrom HardCode.scripts.Util import conn, logger_1\n\nwarnings.filterwarnings(\"ignore\")\n\ndef exception_feeder(**kwargs):\n    client = kwargs.get('client')\n    msg = kwargs.get('msg')\n    user_id = kwargs.get('user_id')\n\n    logger = logger_1('exception_feeder',user_id)\n\n    logger.error(msg)\n    r = {'status': False, 'message': msg,\n         'modified_at': str(datetime.now(pytz.timezone('Asia/Kolkata'))), 'cust_id': user_id,'result':False,'result_type':'kyc'}\n    if client:\n        client.exception.before_kyc.insert_one(r)\n    return r\n\ndef result_output_false(msg):\n    return {'status': False, 'message': msg,\"result\":False,\"result_type\":\"kyc\"}\n\ndef result_output_block(months,reason):\n    return {'status': True, 'message': \"success\",\"result\":False,\"result_type\":\"kyc\",\n            \"months\":months,\"reason\":reason}\n\ndef before_kyc_function(**kwargs):\n    user_id = kwargs.get('user_id')\n    sms_json = kwargs.get('sms_json')\n    app_data = kwargs.get('app_data')\n    contacts = kwargs.get('contacts')\n    profession = kwargs.get('profession')\n\n    sms_count = len(sms_json)\n\n\n    # ==> creating logger and checking user_id\n    logger = logger_1('bl0', user_id)\n    if not isinstance(user_id, int):\n        try:\n            logger.info(\"user_id not int converting into int\")\n            user_id = int(user_id)\n            logger.info(\"user_id successfully converted into int\")\n        except BaseException as e:\n            return exception_feeder(user_id=-1, msg='user_id has a issue got id' + str(user_id))\n    try:\n        logger.info('making connection with data')\n        client = conn()\n    except BaseException as e:\n        logger.critical('error in connection')\n        return exception_feeder(user_id=user_id, msg=\"Exception in making data-\"+str(e))\n    logger.info('connection success')\n\n    # ==> fetching data\n    data = client.dynamic_input.before_kyc({\"input\":\"before_kyc\"})\n    # >>==>> dynamic inputs\n    sms_count_check = data['check_sms']\n    sms_count_variable = data['variable_sms'] # 300\n    sms_count_months = data['month_sms'] # 3\n    loan_app_percentage_check = data['check_app_percentage']\n    loan_app_percentage_variable = data['variable_app_percentage']# 0.7\n    loan_app_percentage_months = data['month_app_percentage'] # -1\n    relatives_check = data['check_relatives']\n    relatives_variable = data['variable_relatives'] # 3\n    relatives_months = data['month_relatives'] # 2\n    loan_rejection_check = data['check_rejection']\n    # loan_rejection_premium_app_variable = 0\n    # loan_rejection_premium_app_months = 3\n    loan_rejection_normal_app_variable = data['variable_normal_rejection']# 5\n    loan_rejection_normal_app_months = data['month_normal_rejection']# 3\n    loan_overdue_check = data['check_overdue']\n    loan_overdue_variable = data['variable_overdue']# 15\n    loan_overdue_months = data['month_overdue']# 2\n    cheque_bounce_check = data['check_cheque']\n    cheque_bounce_variable = data['variable_cheque']# 3\n    cheque_bounce_months = data['month_cheque'] #2\n    salaried_check = data['check_salaried']\n    salaried_months = data['month_salaried'] #2\n    salaried_variable = data['variable_salaried'] # 0\n    last_month_balance1 = data['last_month_balance1'] #1500\n    last_month_balance2 = data['last_month_balance2'] #3000\n    sms_new_credit_variable = data['sms_new_credit_variable'] #400\n    total_loans_check = data['total_loans_check']\n    total_loans_variable = data['total_loans_variable'] #2\n    # ===> Analysis\n    try:\n        result = update(user_id=user_id,sms_json=sms_json)\n        if not result['status']:\n            msg = \"sms updation failed due to some reason-\" + result['message']\n            logger.error(msg)\n            exception_feeder(client=client, user_id=user_id,\n                             msg=msg)\n            return result_output_false(msg)\n    except BaseException as e:\n        msg = \"sms updation failed due to some reason-\"+str(e)\n        logger.error(msg)\n        exception_feeder(client=client, user_id=user_id,\n                         msg=msg)\n        return result_output_false(msg)\n    # ===> Analysis Complete\n\n    # >>==>> Data Extraction\n    logger.info(\"Starting Relatives Validation\")\n    try:\n        result_relatives = rel_validate(user_id,contacts)\n        if not result_relatives['status']:\n            msg = \"rejection check failed due to some reason-\"+result_relatives['message']\n            logger.error(msg)\n            exception_feeder(client=client, user_id=user_id,\n                             msg=msg)\n            return result_output_false(msg)\n    except BaseException as e:\n        msg = \"relatives validation failed due to some reason-\"+str(e)\n        logger.error(msg)\n        exception_feeder(client=client, user_id=user_id,\n                         msg=msg)\n        return result_output_false(msg)\n    logger.info('relatives validation function complete')\n\n    # >>=>> Rejection check\n    logger.info('starting rejection check')\n    try:\n        result_rejection = get_app_rejection_count(user_id) # returns a dictionary\n        if not result_rejection['status']:\n            msg = \"rejection check failed due to some reason-\"+result_rejection['message']\n            logger.error(msg)\n            exception_feeder(client=client, user_id=user_id,\n                             msg=msg)\n            return result_output_false(msg)\n    except BaseException as e:\n        msg = \"rejection check failed due to some reason-\"+str(e)\n        logger.error(msg)\n        exception_feeder(client=client, user_id=user_id,\n                         msg=msg)\n        return result_output_false(msg)\n    logger.info('rejection check complete')\n\n\n    # >>=>> Overdue_details\n    logger.info('starting overdue fetch')\n    try:\n        result_overdue = get_overdue_details(user_id)\n        overdue_days = len (result_overdue['result']['overdue_days_list'])\n        if not result_overdue['status']:\n            msg = \"overdue check failed due to some reason-\"+result_overdue['message']\n            logger.error(msg)\n            exception_feeder(client=client, user_id=user_id,\n                             msg=msg)\n            return result_output_false(msg)\n    except BaseException as e:\n        msg = \"overdue check failed due to some reason-\"+str(e)\n        logger.error(msg)\n        exception_feeder(client=client, user_id=user_id,\n                         msg=msg)\n        return result_output_false(msg)\n    logger.info('overdue check complete')\n\n\n    # >>==>> Loan app percentage\n    logger.info(\"Starting loan app percentage\")\n    try:\n        result_app_percentage = loan_app_percentage(user_id=user_id,app_data=app_data)\n\n        if not result_app_percentage['status']:\n            msg = \"loan app percentage failed due to some reason-\"+result_app_percentage['message']\n            logger.error(msg)\n            exception_feeder(client=client, user_id=user_id,\n                             msg=msg)\n            return result_output_false(msg)\n    except BaseException as e:\n        msg = \"loan app percentage failed due to some reason-\"+str(e)\n        logger.error(msg)\n        exception_feeder(client=client, user_id=user_id,\n                         msg=msg)\n        return result_output_false(msg)\n\n\n    # >>==>> Cheque Bounce\n    logger.info(\"Starting cheque bounce\")\n    try:\n        result_cheque = cheque_user(user_id=user_id)\n\n        if not result_cheque['status']:\n            msg = \"cheque failed due to some reason-\"+result_cheque['message']\n            logger.error(msg)\n            exception_feeder(client=client, user_id=user_id,\n                             msg=msg)\n            return result_output_false(msg)\n    except BaseException as e:\n        msg = \"cheque failed due to some reason-\"+str(e)\n        logger.error(msg)\n        exception_feeder(client=client, user_id=user_id,\n                         msg=msg)\n        return result_output_false(msg)\n\n    # >>==>> Last month Salary\n    logger.info(\"Starting Last month Salary\")\n    try:\n        salary = latest_salary(user_id)\n\n        if not salary['status']:\n            msg = \"Last month Salary failed due to some reason-\"+salary['message']\n            logger.error(msg)\n            exception_feeder(client=client, user_id=user_id,\n                             msg=msg)\n            return result_output_false(msg)\n    except BaseException as e:\n        msg = \"Last month Salary failed due to some reason-\"+str(e)\n        logger.error(msg)\n        exception_feeder(client=client, user_id=user_id,\n                         msg=msg)\n        return result_output_false(msg)\n\n    # >>==>> Average Available Balance\n    logger.info(\"Starting Average Available Balance\")\n    try:\n        balance = average_balance(user_id)\n\n        if not balance['status']:\n            msg = \"Average Available Balance failed due to some reason-\"+balance['message']\n            logger.error(msg)\n            exception_feeder(client=client, user_id=user_id,\n                             msg=msg)\n            return result_output_false(msg)\n    except BaseException as e:\n        msg = \"Average Available Balance failed due to some reason-\"+str(e)\n        logger.error(msg)\n        exception_feeder(client=client, user_id=user_id,\n                         msg=msg)\n        return result_output_false(msg)\n\n    # ===> Data extraction complete\n\n    # ===> Updating data\n    logger.info(\"making dict for data\")\n    dict_update ={\"sms_count\":sms_count,\"loan_app_percentage\":result_app_percentage['percentage'],'modified_at': str(datetime.now(pytz.timezone('Asia/Kolkata')))}\n    dict_update['relatives_count']=result_relatives\n    dict_update['normal_app_rejection']=result_rejection['normal_app']\n    dict_update['overdue_days']=overdue_days\n    dict_update['cheque_bounce_count']=result_cheque['count']\n    dict_update['average_balance'] = balance['last_avbl_bal']\n    dict_update['salary'] = salary['salary']\n    client.analysis.parameters.update_one({\"cust_id\":user_id},{\"$push\":{'parameter_1': dict_update}},upsert=True)\n    logger.info(\"data updated\")\n    #==> Data Updation Complete\n    logger.info(\"checking parameters\")\n    if sms_count_check:\n        if sms_count<sms_count_variable:\n            return result_output_block(months = sms_count_months,reason=\"sms_count\")\n\n    if loan_app_percentage_check:\n        if result_app_percentage['percentage']>loan_app_percentage_variable:\n            return result_output_block(months = loan_app_percentage_months,reason=\"loan_app_percentage\")\n\n    if relatives_check:\n        if result_relatives>relatives_variable:\n            return result_output_block(months = relatives_months,reason=\"relatives_count\")\n\n    if loan_rejection_check:\n        # if result_rejection['premium_app']>loan_rejection_premium_app_variable:\n        #     return result_output_block(months = loan_rejection_premium_app_months,reason=\"loan_rejection_premium_app\")\n        if result_rejection['normal_app']>loan_rejection_normal_app_variable:\n            return result_output_block(months = loan_rejection_normal_app_months,reason=\"loan_rejection_normal_app\")\n\n    if loan_overdue_check:\n        if overdue_days > loan_overdue_variable:\n            return result_output_block(months = loan_overdue_months,reason=\"loan_overdue\")\n\n    if cheque_bounce_check:\n        if result_cheque['count'] > cheque_bounce_variable:\n            return result_output_block(months=cheque_bounce_months, reason=\"cheque_bounce\")\n\n    logger.info(\"user passed from kyc\")\n\n    if total_loans_check:\n        if result['total_loans'] < total_loans_variable:\n\n            if salaried_check :\n                if salary['salary'] > salaried_variable:\n                    if sms_count < sms_count_variable and balance['last_avbl_bal'] < last_month_balance1:\n                        return result_output_block(months=salaried_months, reason=\"last_month_balance_or_sms_count\")\n\n                else: # profession yet to add\n                    if balance['last_avbl_bal'] < last_month_balance2 and sms_count < sms_new_credit_variable :\n                        return result_output_block(months=salaried_months, reason=\"last_month_balance_or_sms_count\")\n\n    logger.info(\"new user passed from cibil\")\n\n    return {\n        \"status\":True,\n        \"message\":\"success\",\n        \"result\": True,\n        \"result_type\": \"kyc\"\n    }\n", "sub_path": "HardCode/scripts/before_kyc.py", "file_name": "before_kyc.py", "file_ext": "py", "file_size_in_byte": 13026, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "warnings.filterwarnings", "line_number": 16, "usage_type": "call"}, {"api_name": "HardCode.scripts.Util.logger_1", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 27, "usage_type": "call"}, {"api_name": "HardCode.scripts.Util.logger_1", "line_number": 50, "usage_type": "call"}, {"api_name": "HardCode.scripts.Util.conn", "line_number": 60, "usage_type": "call"}, {"api_name": "HardCode.scripts.update_analysis.update", "line_number": 99, "usage_type": "call"}, {"api_name": "HardCode.scripts.parameters_for_bl0.relative_verification.relative_validation.rel_validate", "line_number": 117, "usage_type": "call"}, {"api_name": "HardCode.scripts.parameters_for_bl0.rejection_msgs.rejecting_apps_count.get_app_rejection_count", "line_number": 135, "usage_type": "call"}, {"api_name": "HardCode.scripts.loan_analysis.overdue_details.get_overdue_details", "line_number": 154, "usage_type": "call"}, {"api_name": "HardCode.scripts.parameters_for_bl0.loan_app.loan_app_count_validate.loan_app_percentage", "line_number": 174, "usage_type": "call"}, {"api_name": "HardCode.scripts.cheque_bounce_analysis.Cheque_Bounce.cheque_user", "line_number": 193, "usage_type": "call"}, {"api_name": "HardCode.scripts.parameters_for_bl0.salary.last_5_salary.latest_salary", "line_number": 211, "usage_type": "call"}, {"api_name": "HardCode.scripts.parameters_for_bl0.available_balance.last_month_avbl_bal.average_balance", "line_number": 229, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 248, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 248, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 248, "usage_type": "call"}]}
+{"seq_id": "402003442", "text": "# -*- coding: UTF-8 -*-\nfrom auxpackage.tookit.analysis_package import ap\nfrom pandas.plotting import register_matplotlib_converters\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\nregister_matplotlib_converters()\n\n\ndef draw_price(l_close, l_each_benchmark_pct):\n    \"\"\"\n    画出基准曲线图\n    :param l_close: DataFrame.Series, 收盘价列表\n    :param l_each_benchmark_pct: list, 基准每个Kline的change_pct\n    :return:\n    \"\"\"\n    ap.sound(f'entry: draw_price')\n    plt.figure(figsize=(18, 8))\n    ax_close = plt.subplot(2, 1, 1)\n    ax_close.plot(l_close, color='teal')\n    ax_pct = plt.subplot(2, 1, 2)\n    ax_pct.plot(l_each_benchmark_pct, color='grey')\n    plt.show()\n\n\ndef draw_signal(l_close, l_signal):\n    \"\"\"\n    画出基准价格和信号\n    :param l_close: DataFrame.Series, 收盘价列表\n    :param l_signal: DataFrame.Series, 信号列表\n    :return:\n    \"\"\"\n    ap.sound(f'entry: draw_signal')\n    plt.figure(figsize=(18, 8))\n    ax_close = plt.subplot(2, 1, 1)\n    ax_close.plot(l_close, color='teal')\n    ax_signal = plt.subplot(2, 1, 2)\n    ax_signal.bar(x=l_signal.index, height=l_signal.values, color='grey')\n    plt.show()\n\n\ndef draw_back(l_cum_benchmark_pct, l_cum_strategy_pct):\n    \"\"\"\n    画出策略收益曲线和基准收益曲线\n    :param l_cum_benchmark_pct: array, 累计基准收益率\n    :param l_cum_strategy_pct: array, 累计策略收益率\n    :return:\n    \"\"\"\n    ap.sound(f'entry: draw_back')\n    plt.figure(figsize=(18, 8))\n    plt.plot(l_cum_benchmark_pct, color='teal')\n    plt.plot(l_cum_strategy_pct, color='grey')\n    plt.legend(['benchmark', 'strategy yield curve'], loc=\"best\")\n    plt.show()\n\n\ndef draw_srtoke_distribution(l_stroke_holdtime, l_stroke_pct):\n    \"\"\"\n    画出每一笔交易的收益和持仓时间分布图\n    :param l_stroke_holdtime: array, 每笔交易的持有时间列表\n    :param l_stroke_pct: array, 每笔交易的收益\n    :return:\n    \"\"\"\n    ap.sound(f'entry: draw_srtoke_distribution')\n    df = pd.DataFrame()\n    df['holdtime'] = l_stroke_holdtime\n    df['strategy'] = l_stroke_pct\n    with sns.axes_style(\"dark\"):\n        sns.jointplot('holdtime', 'strategy', data=df,\n                      kind='kde', color='grey', space=0, pct=6)\n    plt.show()\n", "sub_path": "ning/auxpackage/loop_backtest/module/m_draw.py", "file_name": "m_draw.py", "file_ext": "py", "file_size_in_byte": 2281, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.plotting.register_matplotlib_converters", "line_number": 8, "usage_type": "call"}, {"api_name": "auxpackage.tookit.analysis_package.ap.sound", "line_number": 18, "usage_type": "call"}, {"api_name": "auxpackage.tookit.analysis_package.ap", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "auxpackage.tookit.analysis_package.ap.sound", "line_number": 34, "usage_type": "call"}, {"api_name": "auxpackage.tookit.analysis_package.ap", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "auxpackage.tookit.analysis_package.ap.sound", "line_number": 50, "usage_type": "call"}, {"api_name": "auxpackage.tookit.analysis_package.ap", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "auxpackage.tookit.analysis_package.ap.sound", "line_number": 65, "usage_type": "call"}, {"api_name": "auxpackage.tookit.analysis_package.ap", "line_number": 65, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 66, "usage_type": "call"}, {"api_name": "seaborn.axes_style", "line_number": 69, "usage_type": "call"}, {"api_name": "seaborn.jointplot", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}]}
sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nimport csv,numpy as np,pandas as pd\nimport os\n\ndata = pd.read_csv(\"Training.csv\")\ndf = pd.DataFrame(data)\ndf.head()\ncols = df.columns\nprint()\ncols = cols[:-1]\n#print(cols)\nx = df[cols]\ny = df['prognosis']\n#print(y)\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)\n\n#print (\"DecisionTree\")\n#dt = DecisionTreeClassifier()\ndt = RandomForestClassifier()\nclf_dt=dt.fit(x_train,y_train)\n#print(clf_dt)\n#print (\"Acurracy: \", clf_dt.score(x_test,y_test))\n# with open('templates/Testing.csv', newline='') as f:\n# reader = csv.reader(f)\n# symptoms = next(reader)\n# symptoms = symptoms[:len(symptoms)-1]\n\nindices = [i for i in range(132)]\n#print(indices)\nsymptoms = df.columns.values[:-1]\n\ndictionary = dict(zip(symptoms,indices))\n#print(dictionary)\n\ndef dosomething(symptom):\n user_input_symptoms = symptom\n user_input_label = [0 for i in range(132)]\n for i in user_input_symptoms:\n #print(i)\n idx = dictionary[i]\n user_input_label[idx] = 1\n\n user_input_label = np.array(user_input_label)\n user_input_label = user_input_label.reshape((-1,1)).transpose()\n disease = dt.predict(user_input_label)\n #print(disease[0])\n return disease[0]\n\nprint(dosomething(['polyuria','chest_pain','vomiting']))\nprint(dosomething(['dark_urine','yellowish_skin','high_fever']))\nprint(dosomething(['nausea','headache','high_fever','diarrhoea']))\nprint(dosomething(['malaise','sweating','high_fever','fatigue','loss_of_appetite']))\n\n\n#prediction = []\n#for i in range(7):\n #pred = dosomething(['headache','muscle_weakness','puffy_face_and_eyes','mild_fever','skin_rash']) \n #prediction.append(pred) \n#print(prediction)headache,muscle_weakness,puffy_face_and_eyes,mild_fever,skin_rash", "sub_path": "Mobile_app/medicalchatbot/checkdisease.py", "file_name": "checkdisease.py", "file_ext": "py", "file_size_in_byte": 1920, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.read_csv", "line_number": 7, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 17, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "193811136", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2019. Mike Herbert\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\nimport logging\nimport re\nfrom difflib import SequenceMatcher\n\nfrom geofinder import GeoKeys, Geodata, Loc\n\n\nclass MatchScore:\n \"\"\"\n Calculate how close the database result place name is to the users input place name.\n 1) Recursively remove largest text sequence in both to end up with just mismatched text\n 2) Calculate the percent that didnt match in each comma separated term of user input\n 3) Score is based on percent mismatch weighted for each term (City is higher, county is lower)\n\n A standard text difference, such as Levenstein, was not used because those treat both strings as equal, whereas this\n treats the User text as more important than DB result and also weights each token\n\n \"\"\"\n\n def __init__(self):\n self.logger = logging.getLogger(__name__)\n self.weight = [0.7, 1.0, 0.2, 0.6, 0.9]\n\n # Out weight + Feature weight must be less than 1.0.\n self.out_weight = 0.17\n self.feature_weight = 0.06\n if self.out_weight + self.feature_weight > 1.0:\n self.logger.error('Out weight + Feature weight must be less than 1.0')\n\n self.wildcard_penalty = 20.0\n self.first_token_match_bonus = 27.0\n self.wrong_order_penalty = 2.0\n\n def match_score(self, inp_place: Loc.Loc, res_place: Loc.Loc) -> int:\n \"\"\"\n :param inp_place: Input place structure with users text\n :param res_place: Result place structure with DB result\n :return: score 0-100 reflecting the difference between the user input and the result. 0 is perfect match, 100 is no match\n Score is also adjusted based on Feature type. More important features (large city) get lower result\n \"\"\"\n inp_len = [0] * 5\n num_inp_tokens = 0.0\n in_score = 0\n\n # Create full place title (prefix,city,county,state,country) from input place.\n inp_title = inp_place.get_five_part_title()\n inp_title = GeoKeys.normalize_match_title(inp_title, inp_place.country_iso)\n inp_tokens = inp_title.split(',')\n\n # Create full place title (prefix,city,county,state,country) from result place\n res_place.prefix = ' '\n res_title = res_place.get_five_part_title()\n res_title = GeoKeys.normalize_match_title(res_title, res_place.country_iso)\n res_tokens = res_title.split(',')\n\n # Store length of original input tokens. 
This is used for percent unmatched calculation\n for it, tk in enumerate(inp_tokens):\n inp_tokens[it] = inp_tokens[it].strip(' ')\n inp_len[it] = len(inp_tokens[it])\n\n # Create a list of all the words in result and save result len for percent calc\n res_word_list = ', '.join(map(str, res_tokens))\n orig_res_len = len(res_word_list)\n\n # Create a list of all the words in input\n input_words = ', '.join(map(str, inp_tokens))\n\n # Remove any matching sequences in input list and result\n res_word_list, input_words = self.remove_matching_sequences(res_word_list, input_words)\n\n # For each input token calculate percent of new (unmatched) size vs original size\n unmatched_input_tokens = input_words.split(',')\n\n # Each token in place hierarchy gets a different weighting\n # Prefix, city,county, state, country\n score_diags = ''\n\n # Calculate percent of USER INPUT text that was unmatched, then apply weighting\n for idx, tk in enumerate(inp_tokens):\n if inp_len[idx] > 0:\n unmatched_percent = int(100.0 * len(unmatched_input_tokens[idx].strip(' ')) / inp_len[idx])\n in_score += unmatched_percent * self.weight[idx]\n score_diags += f' {idx}) [{tk}]{inp_len[idx]} {unmatched_percent}% * {self.weight[idx]} '\n # self.logger.debug(f'{idx}) Rem=[{unmatched_input_tokens[idx].strip(\" \" )}] wgtd={unmatched_percent * self.weight[idx]}')\n num_inp_tokens += 1.0 * self.weight[idx]\n # self.logger.debug(f'{idx} [{inp_tokens2[idx]}:{inp_tokens[idx]}] rawscr={sc}% orig_len={inp_len[idx]} wgt={self.weight[idx]}')\n if idx < 2:\n # If the full first or second token of the result is in input then improve score\n # Bonus for a full match as against above partial matches\n if res_tokens[idx] in inp_tokens[idx]:\n in_score -= self.first_token_match_bonus\n\n # Average over number of tokens (with fractional weight). Gives 0-100% regardless of weighting and number of tokens\n in_score = in_score / num_inp_tokens\n # self.logger.debug(f'raw in={in_score} numtkn={num_inp_tokens}')\n\n # Calculate percent of DB RESULT text that was unmatched\n if orig_res_len > 0:\n out_score = int(100.0 * len(res_word_list.strip(' ')) / orig_res_len)\n # self.logger.debug(f\"Out=[{res_word_list.strip(' ')}] orig_len={orig_res_len}\")\n else:\n out_score = 0\n\n if not inp_place.standard_parse:\n # If Tokens were not in hierarchical order, give penalty\n parse_penalty = self.wrong_order_penalty\n else:\n parse_penalty = 0.0\n\n if '*' in inp_place.original_entry:\n # if it was a wildcard search it's hard to rank - add a penalty\n wildcard_penalty = self.wildcard_penalty\n else:\n wildcard_penalty = 0.0\n\n # Feature score is to ensure \"important\" places get higher rank (large city, etc)\n feature_score = Geodata.Geodata.get_priority(res_place.feature)\n\n # Add up scores - Each item is 0-100 and weighed as below\n in_weight = 1.0 - self.out_weight - self.feature_weight\n\n score = in_score * in_weight + out_score * self.out_weight + feature_score * self.feature_weight + parse_penalty + wildcard_penalty\n\n # self.logger.debug(f'SCORE {score:.1f} [{res_title}] out={out_score * out_weight:.1f} '\n # f'in={in_score:.1f} feat={feature_score * feature_weight:.1f} parse={parse_penalty}\\n {score_diags}')\n\n return score\n\n def _remove_matching_seq(self, text1: str, text2: str, attempts: int) -> (str, str):\n \"\"\"\n Find largest matching sequence. 
Remove it in text1 and text2.\n Private - called by remove_matching_sequences which provides a wrapper\n Call recursively until attempts hits zero or there are no matches longer than 1 char\n :param text1:\n :param text2:\n :param attempts: Number of times to remove largest text sequence\n :return:\n \"\"\"\n s = SequenceMatcher(None, text1, text2)\n match = s.find_longest_match(0, len(text1), 0, len(text2))\n if match.size > 1:\n # Remove matched sequence from inp and out\n item = text1[match.a:match.a + match.size]\n text2 = re.sub(item, '', text2)\n text1 = re.sub(item, '', text1)\n if attempts > 0:\n # Call recursively to get next largest match and remove it\n text1, text2 = self._remove_matching_seq(text1, text2, attempts - 1)\n return text1, text2\n\n def remove_matching_sequences(self, text1: str, text2: str) -> (str, str):\n \"\"\"\n Find largest sequences that match between text1 and 2. Remove them from text1 and text2.\n :param text1:\n :param text2:\n :return:\n \"\"\"\n # Prepare strings for input to remove_matching_seq\n # Swap all commas in text1 string to '@'. This way they will never match comma in text2 string\n # Ensures we don;t remove commas and don't match across tokens\n text2 = re.sub(',', '@', text2)\n text1, text2 = self._remove_matching_seq(text1=text1, text2=text2, attempts=15)\n # Restore commas in inp\n text2 = re.sub('@', ',', text2)\n return text1.strip(' '), text2.strip(' ')\n", "sub_path": "geofinder/MatchScore.py", "file_name": "MatchScore.py", "file_ext": "py", "file_size_in_byte": 8745, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 39, "usage_type": "call"}, {"api_name": "geofinder.Loc.Loc", "line_number": 52, "usage_type": "attribute"}, {"api_name": "geofinder.Loc", "line_number": 52, "usage_type": "name"}, {"api_name": "geofinder.GeoKeys.normalize_match_title", "line_number": 65, "usage_type": "call"}, {"api_name": "geofinder.GeoKeys", "line_number": 65, "usage_type": "name"}, {"api_name": "geofinder.GeoKeys.normalize_match_title", "line_number": 71, "usage_type": "call"}, {"api_name": "geofinder.GeoKeys", "line_number": 71, "usage_type": "name"}, {"api_name": "geofinder.Geodata.Geodata.get_priority", "line_number": 135, "usage_type": "call"}, {"api_name": "geofinder.Geodata.Geodata", "line_number": 135, "usage_type": "attribute"}, {"api_name": "geofinder.Geodata", "line_number": 135, "usage_type": "name"}, {"api_name": "difflib.SequenceMatcher", "line_number": 157, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 162, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 163, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 179, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 182, "usage_type": "call"}]} +{"seq_id": "234265558", "text": "import h2o\nimport tempfile\nimport os\nfrom h2o.estimators import H2OIsolationForestEstimator, H2OGenericEstimator\nfrom tests import pyunit_utils\n\n\ndef mojo_model_irf_test():\n\n # GLM\n airlines = h2o.import_file(path=pyunit_utils.locate(\"smalldata/testng/airlines_train.csv\"))\n irf = H2OIsolationForestEstimator(ntrees=1)\n irf.train(x = [\"Origin\", \"Dest\"], y = \"Distance\", training_frame=airlines)\n\n original_model_filename = tempfile.mkdtemp()\n original_model_filename = irf.download_mojo(original_model_filename)\n \n model = H2OGenericEstimator.from_file(original_model_filename)\n assert model is not None\n predictions = model.predict(airlines)\n 
 assert predictions is not None\n assert predictions.nrows == 24421\n assert model._model_json[\"output\"][\"variable_importances\"] is None\n assert model._model_json[\"output\"][\"model_summary\"] is not None\n assert len(model._model_json[\"output\"][\"model_summary\"]._cell_values) > 0\n\n generic_mojo_filename = tempfile.mkdtemp(\"zip\", \"genericMojo\")\n generic_mojo_filename = model.download_mojo(path=generic_mojo_filename)\n assert os.path.getsize(generic_mojo_filename) == os.path.getsize(original_model_filename)\n \nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(mojo_model_irf_test)\nelse:\n mojo_model_irf_test()\n", "sub_path": "h2o-py/tests/testdir_generic_model/pyunit_generic_model_mojo_irf.py", "file_name": "pyunit_generic_model_mojo_irf.py", "file_ext": "py", "file_size_in_byte": 1322, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "h2o.import_file", "line_number": 11, "usage_type": "call"}, {"api_name": "tests.pyunit_utils.locate", "line_number": 11, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 11, "usage_type": "name"}, {"api_name": "h2o.estimators.H2OIsolationForestEstimator", "line_number": 12, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 15, "usage_type": "call"}, {"api_name": "h2o.estimators.H2OGenericEstimator.from_file", "line_number": 18, "usage_type": "call"}, {"api_name": "h2o.estimators.H2OGenericEstimator", "line_number": 18, "usage_type": "name"}, {"api_name": "tempfile.mkdtemp", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.getsize", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tests.pyunit_utils.standalone_test", "line_number": 32, "usage_type": "call"}, {"api_name": "tests.pyunit_utils", "line_number": 32, "usage_type": "name"}]} +{"seq_id": "314108351", "text": "\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\n\nimport base64\nimport datetime\nimport io\nfrom os import path\n\n# -------------------------------------------------------------------------------\n# Function to convert a plot to an image that can be integrated into an HTML page\n# -------------------------------------------------------------------------------\ndef plot_to_img(fig):\n pngImage = io.BytesIO()\n FigureCanvas(fig).print_png(pngImage)\n pngImageB64String = \"data:image/png;base64,\"\n pngImageB64String += base64.b64encode(pngImage.getvalue()).decode('utf8')\n return pngImageB64String\n\n# -------------------------------------------------------\n# Function that gets a dataset with date, weather description and state columns\n# -------------------------------------------------------\ndef Get_NormelizedWeatherDataset():\n dfw = pd.read_csv(path.join(path.dirname(__file__), \"..\\\\static\\\\data\\\\weather_description.csv\"))\n # Keep only the columns I will need\n dff = pd.DataFrame(columns=list(['datetime', 'Weather', 'State']))\n # Re-arrange the dataset in a way that I will have a column with the state name, and for each day, the weather description\n for col in dfw.columns: \n if (col != 'datetime'):\n dft = dfw[['datetime', col]].copy()\n dft['State'] = col\n dft = dft.rename(columns={col: 'Weather'})\n dff = dff.append(dft)\n # Change string type to date type\n dff['datetime'] = pd.to_datetime(pd.Series(dff['datetime']))\n
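 # timestamps become plain calendar dates so weather rows can later be merged with UFO sightings by day and state\n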
 # remove minutes and second part\n dff['datetime'] = dff['datetime'].dt.date\n # remove rows with NaN fields\n dff = dff.dropna()\n # remove duplicate rows\n dff.drop_duplicates(inplace=True)\n return (dff)\n\n# This function sets three new columns that indicate whether the weather description was Cloudy, Misty or Clear\ndef MakeDF_ReadyFor_Analysis(dfm):\n dfm['Weather'] = dfm['Weather'].str.upper()\n dfm['cloud'] = ((dfm['Weather'].str.find('CLOUD')>=0) | (dfm['Weather'].str.find('DRIZZLE')>=0) | (dfm['Weather'].str.find('RAIN')>=0)| (dfm['Weather'].str.find('THUNDERSTORM')>=0) | (dfm['Weather'].str.find('SNOW')>=0))\n dfm['mist'] = ((dfm['Weather'].str.find('MIST')>=0) | (dfm['Weather'].str.find('FOG')>=0) | (dfm['Weather'].str.find('HAZE')>=0))\n dfm['clear'] = ((dfm['Weather'].str.find('CLEAR')>=0) | (dfm['Weather'].str.find('FEW CLOUDS')>=0) | (dfm['Weather'].str.find('SCATTERED CLOUDS')>=0))\n return dfm\n\ndef MergeUFO_and_Weather_datasets(dff, df3):\n return (pd.merge(dff, df3, how='outer', on=['datetime', 'State']))\n\ndef Get_NormelizedUFOTestmonials():\n df = pd.read_csv(path.join(path.dirname(__file__), \"..\\\\static\\\\data\\\\UFOTestemonials.csv\"))\n df1 = df.drop(['Event_URL' , 'Event_Date', 'Day' , 'Month', 'Year', 'Hour', 'Minute', 'Summary'], 1)\n df2 = Convert_StateCode_ToFullName(df1)\n df3 = df2.dropna()\n df3['Event_Time'] = pd.to_datetime(pd.Series(df3['Event_Time']), format='%Y-%m-%dT%H:%M:%SZ', errors = 'coerce')\n df3['datetime'] = df3['Event_Time'].dt.date\n #df3 = df3.drop(['Event_Time'], 1)\n return df3\n\n\n\ndef Convert_StateCode_ToFullName(df):\n df_short_state = pd.read_csv(path.join(path.dirname(__file__), \"..\\\\static\\\\data\\\\USStatesCodes.csv\"))\n s = df_short_state.set_index('Code')['State']\n return (df.replace(s))\n\n\ndef get_states_choices():\n df_short_state = pd.read_csv(path.join(path.dirname(__file__), \"..\\\\static\\\\data\\\\USStatesCodes.csv\"))\n df1 = df_short_state.groupby('State').sum()\n l = df1.index\n m = list(zip(l , l))\n return m\n\n", "sub_path": "GadFinalProjectDemo/Models/DataQuery.py", "file_name": "DataQuery.py", "file_ext": "py", "file_size_in_byte": 3701, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "io.BytesIO", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_agg.FigureCanvasAgg", "line_number": 19, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 30, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 39, "usage_type": "call"}, {"api_name": "pandas.merge", "line_number": 57, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 60, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 64, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 64, "usage_type": "call"},
{"api_name": "pandas.read_csv", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "153808731", "text": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom poetry.core.packages.package import Package\n\nfrom poetry.factory import Factory\nfrom poetry.mixology.failure import SolveFailure\nfrom poetry.mixology.version_solver import VersionSolver\nfrom poetry.packages import DependencyPackage\n\n\nif TYPE_CHECKING:\n from poetry.packages.project_package import ProjectPackage\n from poetry.repositories import Repository\n from tests.mixology.version_solver.conftest import Provider\n\n\ndef add_to_repo(\n repository: Repository,\n name: str,\n version: str,\n deps: dict[str, str] | None = None,\n python: str | None = None,\n) -> None:\n package = Package(name, version)\n if python:\n package.python_versions = python\n\n if deps:\n for dep_name, dep_constraint in deps.items():\n package.add_dependency(Factory.create_dependency(dep_name, dep_constraint))\n\n repository.add_package(package)\n\n\ndef check_solver_result(\n root: ProjectPackage,\n provider: Provider,\n result: dict[str, str] | None = None,\n error: str | None = None,\n tries: int | None = None,\n locked: dict[str, Package] | None = None,\n use_latest: list[str] | None = None,\n) -> None:\n if locked is not None:\n locked = {k: DependencyPackage(l.to_dependency(), l) for k, l in locked.items()}\n\n solver = VersionSolver(root, provider, locked=locked, use_latest=use_latest)\n try:\n solution = solver.solve()\n except SolveFailure as e:\n if error:\n assert str(e) == error\n\n if tries is not None:\n assert solver.solution.attempted_solutions == tries\n\n return\n\n raise\n except AssertionError as e:\n if error:\n assert str(e) == error\n return\n raise\n\n packages = {}\n for package in solution.packages:\n packages[package.name] = str(package.version)\n\n assert result == packages\n\n if tries is not None:\n assert solution.attempted_solutions == tries\n", "sub_path": "tests/mixology/helpers.py", "file_name": "helpers.py", "file_ext": "py", "file_size_in_byte": 2028, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "typing.TYPE_CHECKING", "line_number": 13, "usage_type": "name"}, {"api_name": "poetry.repositories.Repository", "line_number": 20, "usage_type": "name"}, {"api_name": "poetry.core.packages.package.Package", "line_number": 26, "usage_type": "call"}, {"api_name": "poetry.factory.Factory.create_dependency", "line_number": 32, "usage_type": "call"}, {"api_name": "poetry.factory.Factory", "line_number": 32, "usage_type": "name"}, {"api_name": "poetry.packages.project_package.ProjectPackage", "line_number": 38, "usage_type": "name"}, {"api_name": "tests.mixology.version_solver.conftest.Provider", "line_number": 39, "usage_type": "name"}, {"api_name": "poetry.core.packages.package.Package", "line_number": 43, "usage_type": "name"}, {"api_name": "poetry.packages.DependencyPackage", "line_number": 47, "usage_type": "call"}, {"api_name": 
"poetry.mixology.version_solver.VersionSolver", "line_number": 49, "usage_type": "call"}, {"api_name": "poetry.mixology.failure.SolveFailure", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "527059825", "text": "from __future__ import annotations\nfrom typing import List\nfrom random import randint, random\nfrom nesim.ip import IP, IPPacket\nfrom nesim import utils\nfrom nesim.devices.error_detection import get_error_detection_data\nfrom nesim.devices.utils import data_size, extend_to_byte_divisor, from_bit_data_to_hex, from_bit_data_to_number, from_number_to_bit_data, from_str_to_bin\n\n\nclass Frame():\n\n def __init__(self, bit_data: List[int]) -> None:\n self.is_valid = False\n\n if len(bit_data) < 48:\n return\n\n self.to_mac = from_bit_data_to_number(bit_data[:16])\n self.from_mac = from_bit_data_to_number(bit_data[16:32])\n self.frame_data_size = from_bit_data_to_number(bit_data[32:40]) * 8\n self.error_size = from_bit_data_to_number(bit_data[40:48]) * 8\n total_size = self.frame_data_size + self.error_size\n\n if len(bit_data) - 48 < total_size:\n return\n\n top_data_pos = 48 + 8*self.frame_data_size\n self.data = bit_data[48: top_data_pos]\n self.error_data = bit_data[top_data_pos: top_data_pos + 8 * self.error_size]\n self.bit_data = bit_data\n self.is_valid = True\n self.additional_info = ''\n\n if self.frame_data_size / 8 == 8:\n arpq = from_str_to_bin('ARPQ')\n ip = ''.join(map(str, self.data[32:64]))\n mac_dest_str = ''.join(map(str, bit_data[:16]))\n arpq_data = ''.join(map(str, self.data[:32]))\n if arpq_data.endswith(arpq):\n if mac_dest_str == '1'*16:\n self.additional_info = f'(ARPQ) Who is {IP.from_bin(ip)} ?'\n else:\n self.additional_info = '(ARPQ) response'\n\n def __str__(self) -> str:\n from_mac = from_bit_data_to_hex(from_number_to_bit_data(self.from_mac, 16))\n to_mac = from_bit_data_to_hex(from_number_to_bit_data(self.to_mac, 16))\n\n data = from_bit_data_to_hex(self.data)\n valid, packet = IPPacket.parse(self.data)\n if valid:\n data = str(packet)\n\n return f'{from_mac} -> {to_mac} | {data} | {self.additional_info}'\n\n def __repr__(self) -> str:\n return str(self)\n\n @staticmethod\n def build(dest_mac: List[int], orig_mac: List[int], data: List[int]) -> Frame:\n data = extend_to_byte_divisor(data)\n\n e_size, e_data = get_error_detection_data(\n data, utils.CONFIG['error_detection']\n )\n\n rand = random()\n if rand < utils.CONFIG['error_prob']:\n ind = randint(0, len(data) - 1)\n data[ind] = (data[ind] + 1) % 2\n\n size = data_size(data)\n final_data = dest_mac + \\\n orig_mac + \\\n size + \\\n e_size + \\\n data + \\\n e_data\n\n frame = Frame(final_data)\n return frame\n", "sub_path": "nesim/frame.py", "file_name": "frame.py", "file_ext": "py", "file_size_in_byte": 2876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "typing.List", "line_number": 12, "usage_type": "name"}, {"api_name": "nesim.devices.utils.from_bit_data_to_number", "line_number": 18, "usage_type": "call"}, {"api_name": "nesim.devices.utils.from_bit_data_to_number", "line_number": 19, "usage_type": "call"}, {"api_name": "nesim.devices.utils.from_bit_data_to_number", "line_number": 20, "usage_type": "call"}, {"api_name": "nesim.devices.utils.from_bit_data_to_number", "line_number": 21, "usage_type": "call"}, {"api_name": "nesim.devices.utils.from_str_to_bin", "line_number": 35, "usage_type": "call"}, {"api_name": "nesim.ip.IP.from_bin", "line_number": 41, "usage_type": "call"}, {"api_name": "nesim.ip.IP", 
"line_number": 41, "usage_type": "name"}, {"api_name": "nesim.devices.utils.from_bit_data_to_hex", "line_number": 46, "usage_type": "call"}, {"api_name": "nesim.devices.utils.from_number_to_bit_data", "line_number": 46, "usage_type": "call"}, {"api_name": "nesim.devices.utils.from_bit_data_to_hex", "line_number": 47, "usage_type": "call"}, {"api_name": "nesim.devices.utils.from_number_to_bit_data", "line_number": 47, "usage_type": "call"}, {"api_name": "nesim.devices.utils.from_bit_data_to_hex", "line_number": 49, "usage_type": "call"}, {"api_name": "nesim.ip.IPPacket.parse", "line_number": 50, "usage_type": "call"}, {"api_name": "nesim.ip.IPPacket", "line_number": 50, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 60, "usage_type": "name"}, {"api_name": "nesim.devices.utils.extend_to_byte_divisor", "line_number": 61, "usage_type": "call"}, {"api_name": "nesim.devices.error_detection.get_error_detection_data", "line_number": 63, "usage_type": "call"}, {"api_name": "nesim.utils.CONFIG", "line_number": 64, "usage_type": "attribute"}, {"api_name": "nesim.utils", "line_number": 64, "usage_type": "name"}, {"api_name": "random.random", "line_number": 67, "usage_type": "call"}, {"api_name": "nesim.utils.CONFIG", "line_number": 68, "usage_type": "attribute"}, {"api_name": "nesim.utils", "line_number": 68, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 69, "usage_type": "call"}, {"api_name": "nesim.devices.utils.data_size", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "581905060", "text": "import threading\nimport time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium import common\nfrom req import Req\nfrom datetime import datetime\nfrom browser import Browser\n\n\ndef get_ports(name_file='ports'): # получает список поротов для proxy\n with open(name_file, 'r', encoding='utf-8') as file:\n ports = file.readlines()\n return list(map(lambda p: tuple(map(lambda i: int(i), (p.split()))), ports))\n\n\ndef check_captcha_google(driver): # Проверяет не подсовывает ли google капчу\n try:\n driver.find_element_by_id(\"captcha-form\")\n except common.exceptions.NoSuchElementException:\n return False\n else:\n return True\n\n\ndef choose_by(driver): # выбирает by в настройках гугла\n driver.find_element_by_xpath('.//a[text()=\"Настройки\"]').click()\n driver.find_element_by_xpath('.//a[text()=\"Настройки поиска\"]').click()\n driver.find_element_by_xpath('.//div[@id=\"regiontBY\"]/div/span').click()\n driver.find_element_by_xpath('.//div[text()=\"Сохранить\"]').click() # .send_keys (u '\\ ue007')\n time.sleep(1)\n driver.switch_to.alert.accept()\n\n\ndef check_captcha_yandex(driver): # Проверяет не подсовывает ли yandex капчу\n if 'Ой!' 
 in driver.title:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef ran_pages_google(req_i, driver, namber = 0, namber_page = 0): # checks the google results page\r\n if check_captcha_google(driver): # check whether google is serving a captcha\r\n return None, None\r\n page = driver.find_element(By.XPATH, \"//*[@id='search']\") # page = driver.find_element_by_id(\"search\")\r\n time.sleep(1)\r\n results = page.find_elements(By.XPATH, \".//div[@class='g']\")\r\n if len(results) < 7:\r\n time.sleep(8)\r\n results = page.find_elements(By.XPATH, \".//div[@class='g']\")\r\n for i, result in enumerate(results):\r\n links = result.find_elements(By.XPATH, \".//a\")\r\n for link in links:\r\n if req_i.site_promoted in link.get_attribute('href'):\r\n return namber + 1, link.get_attribute('href')\r\n else:\r\n namber += 1\r\n namber_page += 1\r\n if namber_page == 10: # pages through 10 result pages; if nothing is found, returns 101\r\n return 101, None\r\n driver.find_element_by_xpath(\".//a[@aria-label='Page {0}'][text()='{0}']\".format(namber_page + 1)).click()\r\n return ran_pages_google(req_i, driver, namber, namber_page)\r\n\r\n\r\ndef ran_pages_yandex(req_i, driver, namber = 0, namber_page = 0): # checks the yandex results page\r\n time.sleep(5) # needed for the page to load fully, otherwise we may get a partial result list or a captcha\r\n results = driver.find_elements(By.XPATH, \".//li[@class='serp-item' and @data-cid]\") # collect the list of results\r\n if len(results) == 0 and check_captcha_yandex(driver): # captcha check\r\n return None, None\r\n elif len(results) < 7: # the page may still not have loaded completely\r\n time.sleep(10) # look for the result list once more\r\n results = driver.find_elements(By.XPATH, \".//li[@class='serp-item' and @data-cid]\")\r\n for i, r in enumerate(results):\r\n try:\r\n r.find_element(By.XPATH, \".//div[contains(@class, 'label') and text()='реклама']\") # ad check\r\n except common.exceptions.NoSuchElementException: # not an ad, keep going\r\n find_cite = r.find_element(By.XPATH, \".//a\").get_attribute('href')\r\n if req_i.site_promoted in find_cite:\r\n return namber + 1, find_cite # return the position and the link that was found\r\n elif 'yandex' not in find_cite:\r\n namber += 1\r\n else:\r\n continue\r\n namber_page += 1\r\n if namber_page == 10: # pages through 10 result pages; if nothing is found, returns 101\r\n return 101, None\r\n try:\r\n aria_label = driver.find_element_by_xpath(\".//div[@aria-label='Страницы']\") # aria-label=\"Страницы\"\r\n aria_label.find_element_by_xpath(\".//a[text()='{0}']\".format(namber_page + 1)).click()\r\n except common.exceptions.NoSuchElementException:\r\n return None, None # return None, retry from a different ip\r\n return ran_pages_yandex(req_i, driver, namber, namber_page)\r\n\r\n\r\ndef run_scraper(ports, reqs, requests_google, requests_yandex):\r\n\r\n def search_google(driver, use_req): # google search\r\n try:\r\n with lock_g:\r\n driver.get('https://www.google.by')\r\n choose_by(driver) # select the search region in google settings\r\n if check_captcha_google(driver): # captcha check\r\n raise\r\n page = driver.find_element(By.XPATH, \".//input[@title='Search' or @title='Поиск' or @title='Шукаць']\")\r\n page.send_keys(use_req.value_req)\r\n page.send_keys(Keys.RETURN)\r\n use_req.position_google, use_req.url_result_google = ran_pages_google(use_req, driver)\r\n except: # common.exceptions.NoSuchElementException\r\n use_req.position_google, use_req.url_result_google = None, None\r\n\r\n def search_yandex(driver, use_req): # yandex search\r\n try:\r\n driver.get('https://yandex.by')\r\n page = driver.find_element(By.XPATH, \".//*[@id='text']\") # search box\r\n
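 # type the query, submit it, and walk the result pages to record the promoted site's position\r\n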
 page.send_keys(use_req.value_req)\r\n page.send_keys(Keys.RETURN)\r\n use_req.position_yandex, use_req.url_result_yandex = ran_pages_yandex(use_req, driver)\r\n except: # common.exceptions.NoSuchElementException\r\n use_req.position_yandex, use_req.url_result_yandex = None, None\r\n\r\n while any((requests_google, requests_yandex)):\r\n with lock_rest:\r\n browser = Browser(ports=ports) # headless=False -- if a visible browser window is needed\r\n browser.implicitly_wait(8)\r\n string = \"\\r Необработаных запросов yandex: {} google {} port {}\" \\\r\n \"\".format(len(requests_yandex), len(requests_google), browser.use_proxy_port)\r\n print(string, end=\"\")\r\n while requests_google: # loop runs while there are unfinished google requests\r\n with lock_g:\r\n use_req_for_google = reqs[requests_google.pop()] # takes the last request id in the requests_google list\r\n search_google(browser, use_req_for_google)\r\n flag_bad_proxy = True if use_req_for_google.position_google is None else False\r\n if flag_bad_proxy: # if a captcha was hit the flag is raised, switch to yandex\r\n requests_google.append(use_req_for_google.id) # put the unprocessed request back\r\n break\r\n else: # if an answer was received, store the result and try the next request\r\n with lock_w:\r\n req = reqs[use_req_for_google.id]\r\n try:\r\n req.combine(use_req_for_google) # merge the Req instance with its clone\r\n except KeyError as err:\r\n print(err)\r\n while requests_yandex:\r\n with lock_y:\r\n use_req_for_yandex = reqs[requests_yandex.pop()]\r\n search_yandex(browser, use_req_for_yandex)\r\n flag_bad_proxy = True if use_req_for_yandex.position_yandex is None else False\r\n if flag_bad_proxy: # if a captcha was hit the flag is raised, change ip and clear cookies\r\n requests_yandex.append(use_req_for_yandex.id) # put the unprocessed request back\r\n break\r\n else: # if an answer was received, store the result and try the next request\r\n with lock_w:\r\n req = reqs[use_req_for_yandex.id]\r\n try:\r\n req.combine(use_req_for_yandex) # merge the Req instance with its clone\r\n except KeyError as err:\r\n print(err)\r\n with lock_rest:\r\n browser.delete_all_cookies() # clear cookies\r\n browser.quit()\r\n Browser.restart_proxy(ports[1]) # change ip\r\n\r\n return 'поток с поротоm {} закончил работу'.format(ports[0])\r\n\r\n\r\ndef pool_thread(ports, reqs, requests_google, requests_yandex): # runs the scraping in several threads\r\n pool = []\r\n for port in ports:\r\n stream = threading.Thread(target=run_scraper, args=(port, reqs, requests_google, requests_yandex))\r\n pool.append(stream)\r\n for stream in pool:\r\n stream.start()\r\n for stream in pool:\r\n stream.join()\r\n\r\n\r\nif __name__ == '__main__':\r\n read_file_name = 'list_requests' # read_file_name = input('filename: ')\r\n if 'json' in (read_file_name):\r\n reqs = Req.read_json(read_file_name)\r\n else:\r\n reqs = Req.read_txt(read_file_name)\r\n requests_google = [req.id for req in reqs] # list of ids of google requests not yet done\r\n requests_google.reverse() # reversed: now the first ids can be popped from the end
\r\n requests_yandex = requests_google.copy() # list of ids of yandex requests not yet done\r\n time_now = datetime.now(tz=None)\r\n print(\"time start {}:{}:{}\".format(time_now.hour, time_now.minute, time_now.second))\r\n ports = get_ports() # get the list of ports\r\n print(ports)\r\n lock_w, lock_y, lock_g, lock_rest = threading.RLock(), threading.RLock(), threading.RLock(), threading.RLock()\r\n pool_thread(ports, reqs, requests_google, requests_yandex)\r\n Req.create_json(reqs)\r\n time_now = datetime.now(tz=None)\r\n print(\"time finish {}:{}:{}\".format(time_now.hour, time_now.minute, time_now.second))\r\n\r\n", "sub_path": "parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 10730, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "selenium.common.exceptions", "line_number": 20, "usage_type": "attribute"}, {"api_name": "selenium.common", "line_number": 20, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 45, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 45, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 47, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 47, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 50, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 50, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 52, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 52, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 66, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 67, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 67, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 71, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 72, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 72, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 75, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 75, "usage_type": "name"}, {"api_name": "selenium.common.exceptions", "line_number": 76, "usage_type": "attribute"}, {"api_name": "selenium.common", "line_number": 76, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 77, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 77, "usage_type": "name"}, {"api_name": "selenium.common.exceptions", "line_number": 90, "usage_type": "attribute"}, {"api_name": "selenium.common", "line_number": 90, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 104, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 104, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.RETURN", "line_number": 106, "usage_type": "attribute"}, {"api_name":
"selenium.webdriver.common.keys.Keys", "line_number": 106, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 114, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 114, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.RETURN", "line_number": 116, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 116, "usage_type": "name"}, {"api_name": "browser.Browser", "line_number": 123, "usage_type": "call"}, {"api_name": "browser.implicitly_wait", "line_number": 124, "usage_type": "call"}, {"api_name": "browser.use_proxy_port", "line_number": 126, "usage_type": "attribute"}, {"api_name": "req.combine", "line_number": 140, "usage_type": "call"}, {"api_name": "req.combine", "line_number": 155, "usage_type": "call"}, {"api_name": "browser.delete_all_cookies", "line_number": 159, "usage_type": "call"}, {"api_name": "browser.quit", "line_number": 160, "usage_type": "call"}, {"api_name": "browser.Browser.restart_proxy", "line_number": 161, "usage_type": "call"}, {"api_name": "browser.Browser", "line_number": 161, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 169, "usage_type": "call"}, {"api_name": "req.Req.read_json", "line_number": 180, "usage_type": "call"}, {"api_name": "req.Req", "line_number": 180, "usage_type": "name"}, {"api_name": "req.Req.read_txt", "line_number": 182, "usage_type": "call"}, {"api_name": "req.Req", "line_number": 182, "usage_type": "name"}, {"api_name": "req.id", "line_number": 183, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 186, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 186, "usage_type": "name"}, {"api_name": "threading.RLock", "line_number": 190, "usage_type": "call"}, {"api_name": "req.Req.create_json", "line_number": 192, "usage_type": "call"}, {"api_name": "req.Req", "line_number": 192, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 193, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 193, "usage_type": "name"}]} +{"seq_id": "144481137", "text": "import os\nimport numpy as np\nfrom keras.preprocessing import image\nimport pickle\n\nX_train = []\ny_train = []\n\nfor classes in os.listdir('train'):\n for images in os.listdir(os.path.join('train', classes)):\n tmp = image.load_img(os.path.join('train', classes, images), target_size = (100, 176))\n tmp = image.img_to_array(tmp)\n X_train.append(tmp)\n y_train.append(classes)\n\nX_test = []\ny_test = []\n\nfor classes in os.listdir('validation'):\n for images in os.listdir(os.path.join('validation', classes)):\n tmp = image.load_img(os.path.join('validation', classes, images), target_size = (100, 176))\n tmp = image.img_to_array(tmp)\n X_test.append(tmp)\n y_test.append(classes)\n\nprint('X_train: ', X_train[:5], '\\t', X_train[3000:3005], '\\t',X_train[-5:])\nprint('y_train: ', y_train[:5], '\\t', y_train[3000:3005], '\\t',y_train[-5:])\nprint('X_test: ', X_test[:5], '\\t', X_test[3000:3005], '\\t',X_test[-5:])\nprint('y_test: ', y_test[:5], '\\t', y_test[3000:3005], '\\t',y_test[-5:])\n\nprint(len(X_train), '\\t', len(X_test))\n\nX_train = np.array(X_train)\ny_train = np.array(y_train)\nX_test = np.array(X_test)\ny_test = np.array(y_test)\n\nwith open('X_train.pkl', 'wb') as file:\n pickle.dump(X_train, file)\n\nwith open('y_train.pkl', 'wb') as file:\n pickle.dump(y_train, file)\n\nwith open('X_test.pkl', 'wb') as file:\n 
 pickle.dump(X_test, file)\n\nwith open('y_test.pkl', 'wb') as file:\n pickle.dump(y_test, file)", "sub_path": "data_create.py", "file_name": "data_create.py", "file_ext": "py", "file_size_in_byte": 1463, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.listdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 11, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 12, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 19, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 21, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 22, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 22, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 39, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 42, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 45, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "598105733", "text": "'''\nCreated on Mar 10, 2015\n\n@author: puneeth\n'''\n\nimport pandas, csv, time, math\nfrom collections import Counter\n\nstart = time.time()\nprint(str(start))\n\nifname = 'train.csv'\nofname = 'train_data.csv'\n\nprint('Read train.csv')\ndf_train = pandas.read_csv(ifname)\n \nprint('Consolidate 40 Soil Types. Use only one Soil Type')\ndf_train['Soil'] = 0\nfor i in range(1, 41):\n df_train['Soil'] = df_train['Soil'] + i * df_train['Soil_Type' + str(i)]\n \nprint('Consolidate 4 Wilderness Areas.
 Use only one Wilderness Area')\ndf_train['Wilderness_Area'] = 0\nfor i in range(1, 5):\n df_train['Wilderness_Area'] = df_train['Wilderness_Area'] + i * df_train['Wilderness_Area' + str(i)]\n \nprint('Remove 40 Soil Types and 4 Wilderness Areas')\nfor i in range(1, 41):\n df_train.pop('Soil_Type' + str(i))\n if i < 5:\n df_train.pop('Wilderness_Area' + str(i))\n\ntrain_dict = df_train.to_dict()\n \n# print('Put above data into train_data.csv')\n# df_train.to_csv(ofname, index=False)\n \ntifname = 'test.csv'\ntofname = 'test_data.csv'\n\nprint('Read test.csv')\ndf_test = pandas.read_csv(tifname)\n \nprint('Consolidate 40 Soil Types. Use only one Soil Type')\ndf_test['Soil'] = 0\nfor i in range(1, 41):\n df_test['Soil'] = df_test['Soil'] + i * df_test['Soil_Type' + str(i)]\n \nprint('Consolidate 4 Wilderness Areas. Use only one Wilderness Area')\ndf_test['Wilderness_Area'] = 0\nfor i in range(1, 5):\n df_test['Wilderness_Area'] = df_test['Wilderness_Area'] + i * df_test['Wilderness_Area' + str(i)]\n \nprint('Remove 40 Soil Types and 4 Wilderness Areas')\nfor i in range(1, 41):\n df_test.pop('Soil_Type' + str(i))\n if i < 5:\n df_test.pop('Wilderness_Area' + str(i))\n \n# print('Put above data into test_data.csv')\n# df_test.to_csv(tofname, index=False)\n\ncover_grouped = Counter(df_train.Cover_Type)\nprint('Count of each cover type:', cover_grouped, len(cover_grouped))\n\nc = len(cover_grouped)\n\nprob_cover_grouped = Counter(df_train.Cover_Type)\nfor cover_type in prob_cover_grouped:\n prob_cover_grouped[cover_type] = prob_cover_grouped[cover_type] / len(df_train.index)\nprint('Probability of each cover type:', prob_cover_grouped)\n\n# exit()\n\nprint('Count each Feature')\nelevation_grouped = (df_train[['Elevation', 'Cover_Type']].groupby(['Elevation', 'Cover_Type'],\n as_index=False, sort=False)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \naspect_grouped = (df_train[['Aspect', 'Cover_Type']].groupby(['Aspect', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \nslope_grouped = (df_train[['Slope', 'Cover_Type']].groupby(['Slope', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \nh_hydro_grouped = (df_train[['Horizontal_Distance_To_Hydrology', 'Cover_Type']].groupby(['Horizontal_Distance_To_Hydrology', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \nv_hydro_grouped = (df_train[['Vertical_Distance_To_Hydrology', 'Cover_Type']].groupby(['Vertical_Distance_To_Hydrology', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \nh_roadways_grouped = (df_train[['Horizontal_Distance_To_Roadways', 'Cover_Type']].groupby(['Horizontal_Distance_To_Roadways', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \nhillshade9am_grouped = (df_train[['Hillshade_9am', 'Cover_Type']].groupby(['Hillshade_9am', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \nhillshadenoon_grouped = (df_train[['Hillshade_Noon', 'Cover_Type']].groupby(['Hillshade_Noon', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \nhillshade3pm_grouped = (df_train[['Hillshade_3pm', 'Cover_Type']].groupby(['Hillshade_3pm', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \nh_fire_grouped = (df_train[['Horizontal_Distance_To_Fire_Points',
 'Cover_Type']].groupby(['Horizontal_Distance_To_Fire_Points', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n \nsoil_grouped = (df_train[['Soil', 'Cover_Type']].groupby(['Soil', 'Cover_Type'],\n as_index=False, sort=True)['Soil'].count() + 1) / (cover_grouped[1] + c)\n \nwilderness_grouped = (df_train[['Wilderness_Area', 'Cover_Type']].groupby(['Wilderness_Area', 'Cover_Type'],\n as_index=False, sort=True)['Cover_Type'].count() + 1) / (cover_grouped[1] + c)\n\nelevation_prob_dict = elevation_grouped.to_dict()\naspect_prob_dict = aspect_grouped.to_dict()\nslope_prob_dict = slope_grouped.to_dict()\nh_hydro_prob_dict = h_hydro_grouped.to_dict()\nv_hydro_prob_dict = v_hydro_grouped.to_dict()\nh_roadways_prob_dict = h_roadways_grouped.to_dict()\nhillshade9am_prob_dict = hillshade9am_grouped.to_dict()\nhillshadenoon_prob_dict = hillshadenoon_grouped.to_dict()\nhillshade3pm_prob_dict = hillshade3pm_grouped.to_dict()\nh_fire_prob_dict = h_fire_grouped.to_dict()\nsoil_prob_dict = soil_grouped.to_dict()\nwilderness_prob_dict = wilderness_grouped.to_dict()\n\nresult_dict = {}\n\ntest_dict = df_test.to_dict()\n\nprint('Length of test.csv', len(df_test.index))\n\nprint('Start Classifying Test')\nloopstart = time.time()\nfor count in range(0, len(df_test.index)):\n class_count = []\n x = 0\n \n for cover_type in range(1, 8):\n try: \n try:\n # look up P(feature value | cover type) from the test row's features, backing off to Laplace smoothing for unseen values\n elevation_prob = elevation_prob_dict[(test_dict['Elevation'][count], cover_type)]\n except KeyError:\n elevation_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n aspect_prob = aspect_prob_dict[(test_dict['Aspect'][count], cover_type)]\n except KeyError:\n aspect_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n slope_prob = slope_prob_dict[test_dict['Slope'][count], cover_type]\n except KeyError:\n slope_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n h_hydro_prob = h_hydro_prob_dict[(test_dict['Horizontal_Distance_To_Hydrology'][count], cover_type)]\n except KeyError:\n h_hydro_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n v_hydro_prob = v_hydro_prob_dict[(test_dict['Vertical_Distance_To_Hydrology'][count], cover_type)]\n except KeyError:\n v_hydro_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n h_roadways_prob = h_roadways_prob_dict[(test_dict['Horizontal_Distance_To_Roadways'][count], cover_type)]\n except KeyError:\n h_roadways_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n hillshade9am_prob = hillshade9am_prob_dict[(test_dict['Hillshade_9am'][count], cover_type)]\n except KeyError:\n hillshade9am_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n hillshadenoon_prob = hillshadenoon_prob_dict[(test_dict['Hillshade_Noon'][count], cover_type)]\n except KeyError:\n hillshadenoon_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n hillshade3pm_prob = hillshade3pm_prob_dict[(test_dict['Hillshade_3pm'][count], cover_type)]\n except KeyError:\n hillshade3pm_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n h_fire_prob = h_fire_prob_dict[(test_dict['Horizontal_Distance_To_Fire_Points'][count], cover_type)]\n except KeyError:\n h_fire_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n soil_prob = soil_prob_dict[(test_dict['Soil'][count], cover_type)]\n except KeyError:\n soil_prob = (0 + 1) / (cover_grouped[cover_type] + c)\n \n try:\n wilderness_prob = wilderness_prob_dict[(test_dict['Wilderness_Area'][count], cover_type)]\n except KeyError:\n wilderness_prob = (0 + 1) /
 (cover_grouped[cover_type] + c)\n \n class_cover = float(elevation_prob) * float(aspect_prob) * float(slope_prob) * float(h_hydro_prob) * float(v_hydro_prob) * float(h_roadways_prob) * float(hillshade9am_prob) * float(hillshadenoon_prob) * float(hillshade3pm_prob) * float(h_fire_prob) * float(soil_prob) * float(wilderness_prob) * float(prob_cover_grouped[cover_type])\n# print(type(class_cover), class_cover)\n \n class_count.append([math.fabs(class_cover), cover_type])\n \n except KeyError:\n x = x + 0\n \n if count % 20000 == 0:\n loopend = time.time()\n print(count, str((loopend - loopstart) / 60))\n \n # pick the cover type with the largest posterior product for this row\n class_count.sort(reverse=True)\n result_dict[df_test.Id[count]] = class_count[0][1]\n \nf = open(\"pandababy5.csv\", \"w\")\nwriter = csv.writer(f)\nwriter.writerow(['Id', 'Cover_Type'])\nfor key, value in result_dict.items():\n writer.writerow([key, value])\n\nf.close()\n\nend = time.time()\nprint(str(end))\n\nruntime = float(end - start) / 60\n\nprint('Runtime:', str(runtime))", "sub_path": "Kaggle-Forest-Cover/src/tests/pandababy5.py", "file_name": "pandababy5.py", "file_ext": "py", "file_size_in_byte": 10190, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 44, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 65, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 70, "usage_type": "call"}, {"api_name": "time.time", "line_number": 134, "usage_type": "call"}, {"api_name": "math.fabs", "line_number": 204, "usage_type": "call"}, {"api_name": "time.time", "line_number": 215, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 222, "usage_type": "call"}, {"api_name": "time.time", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "394730911", "text": "from airflow import DAG\r\nfrom airflow.operators.bash_operator import BashOperator\r\nfrom datetime import datetime, timedelta\r\n\r\ndefault_args = {}\r\ndefault_args['owner'] = 'Jed'\r\ndefault_args['depends_on_past'] = False\r\ndefault_args['start_date'] = datetime(2019, 4, 15)\r\ndefault_args['email'] = '''jed@contentiq.com'''\r\ndefault_args['email_on_failure'] = True\r\ndefault_args['email_on_retry'] = False\r\ndefault_args['retries'] = 3\r\ndefault_args['retry_delay'] = timedelta(minutes=1)\r\n\r\nwith DAG(\"Post_Launch_Reporting\", default_args = default_args, schedule_interval=\"*/30 * * * *\", max_active_runs = 1, catchup = False) as DAG_PostLaunchReporting:\r\n Install_xlsxwriter = BashOperator(task_id=\"Install_xlsxwriter\", bash_command=\"pip install --user xlsxwriter\")\r\n PostLaunchReporting = BashOperator(task_id=\"PostLaunchReporting\", bash_command=\"python /PythonCode/PostLaunchReporting.py\", run_as_user='root')\r\n\r\nInstall_xlsxwriter >> PostLaunchReporting", "sub_path": "airflow/dags/DAG_PostLaunchReporting.py", "file_name": "DAG_PostLaunchReporting.py", "file_ext": "py", "file_size_in_byte": 954, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "datetime.datetime", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 13, "usage_type": "call"}, {"api_name":
"airflow.DAG", "line_number": 15, "usage_type": "call"}, {"api_name": "airflow.operators.bash_operator.BashOperator", "line_number": 16, "usage_type": "call"}, {"api_name": "airflow.operators.bash_operator.BashOperator", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "249245658", "text": "import re\n\nimport tqdm\n\nimport utils\n\n\ndef compare_articles(articles):\n # print(articles)\n thematics = []\n articles = list(articles)\n w = 0\n t = tqdm.tqdm(\n total=len(articles),\n bar_format=\"Aggregating |{bar}|{n_fmt}/{total_fmt} {percentage:3.0f}% {rate_fmt}\")\n thematic_number = 0\n articles.sort(key=lambda x: len(x[\"keywords\"]), reverse=True)\n if len(articles) > 0:\n print(\"First article from sorting has\", len(articles[0][\"keywords\"]), \"keywords.\")\n print(\"Last article from sorting has\", len(articles[-1][\"keywords\"]), \"keywords.\")\n while len(articles) > 0:\n t.update()\n w = w + 1\n d = articles.pop()\n url, kw = d[\"url\"], d[\"keywords\"]\n kw = filter_keywords(kw)\n if kw:\n if len(thematics) == 0:\n # No thematics, create first thematic\n thematic_number += 1\n thematics.append(\n {\"keywords\": kw,\n \"thematic_number\": thematic_number,\n \"articles\": [{\"dict\": d, \"matching\": list(kw)}]})\n else:\n best = (0, set([]), set([]))\n for i in range(len(thematics)):\n matching = set(thematics[i][\"keywords\"].keys()) & set(kw.keys())\n\n if len(matching) > len(best[1]):\n best = (i, list(matching), list(kw.keys()))\n\n best_thematic = thematics[best[0]]\n number_of_keywords = len(best[2])\n if number_of_keywords == 0:\n number_of_keywords = 1e100\n if len(best[1]) / number_of_keywords > 0.33:\n best_thematic[\"articles\"].append({\"dict\": d, \"matching\": best[1]})\n if w % 1000 == 0:\n thematics.sort(key=lambda x: len(x[\"articles\"]), reverse=True)\n else:\n # create new thematic\n thematic_number += 1\n thematics.append(\n {\"keywords\": kw, \"thematic_number\": thematic_number,\n \"articles\": [{\"dict\": d, \"matching\": list(kw)}]})\n else:\n # print(\"No keywords after filtering\")\n # No keywords after filtering\n thematic_number += 1\n thematics.append(\n {\"keywords\": kw, \"thematic_number\": thematic_number, \"articles\": [{\"dict\": d, \"matching\": list(kw)}]})\n t.close()\n print(\"Aggregated %s articles into thematics\" % w)\n i = 0\n for t in thematics:\n for _ in t['articles']:\n i += 1\n if i != w:\n raise Exception(\"Number of input articles != number of output articles\")\n # print(thematics)\n return thematics\n\n\ndef filter_keywords(kw_dict, max_words=2, min_words=1):\n out = {}\n items = sorted(kw_dict.items(), key=lambda x: x[1], reverse=True)[:15]\n for keyword, occurrences in items:\n keyword = utils.clean_string(keyword)\n if max_words >= len(keyword.split()) >= min_words:\n if len(keyword) > 2:\n if is_word_checked(keyword):\n out[keyword] = occurrences\n\n if len(out) > 0:\n return out\n else:\n return None\n\n\ndef is_word_checked(string):\n avoid = [\"getty\",\n \"new york times\",\n \"http\",\n \"www\",\n \"continue reading\",\n \"people\",\n \"year\",\n \"reuters\",\n \"world\",\n \"subscrib\",\n \"bloomberg\",\n \"cnn\",\n \"news\",\n \"full\",\n \"article\",\n \"rtcom\",\n \"read\",\n \"welle\",\n \"deutsche\",\n \"germany\",\n \"hong\",\n \"times\",\n \"kong\"]\n for a in avoid:\n if a in string:\n return False\n if has_numbers(string):\n return False\n return True\n\n\ndef has_numbers(inputString):\n return bool(re.search(r'\\d', inputString))\n", "sub_path": "aggregator.py", "file_name": "aggregator.py", "file_ext": 
"py", "file_size_in_byte": 4022, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "tqdm.tqdm", "line_number": 13, "usage_type": "call"}, {"api_name": "utils.clean_string", "line_number": 79, "usage_type": "call"}, {"api_name": "re.search", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "464257136", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Album',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('album_name', models.CharField(max_length=200)),\n ('release_date', models.DateField(blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='Artist',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('first_name', models.CharField(max_length=50)),\n ('last_name', models.CharField(max_length=50)),\n ('nickname', models.CharField(blank=True, max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='Band',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('band_name', models.CharField(max_length=200)),\n ('artist', models.ForeignKey(to='artists_app.Artist')),\n ],\n ),\n migrations.CreateModel(\n name='Genre',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('genre_name', models.CharField(choices=[('rock', 'Rock'), ('rnb', 'R&B'), ('alternative', 'Alternative'), ('country', 'Country'), ('hard_music', 'Hard Music'), ('rap', 'Rap'), ('electronic', 'Electronic'), ('latin', 'Latin'), ('classic', 'Classic'), ('jazz', 'Jazz')], max_length=200)),\n ],\n ),\n migrations.CreateModel(\n name='Track',\n fields=[\n ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),\n ('track_name', models.CharField(max_length=200)),\n ('track_number', models.IntegerField(default=1)),\n ('track_duration', models.CharField(blank=True, max_length=5)),\n ('album', models.ForeignKey(to='artists_app.Album')),\n ],\n options={\n 'ordering': ['track_number'],\n },\n ),\n migrations.AddField(\n model_name='album',\n name='artist',\n field=models.ForeignKey(to='artists_app.Artist'),\n ),\n migrations.AddField(\n model_name='album',\n name='genre_name',\n field=models.ManyToManyField(to='artists_app.Genre'),\n ),\n ]\n", "sub_path": "artists_app/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 2771, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.DateField", 
"line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 38, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 48, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 63, "usage_type": "name"}, {"api_name": 
"django.db.models.ManyToManyField", "line_number": 66, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 66, "usage_type": "name"}]} +{"seq_id": "18971110", "text": "#!/usr/bin/python3\n\n\"\"\"TODO\n2015-06-22\n\"\"\"\n\nimport socket,time, os, sys\nimport telnetlib\nimport mtapi_class\n\nif os.name == 'posix':\n\timport posix\n\tmd_command = 'mkdir -p'\n\tmv_command = 'mv'\n\trm_command = 'rm'\n\tslash='/'\nelif os.name =='nt':\n\tmd_command = 'md'\n\tmv_command = 'move'\n\trm_command = 'del'\n\tslash='\\\\'\n\n\n#-----------------------------------------------------------------------\ndef make_socket(address,port,timeout=10):\n\ts = None # socket\n\tfor res in socket.getaddrinfo(address, port, socket.AF_UNSPEC, socket.SOCK_STREAM):\n\t\taf, socktype, proto, canonname, sa = res\n\t\ttry:\n\t\t\t s = socket.socket(af, socktype, proto)\n\t\texcept (socket.error, msg):\n\t\t\ts = None\n\t\t\tcontinue\n\t\ttry:\n\t\t\ts.settimeout(timeout)\n\t\t\ts.connect(sa)\n\t\texcept (socket.error, OSError):\n\t\t\ts.close()\n\t\t\ts = None\n\t\t\tcontinue\n\t\tbreak\n\t\ts.timeout()\n\treturn s\n#-----------------------------------------------------------------------\ndef telnet_mt(HOST,PORT,user,passw,commands_list):\n\t\"\"\"\n\tPodlaczanie po telnet do mikrotika i wykonanie polecen\n\tZwraca:\n\t0 - wszystko ok, wykonane polecenia\n\t1 - zamkniety port\n\t2 - to nie jest mikrotik\n\t3 - mikrotik, ale zle haslo\n\t4 - problem z telnetem (zamulony mikrotik)\n\t\"\"\"\n\ttry:\n\t\ttn = telnetlib.Telnet(HOST,PORT,5)\n\t\t## Lacze czekam przez 3 sekundy na info \"MikroTik\"\n\t\tif (tn.expect([b'MikroTik'],5)[0] == 0): # zero oznacza pierwszy element z listy, jak przyjmuje expect\n\t\t\tfor i in range(len(passw)):\n\t\t\t\t#~ tn.read_until(b\"Login:\")\n\t\t\t\tif (tn.expect([b'Login:'],10)[0]):\n\t\t\t\t\treturn 4\n\t\t\t\ttn.write(user.encode('UTF-8') + b\"\\n\")\n\t\t\t\t#~ tn.read_until(b\"Password:\")\n\t\t\t\tif (tn.expect([b'Password:'],10)[0]):\n\t\t\t\t\treturn 4\n\t\t\t\t#~ tn.write(passw.encode('UTF-8') + b\"\\n\")\n\t\t\t\ttn.write(passw[i].encode('UTF-8') + b\"\\n\")\n\t\t\t\tif (tn.expect([b'MikroTik'],10)[0]):\n\t\t\t\t\ttn.close()\n\t\t\t\t\ttn = telnetlib.Telnet(HOST,PORT,10)\n\t\t\t\t\tcontinue ## nastepne haslo, jezeli jest\n\t\t\t\t\t#~ return 3 ## mikrotik, ale zle haslo\n\t\t\t\tif (tn.expect([b'] >'],15)[0]):\n\t\t\t\t\treturn 4\n\t\t\t\t#~ tn.read_until(b'>',1)\n\t\t\t\tfor command in commands_list:\n\t\t\t\t\ttn.write(command.encode('UTF-8')+b\"\\r\\n\")\n\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\t# dummy command\n\t\t\t\t\ttn.write(\"ip service print\".encode('UTF-8')+b\"\\r\\n\")\n\t\t\t\t\tif (tn.expect([b'] >'],15)[0]):\n\t\t\t\t\t\treturn 4\n\t\t\t\t\t#~ tn.read_until(b'@')\n\t\t\t\t\t#~ tn.read_until(b'#')\n\t\t\t\t\t#~ tn.read_until(b'] >')\n\t\t\t\t\t#~ tn.expect([b'@'],10)\n\t\t\t\ttn.write(b\"quit\\r\\n\")\n\t\t\t\ttn.close()\n\t\t\t\treturn 0 # wszystko ok\n\t\t\ttn.close()\n\t\t\treturn 3 ##wszystkie hasla sprobowal i nie znalazl\n\t\telse:\n\t\t\ttn.close()\n\t\t\treturn 2 ## to nie jest mikrotik\n\texcept (OSError, EOFError): \n\t\treturn 1 ## zamkniety port\n\t#~ return 0 ## wszystko ok, zrobione\n#-----------------------------------------------------------------------\n\ndef log_write(log_file,log_string):\n\tlog_file.writelines(log_string)\n\tos.fsync(log_file) # file flush\n\ndef gefaa(api_answer,element):\n\t\"GET elemen from api answer\"\n\ttry:\n\t\treturn api_answer['='+element]\n\texcept KeyError:\n\t\treturn \"\"\n\n\ndef 
mt_process(HOST_IP,error_log_file,detail_log_file, info_log_file,info_human_log_file,good_log_file,check_ping,check_winbox_port,log_good,login,password,print_debug,read_eth_mac,human_readable,API_PORT,TELNET_PORT,INT_HOST=''):\n\tif (human_readable == 'True'):\n\t\t\tinfo_human_log_file.flush()\n\tif (log_good == 'True'):\n\t\t\tgood_log_file.flush()\n\terror_log_file.flush()\n\tdetail_log_file.flush()\n\tinfo_log_file.flush()\n\t\n\tlog_write(detail_log_file,'\\n------------------------------------------\\n')\n\t#~ print (\"\\n############# \"+HOST_IP+\" ##############\")\n\tprint (\" ++ Lacze do hosta IP: \"+HOST_IP+' ('+INT_HOST+') ')\n\tlog_write(detail_log_file,HOST_IP+' ('+INT_HOST+') '+'\\n')\n\t# sprawdzam czy host to MikroTik - sprawdzam Winboxa\n\tif ((check_ping == 'True') and (os.name == 'posix')):\n\t\t# sprawdza ping - zwraca 1 jezeli niepowodzenie\n\t\tif (os.system(\"ping -c 3 -W 3 \"+HOST_IP+\">/dev/null\")):\n\t\t\tprint(\" !! Host nie odpowiada! Pomijam..\")\n\t\t\tlog_write(detail_log_file,\" !! Host nie odpowiada! Pomijam..\")\n\t\t\tlog_write(error_log_file,HOST_IP+' ('+INT_HOST+') '+\" # host nie odpowiada\\n\")\n\t\t\t#~ continue\n\t\t\treturn 1\n\tif (check_winbox_port == \"True\"):\n\t\ts=make_socket(HOST_IP,8291,5)\n\t\tif s is None:\n\t\t\tprint(\" !! To nie jest MikroTik! Pomijam..\")\n\t\t\tlog_write(detail_log_file,\" !! To nie jest MikroTik! Pomijam..\")\n\t\t\tlog_write(error_log_file,HOST_IP+\" # to nie jest mikrotik\\n\")\n\t\t\t#~ continue\n\t\t\treturn 1\n\t\ts.close()\n\t# otwiera socket API - pierwsza proba\n\ts=make_socket(HOST_IP,API_PORT,5)\n\tif s is None:\n\t\tlog_write(detail_log_file,\" !! Blad polaczenia API (wylaczone api?)\\n\")\n\t\tprint(\" !! Blad polaczenia API\")\n\t\tprint(\" ++ Lacze sie przez telnet - wlacze API\")\n#TELNET - wlacz\n\t\tTELNET_ANSWER=telnet_mt(HOST_IP,TELNET_PORT,login,password,['ip service enable api','ip service set api address=0.0.0.0/0']) #zwraca 0-3\n\t\tif TELNET_ANSWER: ## jesli byl jakis blad (inny niz 0)\n\t\t\tif TELNET_ANSWER == 1: #zamkniety port\n\t\t\t\tlog_write(detail_log_file,\" !! Blad telnet, zamkniety port\\n\")\n\t\t\t\tprint(\" !! Blad telnet, zamkniety port\")\n\t\t\telif TELNET_ANSWER == 2: # to nie jest mikrotik\n\t\t\t\tlog_write(detail_log_file,\" !! Blad telnet, to nie MikroTik\\n\")\n\t\t\t\tprint(\" !! Blad telnet, to nie MikroTik\")\n\t\t\telif TELNET_ANSWER == 3: # mikrotik, ale zle haslo\n\t\t\t\tlog_write(detail_log_file,\" !! Blad telnet, zadne haslo nie pasuje\\n\")\n\t\t\t\tprint(\" !! Blad telnet, zadne haslo nie pasuje\")\n\t\t\telif TELNET_ANSWER == 4: # zamulony telnet (problem mikrotika)\n\t\t\t\tlog_write(detail_log_file,\" !! Blad telnet, zamulony mikrotik\\n\")\n\t\t\t\tprint(\" !! Blad telnet, zamulony mikrotik\")\n\t\t\tlog_write(error_log_file,HOST_IP+' ('+INT_HOST+') '+\" # blad telnet, zamulony mikrotik\\n\")\n\t\t\tprint(\" !! Blad! Pomijam MikroTika...\")\n\t\t\t#~ continue ## pomija dalsze linie, przechodzi do nastepnego MT\n\t\t\treturn 1\n\t\telse:\n\t\t\tprint(\" ++ Wlaczylem API, kontynuuje...\\n ++ Lacze do API\")\n\t\t\tlog_write(detail_log_file,\" ++ Wlaczylem API, kontynuuje...\\n\")\n\t\t\ttime.sleep(2)\n\t\t\ts=make_socket(HOST_IP,API_PORT,5) # otwiera socket API\n\t\t\tif s is None:\n\t\t\t\tlog_write(detail_log_file,\" !! Blad polaczenia API\\n\")\n\t\t\t\tlog_write(error_log_file,HOST_IP+' ('+INT_HOST+') '+\" # blad polaczenia API\\n\")\n\t\t\t\tprint(\" !! 
Blad polaczenia API\")\n\t\t\t\t#~ continue ## pomijam Mikrotika, przechodzi do nastepnego MT\n\t\t\t\treturn 1\n\n#********************** KONIEC TELNET *******************************\n\n # jesli polaczono to wykonaj komendy API\n\ttry:\n\t\t##wczesniej tylko proby czy jest API port dostepny, teraz podpiecie produkcyjne\n\t\ts.close()\n\t\ts = None\n\t\tLOGGED=0\n\t\tfor i in range(len(password)):\n\t\t\tif s is None:\n\t\t\t\ts=make_socket(HOST_IP,API_PORT,10)\n\t\t\tmtapi = mtapi_class.ApiRos(s) #klasa api\n\t\t\tif (print_debug == 'True'):\n\t\t\t\tmtapi.DEBUG_PRINT=True\n\t\t\tif mtapi.login(login, password[i]): # loguje sie z loginem i pass\n\t\t\t\t#zalogowano:\n\t\t\t\tLOGGED=1\n\t\t\t\tbreak # znalazl poprawne, nie sprawdza kolejnych hasel\n\t\t\telse:\n\t\t\t\ts.close() #trzeba utworzyc nowy socket, nie mozna probowac kilka razy na jednym\n\t\t\t\ts = None\n\t\t\t\tcontinue # kolejne haslo\n\t\tif not LOGGED: ## jesli sprawdzil wszystkie i niie bylo powodzenia\n\t\t\tlog_write(detail_log_file,\" !! Problem z logowaniem API, zadne haslo nie pasuje!\\n\")\n\t\t\tlog_write(error_log_file,HOST_IP+ ' '+INT_HOST+' '+' # problem z API - zadne haslo nie pasuje!\\n')\n\t\t\tprint(\" !! Problem z logowaniem, zadne haslo nie pasuje\")\n\t\t\t#~ continue ## pomijam Mikrotika, przechodzi do nastepnego MT\n\t\t\treturn 1\n\t\t\t\n\t\t#********************** POLACZONO PO API *******************************\n\t\t# jesli polaczono to wykonaj komendy API\n\t\tprint(\" ++ Polaczylem sie przez API\")\n\t\tlog_write(detail_log_file,\" ++ Polaczylem sie przez API \\n\")\n\t\t\n\t\t#\n\t\t#\n\t\tsep=\"|\"\n\t\tprint (' ++ Pobieram dane o MikroTiku')\n\t\tAPI_ANSWER=(mtapi.talk(['/system/identity/print']))\n\t\tlog_write(info_log_file,HOST_IP+sep+INT_HOST+sep+gefaa(API_ANSWER[0][1],\"name\")+sep)\n\t\tif human_readable == \"True\":\n\t\t\tlog_write(info_human_log_file,HOST_IP+sep+INT_HOST+sep+gefaa(API_ANSWER[0][1],\"name\")+sep)\n\t\t\t\n\t\tAPI_ANSWER=(mtapi.talk(['/system/resource/print']))\n\t\tw_stats=(\"board-name\",\"version\",\"bad-blocks\")\n\t\t#~ bad_blocks=API_ANSWER[0][1][\"=bad-blocks\"]\n\t\tfor i in range(len(API_ANSWER)-1):\n\t\t\tfor element in w_stats:\n\t\t\t\t#kazdy element\n\t\t\t\tlog_write(info_log_file,gefaa(API_ANSWER[i][1],element)+sep)\n\t\t\t\tif human_readable == \"True\":\n\t\t\t\t\tlog_write(info_human_log_file,gefaa(API_ANSWER[i][1],element)+sep)\n\t\t\n\t\tAPI_ANSWER=(mtapi.talk(['/system/routerboard/print']))\n\t\tw_stats=('serial-number','current-firmware','upgrade-firmware')\n\t\tfor i in range(len(API_ANSWER)-1):\n\t\t\tfor element in w_stats:\n\t\t\t\t#kazdy element\n\t\t\t\tlog_write(info_log_file,gefaa(API_ANSWER[i][1],element)+sep)\n\t\t\t\tif human_readable == \"True\":\n\t\t\t\t\tlog_write(info_human_log_file,gefaa(API_ANSWER[i][1],element)+sep)\n\t\t\n\t\t## ETHERNET INTERFACE\n\t\tif read_eth_mac=='True':\n\t\t\tAPI_ANSWER=(mtapi.talk(['/interface/ethernet/print','=.proplist=name,mac-address']))\n\t\t\ttry:\n\t\t\t\t#~ print(API_ANSWER[0][1])\n\t\t\t\tlog_write(info_log_file,gefaa(API_ANSWER[0][1],'mac-address'))\n\t\t\t\tif human_readable == \"True\":\n\t\t\t\t\tlog_write(info_human_log_file,gefaa(API_ANSWER[0][1],'mac-address'))\n\t\t\texcept (IndexError,KeyError):\n\t\t\t\tpass\n\t\t\tlog_write(info_log_file,sep)\n\t\t\tif human_readable == \"True\":\n\t\t\t\t\tlog_write(info_human_log_file,sep)\n\t\t\n\t\t#WIRELESS INTERFACE\n\t\t## Musi byc uzyty proplist, poniewaz na sofcie 6.X wyswietla wiecej info i nie wiedziec czemu buguje i zacina sie komunikacja 
API\n\t\t## dokladnie po =ht-guard-interval=any jest zacinka\n\t\t#~ API_ANSWER=(mtapi.talk(['/interface/wireless/getall','=.proplist=mode,name,ssid,frequency,mac-address,band,scan-list,disabled,comment,channel-width,default-authentication']))\n\t\tAPI_ANSWER=(mtapi.talk(['/interface/wireless/getall','=.proplist=mode,name,ssid,frequency,mac-address,band,scan-list,disabled,comment,channel-width,running']))\n\t\t#~ ## W sofcie 3.10 nie ma obslugi proplist\n\t\t#~ if (\"!trap\" in API_ANSWER[0]):\n\t\t\t# bez proplist, trzeba wiec wyswietlic cale\n\t\t\t#~ API_ANSWER=(mtapi.talk(['/interface/wireless/print']))\n\t\t# lista info do pobrania\n\t\t#~ w_stats=(\"mode\",\"name\",'ssid','frequency','channel-width','band','scan-list','mac-address','default-authenticate','disabled','comment') #informacje do pobrania wg kolejnosci\n\t\tw_stats=(\"mode\",\"name\",'ssid','frequency','channel-width','band','scan-list','mac-address','disabled','running','comment') #informacje do pobrania wg kolejnosci\n\t\t#~ w_log=\"\"\n\t\tfor i in range(len(API_ANSWER)-1):\n\t\t\tif human_readable==\"True\" and i>0:\n\t\t\t\t\tlog_write(info_human_log_file,\"\\n\"+sep*9) # 8 to ilosc kolumn do przesuniecia w stosunku do info o plycie\n\t\t\t\t\tif read_eth_mac=='True': #jesli odczytac nalezy jeszcze mac eth to jeszcze jeden separator (przesuniecei)\n\t\t\t\t\t\tlog_write(info_human_log_file,sep)\n\t\t\tfor element in w_stats:\n\t\t\t\t#kazdy element\n\t\t\t\t#~ w_log=w_log+gefaa(API_ANSWER[i][1],element)+sep\n\t\t\t\tlog_write(info_log_file,gefaa(API_ANSWER[i][1],element)+sep)\n\t\t\t\tif human_readable == \"True\":\n\t\t\t\t\tlog_write(info_human_log_file,gefaa(API_ANSWER[i][1],element)+sep)\n\t\t\n\t\t# zapis info do logow\n\t\t#~ log_write(info_log_file,HOST_IP+sep+identity+sep+board+sep+serial+sep+version+sep+current_firmware+'/'+upgrade_firmware+sep+bad_blocks+sep+w_log+'\\n')\n\t\tlog_write(info_log_file,'\\n')\n\t\tif human_readable == \"True\":\n\t\t\tlog_write(info_human_log_file,'\\n')\n\t\tlog_write(detail_log_file,\" ++ Ok! Pobralem informacje z MT\\n\")\n\t\tif (log_good == \"True\"):\n\t\t\tlog_write(good_log_file,HOST_IP+' ('+INT_HOST+') '+\"\\n\")\n\t\tprint(\" ++ Ok! Pobralem informacje z MT\")\n\t\treturn 0 ##Wszystko ok\n\n\n#====================================================================\n#\n\texcept (RuntimeError, ConnectionResetError, socket.timeout):\n\t\tlog_write(detail_log_file,\" !! Polaczenie przerwane przez MT\\n\")\n\t\tlog_write(error_log_file,HOST_IP+' ('+INT_HOST+') '+\" # Polaczenie przerwane przez MT\\n\")\n\t\tprint (\" !! 
Polaczenie przerwane przez MT!\")\n\t\tif s is not None:\n\t\t\ts.close()\n\t\t\t#~ continue ## pomija dalsze linie, przechodzi do nastepnego MT\n\t\t\treturn 1\n\t\t#continue\n#\n\tif s is not None: # Zamkniecie socketa, nastepny z listy\n\t\ts.close()\n\n########################################################################\n#################### MAIN ######################################\n########################################################################\ndef main(config_list_file):\n\t#\n\tpy_loc = os.path.dirname(os.path.abspath(__file__))+slash #lokalizacja tego pliku\n\ttoday=time.strftime(\"%Y-%m-%d\");\n\ttarget_ips=[] # do ipkow\n\n\t##\n\tlog_file_dir =py_loc+\"log\"+slash\n\tbackup_dir=py_loc+'backup'+slash\n\tlog_good='True'\n\tcheck_winbox_port='True'\n\tlogin=''\n\tpassword=[]\n\tprint_debug='False'\n\tcheck_ping='True'\n\ttry_behind_nat='False'\n\thuman_readable='False'\n\tread_eth_mac='False'\n\t#\n\tMT_list_file=py_loc+config_list_file #nazwa pliku konfiguracyjnego\n\n\t#---------------------------------------------------------------------\n\n\t#\n\ttry: #WCZYTANIE USTAWIEN Z PLIKU lista.txt\n\t\tplik_lista = open(MT_list_file)\n\t\tfor line in plik_lista:\n\t\t\tif (line[0] == '#' or line[0]=='' or line[0]=='\\n'): continue\n\t\t\tconf_tab=line.split('\\n')\n\n\t\t\tconf_tab=conf_tab[0].split('=')\n\t\t\tif conf_tab[0]=='login': login=conf_tab[1].split()[0]\n\t\t\telif conf_tab[0]=='password': \n\t\t\t\t#~ password=conf_tab[1].split()[0]\n\t\t\t\tpassword.append( conf_tab[1].split()[0])\n\t\t\telif conf_tab[0]=='try_behind_nat': try_behind_nat=conf_tab[1].split()[0]\n\t\t\telif conf_tab[0]=='log_good': log_good=conf_tab[1].split()[0]\n\t\t\telif conf_tab[0]=='check_winbox_port': check_winbox_port=conf_tab[1].split()[0]\n\t\t\telif conf_tab[0]=='print_debug': print_debug=conf_tab[1].split()[0]\n\t\t\telif conf_tab[0]=='check_ping': check_ping=conf_tab[1].split()[0]\n\t\t\telif conf_tab[0]=='human_readable': human_readable=conf_tab[1].split()[0]\n\t\t\telif conf_tab[0]=='read_eth_mac': read_eth_mac=conf_tab[1].split()[0]\n\t\t\t## Katalog logow\n\t\t\telif conf_tab[0]=='log_file_dir':\n\t\t\t\tlog_file_dir=conf_tab[1].split()[0]\n\t\t\t\tif log_file_dir[0]!=slash: log_file_dir=py_loc+log_file_dir ## dodaje na poczatku slash\n\t\t\t\tif log_file_dir[len(log_file_dir)-1]!=slash: log_file_dir+=slash # dodaje na koncu slash\n\t\t\t## IPKI\n\t\t\telse:\n\t\t\t\tif '.'in line: # jesli ip w linii\n\t\t\t\t\ttarget_ips.extend(line.split('\\n')) # IP\n\t\tplik_lista.close()\n\texcept FileNotFoundError:\n\t\t#~ log_write(detail_log_file,'Brak pliku konfiguracyjnego!')\n\t\tprint(\"Nie ma takiego pliku konfiguracyjnego!\")\n\t\texit(1)\n\t#jesli brakuje hasla/loginu\n\tif login=='' or password=='':\n\t\tprint(\" !! Nie podano loginu lub hasla!\")\n\t\t#~ log_write(detail_log_file,\" !! 
Nie podano loginu lub hasla!\\n\")\n\t\tprint('Przerywam prace..\\n')\n\t\texit(1)\n\n\n\t# \n\t#usuwa puste znaki w tablicy target_ips (lista)\n\tfor i in range(len(target_ips)):\n\t\ttry:\n\t\t\ttarget_ips.remove('')\n\t\texcept ValueError:\n\t\t\tbreak\n\n\n\t#\n\t#tworzy katalogi logow\n\tos.system(md_command+' \"'+log_file_dir+'error\" \"'+log_file_dir+'info_csv\" \"'+log_file_dir+'detail\"')\n\t#tworzy dojscia do plikow\n\tdetail_log_file=open(log_file_dir+\"detail\"+slash+today+'.log','w')\n\tinfo_log_file=open(log_file_dir+\"info_csv\"+slash+today+'.log','w')\n\terror_log_file=open(log_file_dir+\"error\"+slash+today+'.log','w')\n\tgood_log_file=''\n\tif log_good=='True':\n\t\tos.system(md_command+' \"'+log_file_dir+'good\"')\n\t\tgood_log_file=open(log_file_dir+\"good\"+slash+today+'.log','w')\n\tif human_readable=='True':\n\t\tos.system(md_command+' \"'+log_file_dir+'info_human\"')\n\t\tinfo_human_log_file=open(log_file_dir+\"info_human\"+slash+today+'.log','w')\n\t\t\n\t#=========================================================\n\t#=================== GLOWNA PETLA =======================\n\t#=========================================================\n\tfor HOST_IP in target_ips: # PETLA Z MT\n\t\tprint (\"\\n############# \"+HOST_IP+\" ##############\")\n\t\tif (mt_process(HOST_IP,error_log_file,detail_log_file, info_log_file,info_human_log_file,good_log_file,check_ping,check_winbox_port,log_good,login,password,print_debug,read_eth_mac,human_readable,8728,23)):\n\t\t\t##Jakis blad,nastepny mt\n\t\t\tcontinue\n\t\telse:\n\t\t\t#glowny mt ok:\n\t\t\tprint(\" ++ Pobralem dane z koncentratora.. probuje bazy:\")\n\t\t\tlog_write(detail_log_file,\" ++ Pobralem dane z koncentratora.. probuje bazy:\\n\")\n\t\t\tif try_behind_nat==\"True\": ##jezeli nalezy probowac za natem:\n\t\t\t\tfor i in range(2,16):\n\t\t\t\t\tINT_IP=\"10.5.50.\"+str(i)\n\t\t\t\t\ttry:\n\t\t\t\t\t\tprint(\"NEXT...\")\n\t\t\t\t\t\ts=make_socket(HOST_IP,8728,5) # otwiera socket API do glownego\n\t\t\t\t\t\tif s is None:\n\t\t\t\t\t\t\tlog_write(detail_log_file,\" !! Blad polaczenia API\\n\")\n\t\t\t\t\t\t\tlog_write(error_log_file,HOST_IP+\" # blad polaczenia API\\n\")\n\t\t\t\t\t\t\tprint(\" !! Blad polaczenia API\")\n\t\t\t\t\t\t\tbreak ## pomijam Mikrotika, przechodzi do nastepnego MT baazowego\n\t\t\t\t\t\t##wczesniej tylko proby czy jest API port dostepny, teraz podpiecie produkcyjne\n\t\t\t\t\t\ts.close()\n\t\t\t\t\t\ts = None\n\t\t\t\t\t\tLOGGED=0\n\t\t\t\t\t\tfor j in range(len(password)):\n\t\t\t\t\t\t\tif s is None:\n\t\t\t\t\t\t\t\ts=make_socket(HOST_IP,8728,10)\n\t\t\t\t\t\t\tmtapi = mtapi_class.ApiRos(s) #klasa api\n\t\t\t\t\t\t\tif (print_debug == 'True'):\n\t\t\t\t\t\t\t\tmtapi.DEBUG_PRINT=True\n\t\t\t\t\t\t\tif mtapi.login(login, password[j]): # loguje sie z loginem i pass\n\t\t\t\t\t\t\t\t#zalogowano:\n\t\t\t\t\t\t\t\tLOGGED=1\n\t\t\t\t\t\t\t\tbreak # znalazl poprawne, nie sprawdza kolejnych hasel\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\ts.close() #trzeba utworzyc nowy socket, nie mozna probowac kilka razy na jednym\n\t\t\t\t\t\t\t\ts = None\n\t\t\t\t\t\t\t\tcontinue # kolejne haslo\n\t\t\t\t\t\tif not LOGGED: ## jesli sprawdzil wszystkie i niie bylo powodzenia\n\t\t\t\t\t\t\tlog_write(detail_log_file,\" !! Problem z logowaniem API, zadne haslo nie pasuje!\\n\")\n\t\t\t\t\t\t\tlog_write(error_log_file,HOST_IP+' # problem z API - zadne haslo nie pasuje!\\n')\n\t\t\t\t\t\t\tprint(\" !! 
Problem z logowaniem, zadne haslo nie pasuje\")\n\t\t\t\t\t\t\tbreak ## pomijam Mikrotika, przechodzi do nastepnego MT bazowego\n\t\t\t\t\t\t\n\t\t\t\t\t\tprint(\" ++ Otwieram porty telnet oraz API na koncentratorze\")\n\t\t\t\t\t\tmtapi.talk([\"/ip/firewall/nat/add\",\"=chain=dstnat\",\"=protocol=tcp\",\"=action=dst-nat\",\"=dst-address=\"+HOST_IP,\"=to-addresses=\"+INT_IP,\"=dst-port=65301\",\"=to-ports=8728\"])\n\t\t\t\t\t\tmtapi.talk([\"/ip/firewall/nat/add\",\"=chain=dstnat\",\"=protocol=tcp\",\"=action=dst-nat\",\"=dst-address=\"+HOST_IP,\"=to-addresses=\"+INT_IP,\"=dst-port=2300\",\"=to-ports=23\"])\n\t\t\t\t\t\ttime.sleep(3)\n\t\t\t\t\t\tmt_process(HOST_IP,error_log_file,detail_log_file, info_log_file,info_human_log_file,good_log_file,\"False\",\"False\",log_good,login,password,print_debug,read_eth_mac,human_readable,65301,2300,INT_IP)\n\t\t\t\t\t\t#~ if(mt_process(HOST_IP,error_log_file,detail_log_file, info_log_file,info_human_log_file,good_log_file,\"False\",\"False\",log_good,login,password,print_debug,read_eth_mac,human_readable,65301,2300,INT_IP)):\n\t\t\t\t\t\t\t#~ continue ## problem z mt w wewnetrznej, nastepny wewnetrzny\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t##USUNIECIE REGULY\n\t\t\t\t\t\tAPI_ANSWER = mtapi.talk([\"/ip/firewall/nat/print\",\"?dst-port=2300\",\"?dst-port=65301\",\"?#|\"])\n\t\t\t\t\t\tfor j in range(len(API_ANSWER)-1):\n\t\t\t\t\t\t\tmtapi.talk([\"/ip/firewall/nat/remove\",\"=.id=\"+API_ANSWER[j][1]['=.id']])\n\t\t\t\t\t\t\t\n\t\t\t\t\texcept (RuntimeError, ConnectionResetError, socket.timeout):\n\t\t\t\t\t\tlog_write(detail_log_file,\" !! Polaczenie przerwane przez MT\\n\")\n\t\t\t\t\t\tlog_write(error_log_file,HOST_IP+\" # Polaczenie przerwane przez MT\\n\")\n\t\t\t\t\t\tprint (\" !! Polaczenie przerwane przez MT!\")\n\t\t\t\t\t\tif s is not None:\n\t\t\t\t\t\t\ts.close()\n\t\t\t\t\t\t\tcontinue ## pomija dalsze linie, przechodzi do nastepnego MT\n\t\t\t\t\t\t\t#~ return 1\n\t\t\t\t\t\t#continue\n\t\t\t\t#\n\t\t\t\t\tif s is not None: # Zamkniecie socketa, nastepny z listy\n\t\t\t\t\t\ts.close()\n\t\t\t\t\t\n\n\n\n\t#=========================================================\n\t#===========KONIEC GLOWNA PETLA =======================\n\t#=========================================================\n\n\n\tprint(\"**** ZAKONCZONO ****\\nLogi z pracy znajdziesz w folderze logi\")\n\tif (log_good == 'True'):\n\t\tgood_log_file.close()\n\tif (human_readable == 'True'):\n\t\tinfo_human_log_file.close()\n\terror_log_file.close()\n\tdetail_log_file.close()\n\tinfo_log_file.close()\n\tif os.name=='nt':\n\t\tos.system(\"pause\")\n\nif __name__ == \"__main__\":\n\ttry:\n\t\tmain(sys.argv[1])\n\texcept IndexError:\n\t\tprint(\"Nie podano pliku konfiguracyjnego jako parametr! 
\\n\")\n\n\t\n\n", "sub_path": "python/inwentarka_port/inwentarka_port.py", "file_name": "inwentarka_port.py", "file_ext": "py", "file_size_in_byte": 20129, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.name", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 17, "usage_type": "attribute"}, {"api_name": "socket.getaddrinfo", "line_number": 27, "usage_type": "call"}, {"api_name": "socket.AF_UNSPEC", "line_number": 27, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 27, "usage_type": "attribute"}, {"api_name": "socket.socket", "line_number": 30, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 31, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 37, "usage_type": "attribute"}, {"api_name": "telnetlib.Telnet", "line_number": 56, "usage_type": "call"}, {"api_name": "telnetlib.Telnet", "line_number": 71, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 79, "usage_type": "call"}, {"api_name": "os.fsync", "line_number": 103, "usage_type": "call"}, {"api_name": "os.name", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 129, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 172, "usage_type": "call"}, {"api_name": "mtapi_class.ApiRos", "line_number": 192, "usage_type": "call"}, {"api_name": "socket.timeout", "line_number": 296, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 314, "usage_type": "call"}, {"api_name": "os.path", "line_number": 314, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 314, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 315, "usage_type": "call"}, {"api_name": "os.system", "line_number": 387, "usage_type": "call"}, {"api_name": "os.system", "line_number": 394, "usage_type": "call"}, {"api_name": "os.system", "line_number": 397, "usage_type": "call"}, {"api_name": "mtapi_class.ApiRos", "line_number": 430, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 450, "usage_type": "call"}, {"api_name": "socket.timeout", "line_number": 462, "usage_type": "attribute"}, {"api_name": "os.name", "line_number": 491, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 492, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 496, "usage_type": "attribute"}]} +{"seq_id": "165871676", "text": "\"\"\"\n\nFunctions for reading and writing data.\n\n\"\"\"\n\n\nimport re as _re\nimport json as _json\nfrom .fixation import FixationSequence as _FixationSequence\nfrom .text import TextBlock as _TextBlock\n\n\ndef read(file_path):\n \"\"\"\n\n Read in a JSON file. `eyekit.fixation.FixationSequence` and\n `eyekit.text.TextBlock` objects are automatically decoded and\n instantiated.\n\n \"\"\"\n with open(str(file_path), encoding=\"utf-8\") as file:\n data = _json.load(file, object_hook=_eyekit_decoder)\n return data\n\n\ndef write(data, file_path, compress=False):\n \"\"\"\n\n Write arbitrary data to a JSON file. If `compress` is `True`, the file is\n written in the most compact way; if `False`, the file will be more human\n readable. 
`eyekit.fixation.FixationSequence` and `eyekit.text.TextBlock`\n objects are automatically encoded.\n\n \"\"\"\n if compress:\n indent = None\n separators = (\",\", \":\")\n else:\n indent = \"\\t\"\n separators = (\",\", \": \")\n with open(str(file_path), \"w\", encoding=\"utf-8\") as file:\n _json.dump(\n data,\n file,\n default=_eyekit_encoder,\n ensure_ascii=False,\n indent=indent,\n separators=separators,\n )\n\n\ndef import_asc(file_path, variables=[], placement_of_variables=\"after_end\"):\n \"\"\"\n\n Import data from an ASC file produced from an SR Research EyeLink device\n (you will first need to use SR Research's Edf2asc tool to convert your\n original EDF files to ASC). The importer will extract all trials from the\n ASC file, where a trial is defined as a sequence of fixations (EFIX lines)\n that occur inside a START–END block. Optionally, the importer can extract\n user-defined variables from the ASC file and associate them with the\n appropriate trial. For example, if your ASC file contains messages like\n this:\n\n ```\n MSG 4244101 !V TRIAL_VAR trial_type practice\n MSG 4244101 !V TRIAL_VAR passage_id 1\n ```\n\n then you could extract the variables `\"trial_type\"` and `\"passage_id\"`. A\n variable is some string that is followed by a space; anything that follows\n this space is the variable's value. By default, the importer looks for\n variables that follow the END tag. However, if your variables are placed\n before the START tag, then set the `placement_of_variables` argument to\n `\"before_start\"`. If unsure, you should first inspect your ASC file to see\n what messages you wrote to the data stream and where they are placed. The\n importer will return a list of dictionaries, where each dictionary\n represents a single trial and contains the fixations along with any other\n extracted variables. 
For example:\n\n ```\n [\n {\n \"trial_type\" : \"practice\",\n \"passage_id\" : \"1\",\n \"fixations\" : FixationSequence[...]\n },\n {\n \"trial_type\" : \"test\",\n \"passage_id\" : \"2\",\n \"fixations\" : FixationSequence[...]\n }\n ]\n ```\n\n \"\"\"\n msg_regex = _re.compile( # regex for parsing variables from MSG lines\n r\"^MSG\\s+\\d+\\s+.*?(?P(\"\n + \"|\".join(map(_re.escape, variables))\n + r\"))\\s+(?P.+?)$\"\n )\n efix_regex = _re.compile( # regex for parsing fixations from EFIX lines\n r\"^EFIX\\s+(L|R)\\s+(?P.+?)\\s+(?P.+?)\\s+(?P.+?)\\s+(?P.+?)\\s+(?P.+?)\\s\"\n )\n # Open ASC file and extract lines that begin with START, END, MSG, or EFIX\n with open(str(file_path)) as file:\n raw_data = [\n line.strip()\n for line in file\n if line.startswith((\"START\", \"END\", \"MSG\", \"EFIX\"))\n ]\n # Determine the points where one trial ends and the next begins\n if placement_of_variables == \"before_start\":\n break_indices = [0] + [\n i for i, line in enumerate(raw_data) if line.startswith(\"END\")\n ]\n elif placement_of_variables == \"after_end\":\n break_indices = [\n i for i, line in enumerate(raw_data) if line.startswith(\"START\")\n ] + [len(raw_data)]\n else:\n raise ValueError(\n 'placement_of_variables should be set to \"before_start\" or \"after_end\".'\n )\n # Extract trials based on the identified break points\n extracted_trials = []\n for start, end in zip(break_indices[:-1], break_indices[1:]): # iterate over trials\n trial_lines = raw_data[start:end] # lines belonging to this trial\n trial = {var: None for var in variables}\n fixations = []\n for line in trial_lines:\n if line.startswith(\"EFIX\"):\n # Extract fixation from the EFIX line\n efix_extraction = efix_regex.match(line)\n if efix_extraction:\n fixations.append(\n (\n int(round(float(efix_extraction[\"x\"]), 0)),\n int(round(float(efix_extraction[\"y\"]), 0)),\n int(efix_extraction[\"start\"]),\n int(efix_extraction[\"end\"]),\n )\n )\n elif line.startswith(\"MSG\") and variables:\n # Attempt to extract a variable and its value from the MSG line\n msg_extraction = msg_regex.match(line)\n if msg_extraction:\n trial[msg_extraction[\"var\"]] = msg_extraction[\"val\"].strip()\n trial[\"fixations\"] = _FixationSequence(fixations)\n extracted_trials.append(trial)\n return extracted_trials\n\n\ndef import_csv(\n file_path,\n x_header=\"x\",\n y_header=\"y\",\n start_header=\"start\",\n end_header=\"end\",\n trial_header=None,\n):\n \"\"\"\n\n Import data from a CSV file (requires Pandas to be installed). By default,\n the importer expects the CSV file to contain the column headers, `x`, `y`,\n `start`, and `end`, but this can be customized by setting the relevant\n arguments to whatever column headers your CSV file contains. Each row of\n the CSV file is expected to represent a single fixation. If your CSV file\n contains data from multiple trials, you should also specify the column\n header of a trial identifier, so that the data can be segmented into\n trials. The importer will return a list of dictionaries, where each\n dictionary represents a single trial and contains the fixations along with\n the trial identifier (if specified). For example:\n\n ```\n [\n {\n \"trial_id\" : 1,\n \"fixations\" : FixationSequence[...]\n },\n {\n \"trial_id\" : 2,\n \"fixations\" : FixationSequence[...]\n }\n ]\n ```\n\n \"\"\"\n try:\n import pandas as _pd\n except ModuleNotFoundError as e:\n e.msg = 'The import_csv function requires Pandas. 
Run \"pip install pandas\" to use this function.'\n raise\n raw_data = _pd.read_csv(str(file_path))\n if trial_header is None:\n fixations = [\n tuple(fxn)\n for _, fxn in raw_data[\n [x_header, y_header, start_header, end_header]\n ].iterrows()\n ]\n return [{\"fixations\": _FixationSequence(fixations)}]\n trial_identifiers = raw_data[trial_header].unique()\n extracted_trials = []\n for identifier in trial_identifiers:\n trial_subset = raw_data[raw_data[trial_header] == identifier]\n fixations = [\n tuple(fxn)\n for _, fxn in trial_subset[\n [x_header, y_header, start_header, end_header]\n ].iterrows()\n ]\n trial = {trial_header: identifier, \"fixations\": _FixationSequence(fixations)}\n extracted_trials.append(trial)\n return extracted_trials\n\n\ndef _eyekit_encoder(obj):\n \"\"\"\n\n Convert a `FixationSequence` or `TextBlock` object into something JSON\n serializable that can later be decoded by _eyekit_decoder().\n\n \"\"\"\n if isinstance(obj, _FixationSequence):\n return {\"__FixationSequence__\": obj._serialize()}\n if isinstance(obj, _TextBlock):\n return {\"__TextBlock__\": obj._serialize()}\n raise TypeError(f\"Object of type {obj.__class__.__name__} is not JSON serializable\")\n\n\ndef _eyekit_decoder(obj):\n \"\"\"\n\n Decode an object into a `FixationSequence` or `TextBlock` if the key\n implies that it is one of those types.\n\n \"\"\"\n if \"__FixationSequence__\" in obj:\n return _FixationSequence(obj[\"__FixationSequence__\"])\n if \"__TextBlock__\" in obj:\n return _TextBlock(**obj[\"__TextBlock__\"])\n return obj\n", "sub_path": "eyekit/io.py", "file_name": "io.py", "file_ext": "py", "file_size_in_byte": 8554, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "json.load", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 43, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 97, "usage_type": "call"}, {"api_name": "re.escape", "line_number": 99, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 102, "usage_type": "call"}, {"api_name": "fixation.FixationSequence", "line_number": 149, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 194, "usage_type": "call"}, {"api_name": "fixation.FixationSequence", "line_number": 202, "usage_type": "call"}, {"api_name": "fixation.FixationSequence", "line_number": 213, "usage_type": "call"}, {"api_name": "fixation.FixationSequence", "line_number": 225, "usage_type": "argument"}, {"api_name": "text.TextBlock", "line_number": 227, "usage_type": "argument"}, {"api_name": "fixation.FixationSequence", "line_number": 240, "usage_type": "call"}, {"api_name": "text.TextBlock", "line_number": 242, "usage_type": "call"}]} +{"seq_id": "605277246", "text": "#!/usr/bin/env python3\n\"\"\"\n Copyright (c) 2018 Intel Corporation.\n\n Permission is hereby granted, free of charge, to any person obtaining\n a copy of this software and associated documentation files (the\n \"Software\"), to deal in the Software without restriction, including\n without limitation the rights to use, copy, modify, merge, publish,\n distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so, subject to\n the following conditions:\n\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR 
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n    LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport os\nimport sys\nimport logging as log\n\nfrom openvino.inference_engine import IENetwork, IECore\nimport cv2\nimport utils\n\n\n\nclass Network:\n    \"\"\"\n    Load and configure inference plugins for the specified target devices \n    and performs synchronous and asynchronous modes for the specified infer requests.\n    \"\"\"\n\n    def __init__(self):\n\n        #Initializing class variables\n        self.ie = None\n        self.network = None\n\n        self.input_blob = None\n        self.output_blob = None\n\n        self.exec_network = None\n        self.infer_request = None\n        return \n\n    def load_model(self, model, device=\"CPU\", cpu_extension=None):\n        \n        # Load the model\n        log.debug('Initiating IE core....')\n        startTime = cv2.getTickCount()\n        self.ie = IECore()\n\n        self.network = self.ie.read_network(model=utils.getMOFiles(model)['model'], weights=utils.getMOFiles(model)['weights'])\n        log.debug('Model metadata read Successfully')\n        \n        # Check for supported layers\n        if not utils.isLayersSupported(self.ie,self.network,device):\n            log.error('Cannot continue due to unsupported layers. Check if extension exists !! Exiting....')\n            exit(1)\n\n        # Add necessary extensions \n        if cpu_extension and \"CPU\" in device:\n            self.ie.add_extension(cpu_extension, device)\n\n        # Load the IENetwork\n        log.debug('Initiating Model loading....')\n        startTime = cv2.getTickCount()\n        self.exec_network = self.ie.load_network(network=self.network, device_name=device,num_requests=0)\n        \n        log.debug('Model Loaded in %s seconds' , utils.timeLapse(startTime))\n        \n        # Get the input layer\n        self.input_blob = next(iter(self.network.inputs))\n        log.debug(\"%s\", self.input_blob)\n\n        self.output_blob = next(iter(self.network.outputs))\n        log.debug(\"%s\", self.output_blob)\n\n        # Return the loaded inference engine\n        log.debug(\"IR successfully loaded into Inference Engine.\")\n        return self.exec_network \n\n    def get_input_shape(self):\n\n        # Return the shape of the input layer\n        log.debug(\"Returning input shape\")\n        return self.network.inputs[self.input_blob].shape\n\n    def exec_net(self,image,request_id):\n\n        # Start an asynchronous request\n        startTime = cv2.getTickCount()\n        self.exec_network.start_async(request_id=request_id, inputs={self.input_blob: image})\n        log.debug('Async request started. Time lapse :: %s seconds' , utils.timeLapse(startTime))\n        \n        return\n\n    def wait(self,request_id):\n\n        # Wait for the request to be complete\n        startTime = cv2.getTickCount()\n        status = self.exec_network.requests[request_id].wait(-1)\n        log.debug('Waiting for the request to complete. 
Time lapse :: %s seconds' , utils.timeLapse(startTime))\n\n        return status \n\n    def get_output(self,request_id):\n\n        # Extract and return the output results\n        log.debug(\"Returning output\")\n        return self.exec_network.requests[request_id].outputs[self.output_blob] \n", "sub_path": "inference.py", "file_name": "inference.py", "file_ext": "py", "file_size_in_byte": 4212, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.debug", "line_number": 57, "usage_type": "call"}, {"api_name": "cv2.getTickCount", "line_number": 58, "usage_type": "call"}, {"api_name": "openvino.inference_engine.IECore", "line_number": 59, "usage_type": "call"}, {"api_name": "utils.getMOFiles", "line_number": 61, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 62, "usage_type": "call"}, {"api_name": "utils.isLayersSupported", "line_number": 65, "usage_type": "call"}, {"api_name": "logging.error", "line_number": 66, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 74, "usage_type": "call"}, {"api_name": "cv2.getTickCount", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.timeLapse", "line_number": 78, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 82, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 94, "usage_type": "call"}, {"api_name": "cv2.getTickCount", "line_number": 100, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 102, "usage_type": "call"}, {"api_name": "utils.timeLapse", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.getTickCount", "line_number": 109, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 111, "usage_type": "call"}, {"api_name": "utils.timeLapse", "line_number": 111, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "579557987", "text": "\"\"\"\n    Author: shao\n    Function: practice with sets\n    Version: v3.0\n    V1.0: uses tuple\n    V2.0: uses list\n    New feature: uses set\n    Date: 2018-11-22\n\"\"\"\n\nfrom datetime import datetime\n\n\ndef is_leap_year(year):\n    \"\"\"\n        Determine whether a year is a leap year\n        :param year: input year\n        :return: True if leap year, False otherwise\n    \"\"\"\n    is_leap = False\n    if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):\n        is_leap = True\n    return is_leap\n\n\ndef main():\n    input_year_str = input('Enter the date to check in the format (yyyy-mm-dd): ')\n    input_year = datetime.strptime(input_year_str, '%Y-%m-%d')\n    year = input_year.year\n    month = input_year.month\n    day = input_year.day\n    # set of months with 30 days, excluding February\n    _30_in_month_set = {4, 6, 9, 11}\n    # set of months with 31 days\n    _31_in_month_set = {1, 3, 5, 7, 8, 10, 12}\n    days = day\n    for i in range(1, month):\n        if i in _30_in_month_set:\n            days += 30\n        elif i in _31_in_month_set:\n            days += 31\n        else:\n            days += 28 # assume it is not a leap year by default\n\n    # adjustment for leap years\n    if is_leap_year(year) and month > 2:\n        days += 1\n\n    print('Is {} a leap year: {}'.format(year, is_leap_year(year)))\n    print('The date you entered is day {} of year {}'.format(year, days))\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "jihe_tranv3.py", "file_name": "jihe_tranv3.py", "file_ext": "py", "file_size_in_byte": 1388, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 28, "usage_type": "name"}]} +{"seq_id": "581021620", "text": "import sqlite3\r\nimport datetime\r\nimport dateutil.parser\r\n\r\n\r\nEMPTY_PROBABILITY_THRESHOLD = 0.5\r\nMAX_EXTRACTION = 1000\r\n\r\n\r\nclass PersistentEstimator:\r\n def __init__(self, db_filename):\r\n self.__filename = db_filename\r\n self.__dict = dict()\r\n\r\n connection = sqlite3.connect(db_filename)\r\n cursor = connection.cursor()\r\n cursor.execute('''CREATE TABLE IF NOT EXISTS estimations\r\n (id INT PRIMARY KEY, probability REAL, requests INT, date TEXT)''')\r\n connection.commit()\r\n\r\n for row in cursor.execute(\"SELECT id,probability,requests,date FROM estimations\"):\r\n print(row)\r\n self.__dict[row[0]] = Estimation(row[1], row[2], dateutil.parser.parse(row[3]).date())\r\n connection.close()\r\n\r\n def is_probably_empty(self, bank):\r\n if bank.id not in self.__dict:\r\n return False\r\n return self.__dict[bank.id].is_probably_empty()\r\n\r\n def add(self, bank, probability):\r\n if bank.id not in self.__dict:\r\n self.__dict[bank.id] = Estimation(0)\r\n self.__dict[bank.id].add(probability)\r\n self.__update_db(bank)\r\n\r\n def __update_db(self, bank):\r\n connection = sqlite3.connect(self.__filename)\r\n cursor = connection.cursor()\r\n estimate = self.__dict[bank.id]\r\n tuple = cursor.execute(\"SELECT id FROM estimations WHERE id=?\", [bank.id]).fetchone()\r\n if tuple is None:\r\n cursor.execute(\"INSERT INTO estimations VALUES (?,?,?,?)\",\r\n [bank.id,\r\n estimate.probability,\r\n estimate.request_quantity,\r\n estimate.expiration_date.isoformat()])\r\n else:\r\n cursor.execute(\"UPDATE estimations SET probability = ?, requests = ?, date=? WHERE id = ?\",\r\n [estimate.probability,\r\n estimate.request_quantity,\r\n estimate.expiration_date.isoformat(),\r\n bank.id])\r\n connection.commit()\r\n connection.close()\r\n pass\r\n\r\n\r\nclass Estimation:\r\n def __init__(self, probability, request_quantity=0, expiration_date=None):\r\n self.probability = probability\r\n self.expiration_date = expiration_date\r\n self.request_quantity = request_quantity\r\n if expiration_date is None:\r\n self.expiration_date = self.__get_expiration_date(self.__today())\r\n\r\n @staticmethod\r\n def __get_expiration_date(dat):\r\n if dat.isoweekday() in [6, 7]:\r\n dat += datetime.timedelta(days=dat.isoweekday() % 5)\r\n dat += datetime.timedelta(days=1)\r\n return dat\r\n\r\n @staticmethod\r\n def __today():\r\n today = datetime.date.today()\r\n if datetime.datetime.now().hour < 8:\r\n today -= datetime.timedelta(days=1)\r\n return today\r\n\r\n def __probablity(self):\r\n if self.__today() >= self.expiration_date:\r\n self.probability = 0\r\n self.request_quantity = 0\r\n self.expiration_date = self.__get_expiration_date(self.__today())\r\n if self.request_quantity <= MAX_EXTRACTION:\r\n return 0\r\n return self.probability / MAX_EXTRACTION\r\n\r\n def is_probably_empty(self):\r\n return self.__probablity() > EMPTY_PROBABILITY_THRESHOLD\r\n\r\n def add(self, prob):\r\n if self.__today() >= self.expiration_date:\r\n self.probability = 0\r\n self.request_quantity = 0\r\n self.expiration_date = self.__get_expiration_date(self.__today())\r\n self.probability += prob\r\n self.request_quantity += 1\r\n", "sub_path": "estimations.py", "file_name": "estimations.py", "file_ext": "py", "file_size_in_byte": 3678, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sqlite3.connect", "line_number": 15, "usage_type": "call"}, {"api_name": 
"dateutil.parser.parser.parse", "line_number": 23, "usage_type": "call"}, {"api_name": "dateutil.parser.parser", "line_number": 23, "usage_type": "attribute"}, {"api_name": "dateutil.parser", "line_number": 23, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 76, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "491640129", "text": "import statistics\nimport sys\nimport time\nfrom queue import Queue\n\nfrom Robot import Robot\n\nimport brickpi3\n\nDESIRED_DISTANCE = 30\nKP = 5\nVC = 720\n\nrobot = Robot(\"B\", \"C\")\nrobot.setMotorLimits(70, 720)\n\ntry:\n\n readings = Queue(3)\n readings.put(sys.maxsize)\n readings.put(-sys.maxsize - 1)\n\n while True:\n try:\n # Get actual distance from the wall\n actualDistance = robot.getDistance()\n\n # Put it into the readings queue\n readings.put(actualDistance)\n\n # Take the mean\n actualDistance = statistics.median(list(readings.queue))\n\n # Set the speed accordingly\n leftVelocity = VC - (KP * (DESIRED_DISTANCE - actualDistance)) / 2\n rightVelocity = VC + (KP * (DESIRED_DISTANCE - actualDistance)) / 2\n robot.speedVelocity(leftVelocity, rightVelocity)\n\n print(\"Measured distance: \" + str(actualDistance))\n print(\"Desired distance: \" + str(DESIRED_DISTANCE))\n print(\"Left velocity: \" + str(leftVelocity))\n print(\"Right velocity: \" + str(rightVelocity))\n\n readings.get()\n\n except brickpi3.SensorError as error:\n print(error)\n\n time.sleep(0.02)\n\nexcept KeyboardInterrupt:\n robot.resetAll()", "sub_path": "old/LEGO-Wall.py", "file_name": "LEGO-Wall.py", "file_ext": "py", "file_size_in_byte": 1287, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "Robot.Robot", "line_number": 14, "usage_type": "call"}, {"api_name": "queue.Queue", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.maxsize", "line_number": 20, "usage_type": "attribute"}, {"api_name": "sys.maxsize", "line_number": 21, "usage_type": "attribute"}, {"api_name": "statistics.median", "line_number": 32, "usage_type": "call"}, {"api_name": "brickpi3.SensorError", "line_number": 46, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}]} +{"seq_id": "270754229", "text": "import socket\nimport cv2\nimport random\nimport time\nimport numpy as np\nimport argparse\nparser = argparse.ArgumentParser(description = 'use your raspberry pi to make a security camera')\nparser.add_argument('--port', nargs = '?', const = 1234, type = int, help = 'the port you want to use(default as 1234)')\nparser.add_argument('--c', nargs = '?', const = 1, type = int, help = 'the integer use to VideoCapture usually 1, sometimes 0, vary by system')\nargs = parser.parse_args()\nport = args.port\n# get raspberry pi's own ip\nip = socket.gethostbyname(socket.gethostname())\n# connect to the camera\ncap = cv2.VideoCapture(1)\nif not cap.isOpened():\n print('try changing --c to 0')\n quit(1)\nwhile True:\n try:\n #try to connect to client\n s = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\n s.bind((ip, port))\n s.listen(5)\n c,addr = s.accept()\n #making sure the client is ready\n while True:\n msg = c.recv(1024)\n if msg == b'ready':\n c.send(b'start')\n break\n while True:\n ret,img = cap.read()\n f = cv2.imencode('.png',img)[1].tostring()\n # making sure the client is on the same pace\n c.send(b'start')\n c.sendall(f)\n c.send(b'done')\n except BrokenPipeError as err:\n print('client closed')\n else:\n break\n", "sub_path": "on_raspberry_pi/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 13, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 15, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 22, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 22, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 22, "usage_type": "attribute"}, {"api_name": "socket.SOL_SOCKET", "line_number": 23, "usage_type": "attribute"}, {"api_name": "socket.SO_REUSEADDR", "line_number": 23, "usage_type": "attribute"}, {"api_name": "cv2.imencode", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "355760026", "text": "# IMPORT DISCORD.PY. ALLOWS ACCESS TO DISCORD'S API.\nimport discord\n\n# GETS THE CLIENT OBJECT FROM DISCORD.PY. CLIENT IS SYNONYMOUS WITH BOT.\nbot = discord.Client()\n\n# EVENT LISTENER FOR WHEN THE BOT HAS SWITCHED FROM OFFLINE TO ONLINE.\n@bot.event\nasync def on_ready():\n # CREATES A COUNTER TO KEEP TRACK OF HOW MANY GUILDS / SERVERS THE BOT IS CONNECTED TO.\n guild_count = 0\n\n # LOOPS THROUGH ALL THE GUILD / SERVERS THAT THE BOT IS ASSOCIATED WITH.\n for guild in bot.guilds:\n # PRINT THE SERVER'S ID AND NAME.\n print(f\"- {guild.id} (name: {guild.name})\")\n\n # INCREMENTS THE GUILD COUNTER.\n guild_count += 1\n\n # PRINTS HOW MANY GUILDS / SERVERS THE BOT IS IN.\n print(f\"SampleDiscordBot is in {guild_count} guilds.\")\n\n\n# EVENT LISTENER FOR WHEN A NEW MESSAGE IS SENT TO A CHANNEL.\n@bot.event\nasync def on_message(message):\n # CHECKS IF THE MESSAGE THAT WAS SENT IS EQUAL TO \"HELLO\".\n if message.content == \"hello\":\n # SENDS BACK A MESSAGE TO THE CHANNEL.\n await message.channel.send(\"PRAISE THE SUN!\")\n\n\n# EXECUTES THE BOT WITH THE SPECIFIED TOKEN. 
TOKEN HAS BEEN REMOVED AND USED JUST AS AN EXAMPLE.\nbot.run(\"YOUR_BOT_TOKEN_HERE\")\n", "sub_path": "sample_bot_files/sample_discord_bot_01.py", "file_name": "sample_discord_bot_01.py", "file_ext": "py", "file_size_in_byte": 1238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "discord.Client", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "472352116", "text": "#!/usr/bin/env python3\n\nimport spidev\nimport time\n\ndef read(adc_ch = 0, spi_ch = 0):\n    conn = spidev.SpiDev(0, spi_ch)\n    conn.max_speed_hz = 200000 # Constrain to 200kHz\n\n    cmd_read = 0x80 if adc_ch == 0 else 0xA0\n\n    reply_bytes = conn.xfer2([cmd_read, 0])\n\n    print(reply_bytes, end=' ')\n    print(256 * reply_bytes[0] + reply_bytes[1])\n\nif __name__ == '__main__':\n    while True:\n        try:\n            read()\n            time.sleep(1)\n        except KeyboardInterrupt:\n            exit()\n\n", "sub_path": "python/atod-3202/atod-3202.py", "file_name": "atod-3202.py", "file_ext": "py", "file_size_in_byte": 508, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "spidev.SpiDev", "line_number": 7, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "23288245", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Sep 1 00:33:39 2021\r\n\r\n@author: joseph\r\n\"\"\"\r\n#import nltk\r\n#nltk.download()\r\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\r\nfrom nltk import tokenize\r\n\r\n#read the input file\r\nf = open('input.txt','r')\r\ntext = f.read()\r\n\r\n#split the text into sentences\r\nsentences = tokenize.sent_tokenize(text)\r\n\r\npos_score_sum = 0\r\ncomp_score_sum = 0\r\nneg_score_sum = 0\r\n\r\nSIA = SentimentIntensityAnalyzer()\r\n\r\n#sum the polarity scores of each sentence\r\nfor sentence in sentences:\r\n    comp_score_sum += SIA.polarity_scores(sentence)['compound']\r\n    pos_score_sum += SIA.polarity_scores(sentence)['pos'] \r\n    neg_score_sum += SIA.polarity_scores(sentence)['neg'] \r\n\r\nprint(\"Average overall score: \")\r\n#average the sum of scores\r\n\r\nprint(\"positive score\", pos_score_sum/len(sentences))\r\nprint(\"negative score\", neg_score_sum/len(sentences))\r\nprint(\"compound score\", comp_score_sum/len(sentences))\r\n", "sub_path": "Sentiment Analysis.py", "file_name": "Sentiment Analysis.py", "file_ext": "py", "file_size_in_byte": 943, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "nltk.tokenize.sent_tokenize", "line_number": 17, "usage_type": "call"}, {"api_name": "nltk.tokenize", "line_number": 17, "usage_type": "name"}, {"api_name": "nltk.sentiment.vader.SentimentIntensityAnalyzer", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "249763286", "text": "import torch\nimport torch.nn.functional as F\nimport GCL.augmentors as A\n\nfrom typing import Optional, Tuple\n\n\nclass L2L(torch.nn.Module):\n    def __init__(self, encoder: torch.nn.Module,\n            augmentor: Tuple[A.Augmentor, A.Augmentor],\n            loss,\n            hidden_dim: int, proj_dim: int):\n        super(L2L, self).__init__()\n        self.encoder = encoder\n        self.augmentor = augmentor\n        self.loss = loss\n\n        self.fc1 = torch.nn.Linear(hidden_dim, proj_dim)\n        self.fc2 = torch.nn.Linear(proj_dim, hidden_dim)\n\n        self.num_hidden = hidden_dim\n\n    def forward(self, x: torch.Tensor,\n            edge_index: torch.Tensor, edge_weight: Optional[torch.Tensor] = None)\\\n            -> 
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n aug1, aug2 = self.augmentor\n x1, edge_index1, edge_weight1 = aug1(x, edge_index, edge_weight)\n x2, edge_index2, edge_weight2 = aug2(x, edge_index, edge_weight)\n\n z = self.encoder(x, edge_index, edge_weight)\n z1 = self.encoder(x1, edge_index1, edge_weight1)\n z2 = self.encoder(x2, edge_index2, edge_weight2)\n\n return z, z1, z2\n\n def projection(self, z: torch.Tensor) -> torch.Tensor:\n z = F.elu(self.fc1(z))\n return self.fc2(z)\n", "sub_path": "models/L2L.py", "file_name": "L2L.py", "file_ext": "py", "file_size_in_byte": 1286, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.nn", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 9, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 10, "usage_type": "name"}, {"api_name": "GCL.augmentors.Augmentor", "line_number": 10, "usage_type": "attribute"}, {"api_name": "GCL.augmentors", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 24, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 25, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.elu", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 37, "usage_type": "name"}]} +{"seq_id": "74570138", "text": "from datetime import datetime, timedelta\nfrom dateutil import tz\nimport logging\nimport operator\nimport os\nimport pytz\nimport sys\n\nimport praw\n\n\n# Constants\nSUBREDDIT_LIMIT = 100 # number of submissions to get from each subreddit\n\n\ndef main():\n\n logger = setup_logging('get_reddit_data')\n reddit = setup_reddit_api(logger)\n subreddits = get_subreddits(logger)\n \n now = datetime.now()\n now = now.replace(tzinfo=tz.tzlocal())\n \n # Dictionary to store score and num_comments for Reddit posts\n # Assumption: can store dictionary in memory\n submission_counts_dict = {}\n total_posts = 0\n \n # Get posts from each subreddit from the past 24 hours\n for sr in subreddits:\n \n num_submissions = 0\n submissions = reddit.subreddit(sr).new(limit=SUBREDDIT_LIMIT)\n \n for s in submissions:\n title, time, score, num_comments = process_submission(s, logger)\n \n if (now - timedelta(hours=24) < time < now):\n num_submissions += 1\n \n if title in submission_counts_dict:\n counts = submission_counts_dict[title].split(';')\n score = score + int(counts[0])\n num_comments = num_comments + int(counts[1])\n \n submission_counts_dict[title] = '{};{};{}'.format(score+num_comments, score, num_comments)\n \n else:\n submission_counts_dict[title] = '{};{};{}'.format(score+num_comments, score, num_comments)\n else:\n pass # TODO: submission order is not by time, so cannot stop processing if current submission was created > 24 hours ago\n \n logger.info(\"Got {} submissions from subreddit '{}' in the past 24 
hours\".format(num_submissions, sr))\n \n logger.info('Got {} total unique submissions from all subreddits'.format(len(submission_counts_dict)))\n \n # Write submissions to file in descending order of score + num_comments\n sorted_submissions = sorted(submission_counts_dict.items(), key=lambda x: int(x[1].split(';')[0]), reverse=True)\n if (len(sorted_submissions) > 1000):\n sorted_submissions = sorted_submissions[0:1000]\n\n output_filepath = 'reddit_data.csv'\n with open(output_filepath, 'w') as outfile:\n for (k,v) in sorted_submissions:\n total, score, num_comments = v.split(';')\n outfile.write('\"{}\",{},{}\\n'.format(k, score, num_comments))\n\n\n# -----\n# Helper functions\n# -----\n\ndef setup_logging(logger_name):\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n\n h = logging.StreamHandler(sys.stdout)\n FORMAT = '%(asctime)s %(filename)s:%(funcName)s:%(lineno)d %(levelname)s: %(message)s'\n h.setFormatter(logging.Formatter(FORMAT))\n logger.addHandler(h)\n\n return logger\n\n\ndef get_api_keys(logger):\n\n # Get Twitter API key info from env vars\n client_id = os.getenv('REDDIT_CLIENT_ID')\n client_secret = os.getenv('REDDIT_CLIENT_SECRET')\n \n if (client_id is None) or (client_secret is None):\n logger.error(\"Reddit API env vars not set up correctly!\")\n sys.exit(1)\n \n return (client_id, client_secret)\n\n\ndef setup_reddit_api(logger):\n\n # Setup read-only Reddit instance to access public info\n client_id, client_secret = get_api_keys(logger)\n user_agent=\"script:test_app:v0.1 (by /u/sniphw)\"\n\n # Set up PRAW to access Reddit\n reddit = praw.Reddit(\n client_id=client_id, \n client_secret=client_secret,\n user_agent=user_agent\n )\n \n return reddit\n\n\ndef get_subreddits(logger):\n \n # TODO: refine subreddits to use\n # TODO: how to handle duplicate submissions across subreddits?\n # TODO: 'hot', 'rising', 'controversial' subreddits result in no recent submissions. 'all' subreddit results in strange submissions. 
Why?\n subreddits = ['news', 'technology', 'politics', 'sports', 'religion', 'business', 'entertainment']\n \n return subreddits\n\n\ndef process_submission(s, logger):\n \n title = s.title\n time = datetime.fromtimestamp(s.created_utc, pytz.utc)\n score = s.score\n num_comments = s.num_comments\n \n return title, time, score, num_comments;\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "content-ranking/get_reddit_data.py", "file_name": "get_reddit_data.py", "file_ext": "py", "file_size_in_byte": 4364, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "dateutil.tz.tzlocal", "line_number": 23, "usage_type": "call"}, {"api_name": "dateutil.tz", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 76, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 77, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 79, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 79, "usage_type": "attribute"}, {"api_name": "logging.Formatter", "line_number": 81, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 90, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 91, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 95, "usage_type": "call"}, {"api_name": "praw.Reddit", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 129, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 129, "usage_type": "name"}, {"api_name": "pytz.utc", "line_number": 129, "usage_type": "attribute"}]} +{"seq_id": "328711078", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Dance',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=32)),\n ('slug', models.SlugField(max_length=32)),\n ('description', models.TextField()),\n ('history', models.TextField(blank=True)),\n ('video_url', models.URLField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Genre',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=32)),\n ('slug', models.SlugField(max_length=32)),\n ('description', models.TextField(blank=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Image',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('url', models.URLField()),\n ('caption', models.CharField(max_length=128, blank=True)),\n ('genre', models.ForeignKey(to='dance_classes.Genre')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Level',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=32)),\n ('description', models.TextField(blank=True)),\n ('syllabus', models.TextField()),\n ('dance', 
models.ForeignKey(related_name='levels', to='dance_classes.Dance')),\n ],\n options={\n 'verbose_name_plural': 'dance_classes',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Song',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=64)),\n ('artist', models.CharField(max_length=64, blank=True)),\n ('bpm', models.PositiveSmallIntegerField(default=0)),\n ('dance', models.ForeignKey(related_name='songs', to='dance_classes.Dance')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='dance',\n name='genre',\n field=models.ForeignKey(to='dance_classes.Genre'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='dance',\n name='schedule_cat2',\n field=models.ForeignKey(related_name='+', to='dance_classes.Genre'),\n preserve_default=True,\n ),\n ]\n", "sub_path": "dance_classes/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 3350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 13, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.SlugField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 37, "usage_type": "attribute"}, {"api_name": 
"django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 43, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 43, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 44, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 45, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 45, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 49, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 54, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 55, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 56, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 57, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 58, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 58, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 68, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 68, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 70, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 70, "usage_type": "name"}, {"api_name": "django.db.models.PositiveSmallIntegerField", "line_number": 71, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 71, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 76, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 78, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 81, 
"usage_type": "call"}, {"api_name": "django.db.models", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 84, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 84, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 87, "usage_type": "name"}]} +{"seq_id": "487870506", "text": "import requests\r\nfrom bs4 import BeautifulSoup\r\nimport csv\r\nfrom time import sleep\r\nimport os\r\nimport sys\r\nversion = 1.0\r\nprint(\"This is Version \" + str(version))\r\nUserAgent=input(\"Please enter your nation name: \")\r\n#makes the header\r\nheaders = {\r\n 'User-Agent': UserAgent\r\n}\r\nUserAgent.replace(\" \",\"_\")\r\nfilename=\"Links.txt\"\r\nprint(\"Hello, \"+UserAgent)\r\nBidOrAsk=input(\"Would you like to use Bid or Ask mode?: \")\r\nBidOrAsk=BidOrAsk.lower()\r\nHowClose=input(\"How close would you like to search? (For example 0.05 will look for bids/asks 0.05 bank away from your current bid/ask): \")\r\nbigmode=input(\"Would you like live output? (This is good for players with a crazy number of cards you can press Ctrl-C to pause the program) Yes/No: \")\r\nbigmode=bigmode.lower()\r\nfinal_list=[]\r\n\t\r\n\t\t\r\n\r\n#deletes the old file\r\nif os.path.exists(filename):\r\n os.remove(filename)\r\n\r\nprint(\"Grabbing all of your cards and the shredder please wait....\")\r\n#pings the API for the nations full card list\r\nurl = \"https://www.nationstates.net/cgi-bin/api.cgi?q=cards+asksbids;nationname=\"+UserAgent\r\nresult = requests.get(url, headers=headers)\r\n#soups the data up so we can parse it\r\nsoup = BeautifulSoup(result.content, \"xml\")\r\nsleep(.6)\r\n\r\ncardID=[]\r\ncardSeason=[]\r\nuserSetPrice=[]\r\ncount=0\r\n#parses the data peeling off the ID, Season and Ask Price\r\nif BidOrAsk == \"ask\":\r\n\tfor x, y, z in zip(soup.find_all('CARDID'),soup.find_all('SEASON'),soup.find_all('ASK_PRICE')):\r\n\t\tcount=count+1\r\n\t\tcardID.append(x.text)\r\n\t\tcardSeason.append(y.text)\r\n\t\tuserSetPrice.append(z.text)\r\n#parses the data peeling off the ID, Season and Bid Price\r\nif BidOrAsk == \"bid\":\r\n\tfor x, y, z in zip(soup.find_all('CARDID'),soup.find_all('SEASON'),soup.find_all('BID_PRICE')):\r\n\t\tcount=count+1\r\n\t\tcardID.append(x.text)\r\n\t\tcardSeason.append(y.text)\r\n\t\tuserSetPrice.append(z.text)\r\nindex=0\r\nprint(\"It will take approxamitly \"+ str(count)+\" secounds to shred all of your cards please wait.\")\r\nfor card in cardID:\r\n\t#print(card + cardSeason[index]+\"Step1\")\r\n\t#pings the API for each card.\r\n\turl = \"https://www.nationstates.net/cgi-bin/api.cgi?q=card+markets;cardid=\"+card+\";season=\"+cardSeason[index]\r\n\tresult = requests.get(url, headers=headers)\r\n\tsoup = BeautifulSoup(result.content, \"xml\")\r\n\t#print(ID+SEASON)\r\n\tsleep(.6)\r\n\tfor a, b in zip(soup.find_all('PRICE'),soup.find_all('TYPE')):\r\n\t\t#print(\"got here\")\r\n\t\tif b.text == \"bid\" and BidOrAsk == \"ask\" and float(a.text) > float(userSetPrice[index])-float(HowClose):\r\n\t\t\t#print(\"https://www.nationstates.net/page=deck/card=\"+cardID[index]+\"/season=\"+cardSeason[index])\r\n\t\t\tfinal_list.append(\"https://www.nationstates.net/page=deck/card=\"+cardID[index]+\"/season=\"+cardSeason[index]+\"\\n\")\r\n\t\t\tif bigmode == \"yes\":\r\n\t\t\t\tprint(\"https://www.nationstates.net/page=deck/card=\"+cardID[index]+\"/season=\"+cardSeason[index])\r\n\t\tif b.text == \"ask\" 
and BidOrAsk == \"bid\" and float(a.text) < float(userSetPrice[index])+float(HowClose):\r\n\t\t\t#print(\"https://www.nationstates.net/page=deck/card=\"+cardID[index]+\"/season=\"+cardSeason[index])\r\n\t\t\tfinal_list.append(\"https://www.nationstates.net/page=deck/card=\"+cardID[index]+\"/season=\"+cardSeason[index]+\"\\n\")\r\n\t\t\tif bigmode == \"yes\":\r\n\t\t\t\tprint(\"https://www.nationstates.net/page=deck/card=\"+cardID[index]+\"/season=\"+cardSeason[index])\r\n\t#sleeper(card,cardSeason[index])\r\n\tindex=index+1\r\nif bigmode == \"yes\":\r\n\tprint(\"***********************************************************************************************************************\")\r\n\tprint(\"The final list:\")\r\nfinal_list = list(dict.fromkeys(final_list))\r\n\r\nwith open(filename, 'a+') as f:\r\n\t\tf.writelines(final_list)\r\nfor themall in final_list:\r\n\tprint(themall)\r\n\r\nprint(\"Have a nice day \"+UserAgent)\r\n", "sub_path": "The_Price_is_Right.py", "file_name": "The_Price_is_Right.py", "file_ext": "py", "file_size_in_byte": 3697, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 28, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 62, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 63, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "340551895", "text": "import numpy as np\nimport pandas as pd\nimport scipy.sparse as sp\nimport random\nimport csv\nimport matplotlib.pyplot as plt\nimport pickle\nimport math\nimport time\nstart = time.time()\nmode = 'movielens'\n#mode = 'et'\nmode1 = 'impli'\n#mode1 = 'expli'\np1=[]\np2=[]\np3=[]\np4=[]\n\naverage_diff = []\n\ndef sigmoid(x):\n return np.exp(-x) / (1 + np.exp(-x))\ndef predict_a(W,H,user,item):\n if(user==-1 and item==-1):\n ans = np.zeros((len(W),len(H)))\n for i in range(len(W)):\n for j in range(len(H)):\n temp = np.dot(W[int(i),:],H[int(j),:].T)\n if(mode1=='impli'):\n if temp >= 0:\n temp = 1.0/(1+np.exp(-temp))\n else:\n temp = np.exp(temp)/(1+np.exp(temp))\n ans[i][j] = temp\n elif(item==-1):\n ans = np.zeros((len(H)))\n ans = np.dot(W[int(user),:],H.T)\n ans = np.where(ans>=0,1.0/(1+np.exp(-ans)),np.exp(ans)/(1+np.exp(ans)))\n else:\n ans = np.dot(W[int(user),:],H[int(item),:].T)\n if(mode1=='impli'):\n if ans >= 0:\n ans = 1.0/(1+np.exp(-ans))\n else:\n ans = np.exp(ans)/(1+np.exp(ans))\n return ans\nclass BPR(object):\n def __init__(self,test,train,trainMatrix,us,it,factor,W,H,l,r,Iter=100):\n self.test = test\n self.train = train\n self.trainMatrix = trainMatrix\n self.us = us\n self.it = it\n self.factor = factor\n self.W = W\n self.H = H\n self.l = l\n self.r = r\n self.Iter = Iter\n def training(self):\n N = len(self.train)\n epochs = self.trainMatrix.nnz\n for epo in range(self.Iter):\n for n in range(epochs):\n u = np.random.randint(self.us)\n itemList = self.trainMatrix.getrowview(u).rows[0]\n if len(itemList) == 0:\n continue\n i = np.random.choice(itemList)\n self.update(u,i)\n if epo %1 ==0:\n print(\"epoch : \",epo)\n ans ,ans1= self.cal_in()\n 
print(\"a_error(in)\",ans,ans1,\"a_error(out)\",cal_out(self.test,self.W,self.H))\n print(\"finish\")\n def update(self,u,i):\n j = np.random.randint(self.it)\n while self.trainMatrix[u, j] != 0:\n j = np.random.randint(self.it)\n x_pos = self.predict(u,i)\n x_neg = self.predict(u,j)\n xij = -sigmoid(x_pos - x_neg)\n grad_u = self.H[i,] - self.H[j,]\n if(x_pos>0.95 or x_pos<0.05):\n if(np.dot(self.W[u,],self.l * (xij * grad_u + self.W[u,] * self.r)) < 0):\n return\n self.W[u,] -= self.l * (xij * grad_u + self.W[u,] * self.r)\n grad = self.W[u,]\n self.H[i,] -= self.l * (xij * grad + self.H[i,] * self.r)\n self.H[j,] -= self.l * (-xij * grad + self.H[j,] * self.r)\n def predict(self,u,i):\n ans = np.dot(self.W[int(u),:],self.H[int(i),:].T)\n if(mode1=='impli'):\n if ans >= 0:\n ans = 1.0/(1+np.exp(-ans))\n else:\n ans = np.exp(ans)/(1+np.exp(ans))\n return ans\n def cal_in(self):\n error = 0\n mse = 0\n for i in self.train:\n u = int(i[0])\n item = int(i[1])\n y_hat = self.predict(u,item)\n #print(y_hat,i[2])\n error += abs(i[2] - y_hat)\n mse += (i[2]-y_hat)**2\n return error/len(train) , mse/len(train)\n def add_new_rate(self,rating):\n self.trainMatrix[rating[0],rating[1]] = 1\n temp = list(self.train)\n temp.append(rating)\n self.train=np.array(temp)\n itemList = self.trainMatrix.getrowview(int(rating[0])).rows[0]\n for i in range(self.Iter):\n random.shuffle(itemList)\n for j in range(len(itemList)):\n k = itemList[j]\n self.update(int(rating[0]), int(k))\n \n \n\n\ndef cal_out(test,W,H):\n temp = 0\n for i in test:\n utest = int(i[0])\n itest = int(i[1])\n y_hat = predict_a(W,H,utest,itest)\n temp += (i[2] - y_hat)**2\n return temp/len(test)\n\ndef predict_top100(W,H,test):\n ans = []\n for i in test:\n temp = np.argsort(-predict_a(W,H,i[0],-1))[:100].tolist()\n temp.insert(0,i[3])\n temp.insert(0,i[0])\n ans.append(temp)\n return ans\n\nif(mode=='movielens'):\n train = pd.read_csv('train.csv',header=None)\n test = pd.read_csv('test.csv',header=None)\n train[:][2] = np.ones((len(train)))\n test[:][2] = np.ones((len(test)))\n train = np.array(train)\n test = np.array(test)\n c = np.r_[train,test]\n us = int(np.max(c[:, 0])+1)\n it = int(np.max(c[:, 1])+1)\n print(\"Using movielens\")\n\ntrainMatrix = sp.lil_matrix((us, it))\nfor i in train:\n\ttrainMatrix[i[0],i[1]] = 1\n\nnp.random.seed(7)\nf = 10\nl = 0.01\nr = 0.01\nIter = 100\nW = np.random.rand(us, f,)\nH = np.random.rand(it, f,)\n\nend = time.time()\nprint(\"Start Training : \",end-start)\nstart = end\nModel = BPR(test,train,trainMatrix,us,it,f,W,H,l,r,Iter)\nModel.training()\nend = time.time()\nprint(\"Training cost : \",end-start)\nstart = end\nout1=[]\nfor i in range(len(test)):\n if i %1000 ==0:\n print(\"epoch : \",i)\n ans = cal_out(test,Model.W,Model.H)\n print(\"a_error(out)\",ans)\n temp = np.argsort(-predict_a(Model.W,Model.H,test[i][0],-1))[:100].tolist()\n ans = np.where(temp==test[1])\n if(len(ans[0])>0):\n out1.append(ans[0][0])\n else:\n out1.append(0)\n Model.add_new_rate(test[i])\nend = time.time()\nprint(\"Updating cost : \",end-start)\nprint(\"After update\",cal_out(test,Model.W,Model.H))\n\n\nwith open('2.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerows(out1)\n\n\n\n\n", "sub_path": "src/BPR/BPR2.py", "file_name": "BPR2.py", "file_ext": "py", "file_size_in_byte": 5799, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 
23, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 80, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 138, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 145, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 150, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 151, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 153, "usage_type": "call"}, {"api_name": "scipy.sparse.lil_matrix", "line_number": 156, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 156, "usage_type": "name"}, {"api_name": "numpy.random.seed", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 160, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 166, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 168, "usage_type": "call"}, {"api_name": "time.time", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 182, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 183, "usage_type": "call"}, {"api_name": "time.time", "line_number": 189, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 195, 
"usage_type": "call"}]} +{"seq_id": "262845367", "text": "from time import sleep\nfrom selenium import webdriver\nfrom pymongo import MongoClient\n\nconn = MongoClient(\"114.242.177.193\", 27017)\nbnu = conn.get_database(\"bnu_project\")\nbnu.authenticate(\"zjx\", \"ZhuJiaxing2018\")\ntaobao = bnu.get_collection('taobao')\n\ndriver = webdriver.Chrome()\ndriver.get(\"https://login.taobao.com/member/login.jhtml\")\n\nsleep(3)\ndriver.find_element_by_xpath('//*[@class=\"forget-pwd J_Quick2Static\"]').click()\nsleep(2)\ndriver.find_element_by_xpath('//*[@class=\"weibo-login\"]').click()\nsleep(3)\ndriver.find_element_by_name('username').send_keys('18810565250')\nsleep(2)\ndriver.find_element_by_name('password').send_keys('touch2014')\nsleep(4)\ndriver.find_element_by_xpath('//*[@class=\"btn_tip\"]/a/span').click()\n\nproducts = taobao.find({\"sale_num\": \"-\"})\n\ncount = 0\ntotal = taobao.find({\"sale_num\": \"-\"}).count()\nprint(total)\n\nfor item in products:\n count += 1\n try:\n driver.get(item[\"url\"])\n item[\"detail_html\"] = driver.find_element_by_id(\"detail\").get_attribute('innerHTML')\n except:\n continue\n item[\"status\"] = \"ready\"\n taobao.save(item)\n print(str(total - count) + \" left\")\n sleep(3)\n", "sub_path": "taobao.py", "file_name": "taobao.py", "file_ext": "py", "file_size_in_byte": 1147, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pymongo.MongoClient", "line_number": 5, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 10, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 10, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 13, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 15, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 17, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "471740210", "text": "# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport contextlib\nimport os\nimport tempfile\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nimport ray\nimport torch\n\nfrom ludwig.api import LudwigModel\nfrom ludwig.backend import create_ray_backend, LOCAL_BACKEND\nfrom ludwig.backend.ray import get_trainer_kwargs, RayBackend\nfrom ludwig.constants import BALANCE_PERCENTAGE_TOLERANCE, NAME, TRAINER\nfrom ludwig.data.dataframe.dask import DaskEngine\nfrom ludwig.data.preprocessing import balance_data\nfrom ludwig.utils.data_utils import read_parquet\nfrom tests.integration_tests.utils import (\n audio_feature,\n bag_feature,\n binary_feature,\n category_feature,\n create_data_set_to_use,\n date_feature,\n generate_data,\n h3_feature,\n image_feature,\n 
number_feature,\n sequence_feature,\n set_feature,\n spawn,\n text_feature,\n timeseries_feature,\n train_with_backend,\n vector_feature,\n)\n\nRAY_BACKEND_CONFIG = {\n \"type\": \"ray\",\n \"processor\": {\n \"parallelism\": 2,\n },\n \"trainer\": {\n \"use_gpu\": False,\n \"num_workers\": 2,\n \"resources_per_worker\": {\n \"CPU\": 0.1,\n \"GPU\": 0,\n },\n },\n}\n\n\n@contextlib.contextmanager\ndef ray_start(num_cpus=2, num_gpus=None):\n res = ray.init(\n num_cpus=num_cpus,\n num_gpus=num_gpus,\n include_dashboard=False,\n object_store_memory=150 * 1024 * 1024,\n )\n try:\n yield res\n finally:\n ray.shutdown()\n\n\ndef run_api_experiment(config, data_parquet, backend_config):\n # Sanity check that we get 4 slots over 1 host\n kwargs = get_trainer_kwargs()\n if torch.cuda.device_count() > 0:\n assert kwargs.get(\"num_workers\") == torch.cuda.device_count(), kwargs\n assert kwargs.get(\"use_gpu\"), kwargs\n else:\n assert kwargs.get(\"num_workers\") == 1, kwargs\n assert not kwargs.get(\"use_gpu\"), kwargs\n\n # Train on Parquet\n model = train_with_backend(backend_config, config, dataset=data_parquet, evaluate=True, predict=False)\n\n assert isinstance(model.backend, RayBackend)\n if isinstance(model.backend.df_engine, DaskEngine):\n assert model.backend.df_engine.parallelism == backend_config[\"processor\"][\"parallelism\"]\n\n\ndef run_split_api_experiment(config, data_parquet, backend_config):\n train_fname, val_fname, test_fname = split(data_parquet)\n\n # Train\n train_with_backend(backend_config, config, training_set=train_fname, evaluate=False, predict=True)\n\n # Train + Validation\n train_with_backend(\n backend_config, config, training_set=train_fname, validation_set=val_fname, evaluate=False, predict=False\n )\n\n # Train + Validation + Test\n train_with_backend(\n backend_config,\n config,\n training_set=train_fname,\n validation_set=val_fname,\n test_set=test_fname,\n evaluate=False,\n predict=False,\n )\n\n\ndef split(data_parquet):\n data_df = read_parquet(data_parquet, LOCAL_BACKEND.df_engine.df_lib)\n train_df = data_df.sample(frac=0.8)\n test_df = data_df.drop(train_df.index).sample(frac=0.5)\n validation_df = data_df.drop(train_df.index).drop(test_df.index)\n\n basename, ext = os.path.splitext(data_parquet)\n train_fname = basename + \".train\" + ext\n val_fname = basename + \".validation\" + ext\n test_fname = basename + \".test\" + ext\n\n train_df.to_parquet(train_fname)\n validation_df.to_parquet(val_fname)\n test_df.to_parquet(test_fname)\n return train_fname, val_fname, test_fname\n\n\n@spawn\ndef run_test_parquet(\n input_features,\n output_features,\n num_examples=100,\n run_fn=run_api_experiment,\n expect_error=False,\n num_cpus=2,\n num_gpus=None,\n df_engine=None,\n):\n with ray_start(num_cpus=num_cpus, num_gpus=num_gpus):\n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, \"batch_size\": 8},\n }\n\n backend_config = {**RAY_BACKEND_CONFIG}\n if df_engine:\n backend_config[\"processor\"][\"type\"] = df_engine\n\n with tempfile.TemporaryDirectory() as tmpdir:\n csv_filename = os.path.join(tmpdir, \"dataset.csv\")\n dataset_csv = generate_data(input_features, output_features, csv_filename, num_examples=num_examples)\n dataset_parquet = create_data_set_to_use(\"parquet\", dataset_csv)\n\n if expect_error:\n with pytest.raises(ValueError):\n run_fn(config, data_parquet=dataset_parquet, backend_config=backend_config)\n else:\n 
run_fn(config, data_parquet=dataset_parquet, backend_config=backend_config)\n\n\n@pytest.mark.parametrize(\"df_engine\", [\"dask\", \"modin\"])\n@pytest.mark.distributed\ndef test_ray_tabular(df_engine):\n input_features = [\n sequence_feature(reduce_output=\"sum\"),\n category_feature(vocab_size=2, reduce_input=\"sum\"),\n number_feature(normalization=\"zscore\"),\n set_feature(),\n binary_feature(),\n bag_feature(),\n vector_feature(),\n h3_feature(),\n date_feature(),\n ]\n output_features = [\n binary_feature(bool2str=[\"No\", \"Yes\"]),\n binary_feature(),\n number_feature(normalization=\"zscore\"),\n ]\n run_test_parquet(input_features, output_features, df_engine=df_engine)\n\n\n@pytest.mark.skip(reason=\"TODO torch\")\n@pytest.mark.distributed\ndef test_ray_text():\n input_features = [\n text_feature(),\n ]\n output_features = [\n text_feature(reduce_input=None, decoder=\"tagger\"),\n ]\n run_test_parquet(input_features, output_features)\n\n\n@pytest.mark.skip(reason=\"TODO torch\")\n@pytest.mark.distributed\ndef test_ray_sequence():\n input_features = [sequence_feature(max_len=10, encoder=\"rnn\", cell_type=\"lstm\", reduce_output=None)]\n output_features = [sequence_feature(max_len=10, decoder=\"tagger\", attention=False, reduce_input=None)]\n run_test_parquet(input_features, output_features)\n\n\n@pytest.mark.distributed\ndef test_ray_audio():\n with tempfile.TemporaryDirectory() as tmpdir:\n audio_dest_folder = os.path.join(tmpdir, \"generated_audio\")\n input_features = [audio_feature(folder=audio_dest_folder)]\n output_features = [binary_feature()]\n run_test_parquet(input_features, output_features)\n\n\n@pytest.mark.distributed\ndef test_ray_image():\n with tempfile.TemporaryDirectory() as tmpdir:\n image_dest_folder = os.path.join(tmpdir, \"generated_images\")\n input_features = [\n image_feature(\n folder=image_dest_folder,\n encoder=\"resnet\",\n preprocessing={\"in_memory\": True, \"height\": 12, \"width\": 12, \"num_channels\": 3, \"num_processes\": 5},\n output_size=16,\n num_filters=8,\n ),\n ]\n output_features = [binary_feature()]\n run_test_parquet(input_features, output_features)\n\n\n@pytest.mark.skip(reason=\"flaky: ray is running out of resources\")\n@pytest.mark.distributed\ndef test_ray_split():\n input_features = [\n number_feature(normalization=\"zscore\"),\n set_feature(),\n binary_feature(),\n ]\n output_features = [category_feature(vocab_size=2, reduce_input=\"sum\")]\n run_test_parquet(\n input_features,\n output_features,\n run_fn=run_split_api_experiment,\n num_cpus=4,\n )\n\n\n@pytest.mark.distributed\ndef test_ray_timeseries():\n input_features = [timeseries_feature()]\n output_features = [number_feature()]\n run_test_parquet(input_features, output_features)\n\n\n@pytest.mark.distributed\ndef test_ray_lazy_load_audio_error():\n with tempfile.TemporaryDirectory() as tmpdir:\n audio_dest_folder = os.path.join(tmpdir, \"generated_audio\")\n input_features = [\n audio_feature(\n folder=audio_dest_folder,\n preprocessing={\n \"in_memory\": False,\n },\n )\n ]\n output_features = [binary_feature()]\n run_test_parquet(input_features, output_features, expect_error=True)\n\n\n@pytest.mark.distributed\ndef test_ray_lazy_load_image_error():\n with tempfile.TemporaryDirectory() as tmpdir:\n image_dest_folder = os.path.join(tmpdir, \"generated_images\")\n input_features = [\n image_feature(\n folder=image_dest_folder,\n encoder=\"resnet\",\n preprocessing={\"in_memory\": False, \"height\": 12, \"width\": 12, \"num_channels\": 3, \"num_processes\": 5},\n 
output_size=16,\n                num_filters=8,\n            ),\n        ]\n        output_features = [binary_feature()]\n        run_test_parquet(input_features, output_features, expect_error=True)\n\n\n@pytest.mark.skipif(torch.cuda.device_count() == 0, reason=\"test requires at least 1 gpu\")\n@pytest.mark.skipif(not torch.cuda.is_available(), reason=\"test requires gpu support\")\n@pytest.mark.distributed\ndef test_train_gpu_load_cpu():\n    input_features = [\n        category_feature(vocab_size=2, reduce_input=\"sum\"),\n        number_feature(normalization=\"zscore\"),\n    ]\n    output_features = [\n        binary_feature(),\n    ]\n    run_test_parquet(input_features, output_features, run_fn=_run_train_gpu_load_cpu, num_gpus=1)\n\n\n@pytest.mark.distributed\n@pytest.mark.parametrize(\n    \"method, balance\",\n    [\n        (\"oversample_minority\", 0.25),\n        (\"oversample_minority\", 0.5),\n        (\"oversample_minority\", 0.75),\n        (\"undersample_majority\", 0.25),\n        (\"undersample_majority\", 0.5),\n        (\"undersample_majority\", 0.75),\n    ],\n)\ndef test_balance_ray(method, balance):\n    config = {\n        \"input_features\": [\n            {\"name\": \"Index\", \"proc_column\": \"Index\", \"type\": \"number\"},\n            {\"name\": \"random_1\", \"proc_column\": \"random_1\", \"type\": \"number\"},\n            {\"name\": \"random_2\", \"proc_column\": \"random_2\", \"type\": \"number\"},\n        ],\n        \"output_features\": [{\"name\": \"Label\", \"proc_column\": \"Label\", \"type\": \"binary\"}],\n        \"preprocessing\": {\"oversample_minority\": None, \"undersample_majority\": None},\n    }\n    input_df = pd.DataFrame(\n        {\n            \"Index\": np.arange(0, 200, 1),\n            \"random_1\": np.random.randint(0, 50, 200),\n            \"random_2\": np.random.choice([\"Type A\", \"Type B\", \"Type C\", \"Type D\"], 200),\n            \"Label\": np.concatenate((np.zeros(180), np.ones(20))),\n            \"split\": np.zeros(200),\n        }\n    )\n    config[\"preprocessing\"][method] = balance\n    target = config[\"output_features\"][0][NAME]\n\n    with ray_start(num_cpus=2, num_gpus=None):\n        backend = create_ray_backend()\n        input_df = backend.df_engine.from_pandas(input_df)\n        test_df = balance_data(input_df, config[\"output_features\"], config[\"preprocessing\"], backend)\n\n        majority_class = test_df[target].value_counts().compute()[test_df[target].value_counts().compute().idxmax()]\n        minority_class = test_df[target].value_counts().compute()[test_df[target].value_counts().compute().idxmin()]\n        new_class_balance = round(minority_class / majority_class, 2)\n\n        assert abs(balance - new_class_balance) < BALANCE_PERCENTAGE_TOLERANCE\n\n\ndef _run_train_gpu_load_cpu(config, data_parquet):\n    with tempfile.TemporaryDirectory() as output_dir:\n        model_dir = ray.get(train_gpu.remote(config, data_parquet, output_dir))\n        ray.get(predict_cpu.remote(model_dir, data_parquet))\n\n\n@ray.remote(num_cpus=1, num_gpus=1)\ndef train_gpu(config, dataset, output_directory):\n    model = LudwigModel(config, backend=\"local\")\n    _, _, output_dir = model.train(dataset, output_directory=output_directory)\n    return os.path.join(output_dir, \"model\")\n\n\n@ray.remote(num_cpus=1, num_gpus=0)\ndef predict_cpu(model_dir, dataset):\n    model = LudwigModel.load(model_dir, backend=\"local\")\n    model.predict(dataset)\n", "sub_path": "tests/integration_tests/test_ray.py", "file_name": "test_ray.py", "file_ext": "py", "file_size_in_byte": 12536, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "ray.init", "line_number": 70, "usage_type": "call"}, {"api_name": "ray.shutdown", "line_number": 79, "usage_type": "call"}, {"api_name": "contextlib.contextmanager", 
"line_number": 68, "usage_type": "attribute"}, {"api_name": "ludwig.backend.ray.get_trainer_kwargs", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 86, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.train_with_backend", "line_number": 93, "usage_type": "call"}, {"api_name": "ludwig.backend.ray.RayBackend", "line_number": 95, "usage_type": "argument"}, {"api_name": "ludwig.data.dataframe.dask.DaskEngine", "line_number": 96, "usage_type": "argument"}, {"api_name": "tests.integration_tests.utils.train_with_backend", "line_number": 104, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.train_with_backend", "line_number": 107, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.train_with_backend", "line_number": 112, "usage_type": "call"}, {"api_name": "ludwig.utils.data_utils.read_parquet", "line_number": 124, "usage_type": "call"}, {"api_name": "ludwig.backend.LOCAL_BACKEND.df_engine", "line_number": 124, "usage_type": "attribute"}, {"api_name": "ludwig.backend.LOCAL_BACKEND", "line_number": 124, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "ludwig.constants.TRAINER", "line_number": 156, "usage_type": "name"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 163, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.generate_data", "line_number": 165, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.create_data_set_to_use", "line_number": 166, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 169, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.spawn", "line_number": 140, "usage_type": "name"}, {"api_name": "tests.integration_tests.utils.sequence_feature", "line_number": 179, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.category_feature", "line_number": 180, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.number_feature", "line_number": 181, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.set_feature", "line_number": 182, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.binary_feature", "line_number": 183, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.bag_feature", "line_number": 184, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.vector_feature", "line_number": 185, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.h3_feature", "line_number": 186, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.date_feature", "line_number": 187, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.binary_feature", "line_number": 190, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.binary_feature", "line_number": 191, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.number_feature", "line_number": 192, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 175, "usage_type": "call"}, {"api_name": 
"pytest.mark", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 176, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.text_feature", "line_number": 201, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.text_feature", "line_number": 204, "usage_type": "call"}, {"api_name": "pytest.mark.skip", "line_number": 197, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 197, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 198, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.sequence_feature", "line_number": 212, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.sequence_feature", "line_number": 213, "usage_type": "call"}, {"api_name": "pytest.mark.skip", "line_number": 209, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 209, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 210, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path", "line_number": 220, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.audio_feature", "line_number": 221, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.binary_feature", "line_number": 222, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 217, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 228, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 229, "usage_type": "call"}, {"api_name": "os.path", "line_number": 229, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.image_feature", "line_number": 231, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.binary_feature", "line_number": 239, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 226, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.number_feature", "line_number": 247, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.set_feature", "line_number": 248, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.binary_feature", "line_number": 249, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.category_feature", "line_number": 251, "usage_type": "call"}, {"api_name": "pytest.mark.skip", "line_number": 243, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 243, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 244, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.timeseries_feature", "line_number": 262, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.number_feature", "line_number": 263, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 260, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 269, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path", "line_number": 270, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.audio_feature", "line_number": 272, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.binary_feature", "line_number": 279, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 267, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 285, 
"usage_type": "call"}, {"api_name": "os.path.join", "line_number": 286, "usage_type": "call"}, {"api_name": "os.path", "line_number": 286, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.image_feature", "line_number": 288, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.binary_feature", "line_number": 296, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 283, "usage_type": "attribute"}, {"api_name": "tests.integration_tests.utils.category_feature", "line_number": 305, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.number_feature", "line_number": 306, "usage_type": "call"}, {"api_name": "tests.integration_tests.utils.binary_feature", "line_number": 309, "usage_type": "call"}, {"api_name": "pytest.mark.skipif", "line_number": 300, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 300, "usage_type": "attribute"}, {"api_name": "torch.cuda.device_count", "line_number": 300, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 300, "usage_type": "attribute"}, {"api_name": "pytest.mark.skipIf", "line_number": 301, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 301, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 301, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pytest.mark", "line_number": 302, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 336, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 338, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 339, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 339, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 340, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 342, "usage_type": "call"}, {"api_name": "ludwig.constants.NAME", "line_number": 346, "usage_type": "name"}, {"api_name": "ludwig.backend.create_ray_backend", "line_number": 349, "usage_type": "call"}, {"api_name": "ludwig.data.preprocessing.balance_data", "line_number": 351, "usage_type": "call"}, {"api_name": "ludwig.constants.BALANCE_PERCENTAGE_TOLERANCE", "line_number": 357, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 314, "usage_type": "attribute"}, {"api_name": "pytest.mark.parametrize", "line_number": 315, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 315, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 361, "usage_type": "call"}, {"api_name": "ray.get", "line_number": 362, "usage_type": "call"}, {"api_name": "ray.get", "line_number": 363, "usage_type": "call"}, {"api_name": "ludwig.api.LudwigModel", "line_number": 368, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 370, "usage_type": "call"}, {"api_name": "os.path", "line_number": 370, "usage_type": "attribute"}, {"api_name": "ray.remote", "line_number": 366, "usage_type": "call"}, {"api_name": "ludwig.api.LudwigModel.load", "line_number": 375, "usage_type": "call"}, {"api_name": "ludwig.api.LudwigModel", "line_number": 375, "usage_type": "name"}, {"api_name": "ray.remote", "line_number": 
373, "usage_type": "call"}]} +{"seq_id": "249146833", "text": "# ------------------------------------------------------------------------------\n#\n# News - View\n#\n# ------------------------------------------------------------------------------\n\n# ------------------------------------------------------------------------------\n# Modules\n# ------------------------------------------------------------------------------\n\nfrom django.http import Http404\n\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import render_to_response\n\nfrom django.template import RequestContext\n\nfrom django.core.paginator import Paginator\nfrom django.core.paginator import PageNotAnInteger\nfrom django.core.paginator import EmptyPage\n\nfrom models import Categorie\nfrom models import New\n\n# ------------------------------------------------------------------------------\n# Functions\n# ------------------------------------------------------------------------------\n\ndef index(request):\n \"\"\"\n Show main page with latest new\n\n :param WSGIRequest request: HTML request data\n \"\"\"\n\n new = New.objects.order_by(\"-date\")\n\n if len(new) == 0:\n raise Http404(\"No new in database\")\n\n return render_to_response(\"news/index.html\", {\n \"new\": new[0] },\n context_instance=RequestContext(request))\n\n\ndef list(request, order=None):\n \"\"\"\n List all the news ordered by date\n\n :param WSGIRequest request: HTML request data\n :param str order: News order\n \"\"\"\n\n if order is None:\n order = \"-date\"\n\n newslist = New.objects.order_by(order)\n\n p = Paginator(newslist, 10)\n\n page = request.GET.get(\"page\")\n\n try:\n nlist = p.page(page)\n\n except PageNotAnInteger:\n nlist = p.page(1)\n\n except EmptyPage:\n nlist = p.page(p.num_pages)\n\n return render_to_response(\"news/list.html\", {\n \"list\": nlist,\n \"order\": order },\n context_instance=RequestContext(request))\n\n\ndef news(request, news_id):\n \"\"\"\n Show a specific new\n\n :param WSGIRequest request: HTML request data\n :param int news_id: New identifier\n \"\"\"\n\n return render_to_response(\"news/single.html\", {\n \"new\": get_object_or_404(New, pk=news_id) },\n context_instance=RequestContext(request))\n", "sub_path": "pages/news/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "models.New.objects.order_by", "line_number": 36, "usage_type": "call"}, {"api_name": "models.New.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.New", "line_number": 36, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 41, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 43, "usage_type": "call"}, {"api_name": "models.New.objects.order_by", "line_number": 57, "usage_type": "call"}, {"api_name": "models.New.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.New", "line_number": 57, "usage_type": "name"}, {"api_name": "django.core.paginator.Paginator", "line_number": 59, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 66, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 69, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 72, "usage_type": 
"call"}, {"api_name": "django.template.RequestContext", "line_number": 75, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 86, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 87, "usage_type": "call"}, {"api_name": "models.New", "line_number": 87, "usage_type": "argument"}, {"api_name": "django.template.RequestContext", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "186065646", "text": "# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:light\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.3.4\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # print feature importance for each iteration\n\n# +\nimport xgboost as xgb\nimport pprint\n\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\n\npp = pprint.PrettyPrinter()\n\ndef fmap(trees):\n fmap = {}\n for tree in trees:\n for line in tree.split('\\n'):\n # look for the opening square bracket\n arr = line.split('[')\n # if no opening bracket (leaf node), ignore this line\n if len(arr) == 1:\n continue\n\n # extract feature name from string between []\n fid = arr[1].split(']')[0].split('<')[0]\n\n if fid not in fmap:\n # if the feature hasn't been seen yet\n fmap[fid] = 1\n else:\n fmap[fid] += 1\n return fmap\n\ndef MyCallback():\n def callback(env):\n pass\n trees = env.model.get_dump(with_stats=True)\n feature_weight = fmap(trees)\n pp.pprint(trees)\n print(feature_weight)\n print(env.model.get_score(importance_type='gain'))\n return callback\n\nX, y = load_boston(return_X_y=True)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)\ndtrain = xgb.DMatrix(X_train, label=y_train)\ndtest = xgb.DMatrix(X_test, label=y_test)\n\nparams = {'objective':'reg:squarederror', 'eval_metric': 'rmse'}\n\n# -\n\n# # f_target_weight\n\n# +\nv1=0.5\nv2=0.3\nv3=0.2\n\nf_target_weight = [v1/2,v1/2,\n v2/3,v2/3,v2/3,\n v3/5,v3/5,v3/5,v3/5,v3/5,\n ]\nsum(f_target_weight)\n# -\n\n\n\nbst = xgb.train(params, dtrain, num_boost_round=2, evals=[(dtrain, 'train'), (dtest, 'test')],\n callbacks=[MyCallback()])\n\nfig, ax = plt.subplots(figsize=(30, 30))\nxgb.plot_tree(bst,ax=ax)\n\nbst.get_score(importance_type='gain')\n\nbst.get_fscore()\n\n\n", "sub_path": "Mv_gbm/xgboost-auto_weight_col_5960/xgboost-auto_weight_col_5960/feature_importance_for_each_iteration.py", "file_name": "feature_importance_for_each_iteration.py", "file_ext": "py", "file_size_in_byte": 2158, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pprint.PrettyPrinter", "line_number": 26, "usage_type": "call"}, {"api_name": "sklearn.datasets.load_boston", "line_number": 58, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 59, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 60, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 61, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "xgboost.plot_tree", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "102860983", "text": "import logging\nimport argparse\nimport 
sys\nimport os\nimport time\nimport glob\nfrom datetime import datetime\n\nimport utilities\nfrom shell import cmd\n\n'''\nFunctions to download, backup, restore, and overwrite site databases\n'''\n\n# download an sql dump into Jenkins workspace directory\ndef download(site_variables):\n    logging.debug(' --> sqlTools.py : download({})'.format(site_variables))\n\n    # configure time stamp for backup file name\n    date = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n    # construct backup file name\n    dumpName = '{}.{}.sql'.format(site_variables['database'], date)\n\n    # build shell command\n    command = 'mysqldump -h {} -u{} -p{} --add-drop-database --databases {} > {}/{}'.format(site_variables['server'],\n                                                                                            site_variables['username'],\n                                                                                            site_variables['password'],\n                                                                                            site_variables['database'],\n                                                                                            os.getcwd(),\n                                                                                            dumpName)\n\n    # get sql dump\n    logging.info('\\n')\n    logging.info(' --> Downloading database...')\n    success, out, err = cmd(command)\n    if not success:\n        if err:\n            logging.info('\\n')\n            logging.info(\"Failed sqldump: {0} \\n {1}\".format(err, out))\n            return False\n        else:\n            logging.info('\\n')\n            logging.info(\"Failed\\nReason: other\")\n            return False\n    else:\n        logging.info('\\n')\n        logging.info(' --> Database downloaded to Jenkins workspace')\n        return True\n\n\n\n# get an sqldump from a site's database server\ndef backup(site_variables):\n    logging.debug(' --> sqlTools.py : backup(site_variables)')\n\n    # get backup directory\n    backup_directory = utilities.get_backup_directory(site_variables['name'])\n\n\n    # configure time stamp for backup file name\n    date = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n    # construct backup file name\n    dumpName = '{}.{}.sql'.format(site_variables['database'], date)\n\n    # build shell command\n    command = 'mysqldump -h {} -u{} -p{} --add-drop-database --databases {} > {}/{}'.format(site_variables['server'],\n                                                                                            site_variables['username'],\n                                                                                            site_variables['password'],\n                                                                                            site_variables['database'],\n                                                                                            backup_directory,\n                                                                                            dumpName)\n\n    # get sql dump\n    logging.info('\\n')\n    logging.info(' --> Backing up database for {}...'.format(site_variables['name']))\n    success, out, err = cmd(command)\n    if not success:\n        if err:\n            logging.info('\\n')\n            logging.info(\"Failed sqldump: {0} \\n {1}\".format(err, out))\n            return False\n        else:\n            logging.info('\\n')\n            logging.info(\"Failed\\nReason: other\")\n            return False\n    else:\n        logging.info('\\n')\n        logging.info(' --> Database backup created at: {}/{}'.format(backup_directory, dumpName))\n        return True\n\n# restore a database with the backup path argument as a source\ndef restore(site_variables, backup_path):\n    logging.debug(' --> sqlTools.py : restore({},{})'.format(site_variables, backup_path))\n\n\n    # construct sql command\n    command = 'mysql -h {} -u{} -p{} {} < {}'.format(site_variables['server'],\n                                                     site_variables['username'],\n                                                     site_variables['password'],\n                                                     site_variables['database'],\n                                                     backup_path)\n    # restore the database\n    logging.info('\\n')\n    logging.info(' --> Restoring database from {}'.format(backup_path))\n    success, out, err = cmd(command)\n    if not success:\n        if err:\n            logging.info('\\n')\n            logging.info(\"Failed sql restore: {0} \\n {1}\".format(err, out))\n            return False\n        else:\n            logging.info('\\n')\n            logging.info(\"Failed\\nReason: other\")\n            return False\n    else:\n        logging.info('\\n')\n        logging.info(' --> Database restored')\n\n    # use drush to update the overwritten database\n    if not utilities.drush_updb(site_variables):\n        logging.info('\\n')\n        logging.info(' --> Database not updated via Drush')\n\n    # use drush to clear site cache\n    
if not utilities.drush_cc_all(site_variables):\n        logging.info('\\n')\n        logging.info(' --> Site cache not cleared via Drush')\n\n    return True\n\n\n# overwrite the destination database with a source database\ndef overwrite(source_site_variables, dest_site_variables, backup_option):\n    logging.debug(' --> sqlTools.py : overwrite({}, {})'.format(source_site_variables, dest_site_variables))\n\n\n    # backup destination database before overwriting\n    if backup_option:\n        logging.debug(\"\\n\")\n        logging.debug(\" --> Backing up destination database\")\n        if not backup(dest_site_variables):\n            logging.info('\\n')\n            logging.info(' --> Destination database unable to be backed up')\n            return False\n\n    # backup source database to use in overwrite\n    logging.debug(\"\\n\")\n    logging.debug(\" --> Getting source database to use in overwrite\")\n    if not backup(source_site_variables):\n        logging.info('\\n')\n        logging.info(' --> Source database unable to be backed up')\n        return False\n\n    # get path to source sql dump\n    logging.debug(\"\\n\")\n    logging.debug(\" --> Getting path to source sql dump\")\n    source_sql_dump = utilities.get_latest_backup(source_site_variables['name'], 'Database')\n    if not source_sql_dump:\n        logging.info('\\n')\n        logging.info(' --> Source database does not have a backup')\n        return False\n\n    # use sed to change DROP DATABASE from source to destination\n    command = 'sed -i \"s/{}/{}/g\" \"{}\"'.format(source_site_variables['database'],\n                                               dest_site_variables['database'],\n                                               source_sql_dump)\n\n    success, out, err = cmd(command)\n    if not success:\n        if err:\n            logging.info(\"\\n\")\n            logging.info(\" --> Failed to change DROP DATABASE name from source to destination: {0} \\n {1}\".format(err, out))\n            return False\n        else:\n            logging.info(\"\\n\")\n            logging.info(\" --> Failed\\nReason: other\")\n            return False\n    else:\n        logging.debug(\"\\n\")\n        logging.debug(' --> Changed DROP DATABASE name from source to destination in sql dump')\n\n    # overwrite destination database with source database\n    logging.info('\\n')\n    logging.info(' --> Overwriting database...')\n    command = 'mysql -h {} -u{} -p{} {} < {}'.format(dest_site_variables['server'],\n                                                     dest_site_variables['username'],\n                                                     dest_site_variables['password'],\n                                                     dest_site_variables['database'],\n                                                     source_sql_dump)\n\n    success, out, err = cmd(command)\n    if not success:\n        if err:\n            logging.info(\"\\n\")\n            logging.info(\" --> Failed sql overwrite: {0} \\n {1}\".format(err, out))\n            return False\n        else:\n            logging.info(\"\\n\")\n            logging.info(\" --> Failed\\nReason: other\")\n            return False\n    else:\n        logging.info(\"\\n\")\n        logging.info(' --> Database overwritten')\n\n    # remove source sql dump file\n    command = 'rm {}'.format(source_sql_dump)\n\n    success, out, err = cmd(command)\n    if not success:\n        if err:\n            logging.info(\" --> Failed to remove source sql dump: {0} \\n {1}\".format(err, out))\n            return False\n        else:\n            logging.info(\" --> Failed\\nReason: other\")\n            return False\n    else:\n        logging.debug(\"\\n\")\n        logging.debug(' --> Removed source sql dump file')\n\n    # use drush to update the overwritten database\n    if not utilities.drush_updb(dest_site_variables):\n        logging.info('\\n')\n        logging.info(' --> Database not updated via Drush')\n\n    # use drush to clear site cache\n    if not utilities.drush_cc_all(dest_site_variables):\n        logging.info('\\n')\n        logging.info(' --> Site cache not cleared via Drush')\n\n    return True\n", "sub_path": "jenkinsScripts/sqlTools.py", "file_name": "sqlTools.py", "file_ext": "py", "file_size_in_byte": 8162, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.debug", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "os.getcwd", "line_number": 30, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 34, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 35, "usage_type": "call"}, {"api_name": "shell.cmd", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 39, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 44, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 47, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 48, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 55, "usage_type": "call"}, {"api_name": "utilities.get_backup_directory", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 62, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 62, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 76, "usage_type": "call"}, {"api_name": "shell.cmd", "line_number": 77, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 80, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 81, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 84, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 85, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 88, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 89, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 94, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 104, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 105, "usage_type": "call"}, {"api_name": "shell.cmd", "line_number": 106, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 109, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 110, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 113, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 117, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 118, "usage_type": "call"}, {"api_name": "utilities.drush_updb", "line_number": 121, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 122, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 123, "usage_type": "call"}, {"api_name": "utilities.drush_cc_all", "line_number": 126, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 127, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 128, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 135, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 140, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 141, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 143, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 144, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 148, "usage_type": 
"call"}, {"api_name": "logging.debug", "line_number": 149, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 151, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 152, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 156, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 157, "usage_type": "call"}, {"api_name": "utilities.get_latest_backup", "line_number": 158, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 160, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 161, "usage_type": "call"}, {"api_name": "shell.cmd", "line_number": 169, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 172, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 173, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 176, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 177, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 180, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 181, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 184, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 185, "usage_type": "call"}, {"api_name": "shell.cmd", "line_number": 192, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 195, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 196, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 199, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 200, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 203, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 204, "usage_type": "call"}, {"api_name": "shell.cmd", "line_number": 209, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 212, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 215, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 218, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 219, "usage_type": "call"}, {"api_name": "utilities.drush_updb", "line_number": 222, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 223, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 224, "usage_type": "call"}, {"api_name": "utilities.drush_cc_all", "line_number": 227, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 228, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 229, "usage_type": "call"}]} +{"seq_id": "499325930", "text": "from django.test import TestCase\nfrom apps.poll.models import User\nfrom apps.register.models import Registration\nfrom apps.reporters.models import Reporter, PersistantConnection, PersistantBackend\n\nclass UserTest(TestCase):\n def setUp(self):\n self.backend = PersistantBackend(slug=\"AnotherMockBackend\")\n self.backend.save()\n self.reporter = Reporter(alias=\"ReporterName\")\n self.reporter.save()\n self.pconnection = PersistantConnection(backend=self.backend, \n reporter=self.reporter, \n identity=\"1001\")\n self.pconnection.save()\n self.reporter.connections.add(self.pconnection)\n Registration(governorate = 3, district = 4, phone = self.pconnection).save()\n\n def test_set_user_geolocation(self):\n user = User()\n user.set_user_geolocation_if_registered(self.pconnection)\n self.assertEquals(user.governorate , \"3\")\n self.assertEquals(user.district, \"4\")\n \n\n def test_dont_set_geolocation_when_not_present(self):\n user = User()\n 
user.set_user_geolocation_if_registered(None)\n self.assertEquals(user.governorate , None)\n self.assertEquals(user.district, None)\n \n def set_value_on_user(self):\n user = User()\n self.assertEquals(user.age, None)\n self.assertEquals(user.gender, None)\n user.set_value(\"age\", 12)\n self.assertEquals(user.age, 12)\n self.assertEquals(user.gender, None)\n user.set_value(\"gender\", \"f\")\n self.assertEquals(user.gender, \"f\") \n", "sub_path": "apps/poll/subtests/user.py", "file_name": "user.py", "file_ext": "py", "file_size_in_byte": 1631, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.test.TestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "apps.reporters.models.PersistantBackend", "line_number": 8, "usage_type": "call"}, {"api_name": "apps.reporters.models.Reporter", "line_number": 10, "usage_type": "call"}, {"api_name": "apps.reporters.models.PersistantConnection", "line_number": 12, "usage_type": "call"}, {"api_name": "apps.register.models.Registration", "line_number": 17, "usage_type": "call"}, {"api_name": "apps.poll.models.User", "line_number": 20, "usage_type": "call"}, {"api_name": "apps.poll.models.User", "line_number": 27, "usage_type": "call"}, {"api_name": "apps.poll.models.User", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "196529197", "text": "from .base import *\n\nfrom django.core.exceptions import ImproperlyConfigured\n\n\ndef get_env_variable(var_name):\n \"\"\"Get the environment variable or return exception.\"\"\"\n try:\n return os.environ[var_name]\n except KeyError:\n error_msg = \"Set the {} environment variable\".format(var_name)\n raise ImproperlyConfigured(error_msg)\n\n\nDEBUG = True\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'sports',\n 'USER': get_env_variable('DB_USER'),\n 'PASSWORD': get_env_variable('DB_PASSWORD'),\n 'HOST': 'localhost',\n 'PORT': '',\n 'OPTIONS': {\n 'init_command': 'SET default_storage_engine=INNODB,'\n 'character_set_connection=utf8,'\n 'collation_connection=utf8_unicode_ci,'\n 'foreign_key_checks=0',\n 'charset': 'utf8',\n 'use_unicode': True,\n }\n }\n}\n", "sub_path": "sportssite/config/settings/local.py", "file_name": "local.py", "file_ext": "py", "file_size_in_byte": 1035, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.core.exceptions.ImproperlyConfigured", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "374433546", "text": "import my_config as mc\nimport rollbar\nimport my_sheets as sheets\nimport pandas as pd\nimport my_dfurl\nimport re\n\nUS_EXCHANGE = 1.27\n\n\nclass FirstDibsBulk:\n def __init__(self):\n rollbar.init(mc.config['rollbar']['key'])\n self.shallow_cats = ['More Art', 'Mixed Media']\n\n gc = sheets.gc\n book = gc.open('Autolister Translations')\n\n sheet = book.worksheet('Media_Translations')\n self.df_media = pd.DataFrame(sheet.get_all_records())\n\n sheet = book.worksheet('Subject Translations')\n self.df_subject = pd.DataFrame(sheet.get_all_records())\n\n sheet = book.worksheet('Period Translations')\n self.df_periods = pd.DataFrame(sheet.get_all_records())\n\n self.df_out = my_dfurl.DfInit('get-1stdibs-bulk.php').df\n # self.df_out = self.df_data.copy()\n self.process_columns()\n self.drop_columns()\n self.save_csv()\n self.create_status_csv()\n\n def process_columns(self):\n self.df_out['Artist'] = 
self.df_out['Artist'].str.replace('\\(.+', '')\n self.df_out['cat_1'] = self.df_out['Medium'].apply(self.subject1_vlookup)\n self.df_out['Medium'] = self.df_out['Medium'].apply(self.medium_vlookup)\n self.df_out['cat_2'] = self.df_out['subject'].apply(self.subject2_vlookup)\n self.df_out['Period'] = self.df_out['age'].apply(self.period_vlookup)\n self.df_out['Category'] = self.df_out.apply(self.category_builder, axis=1)\n self.df_out['Creation Year'] = self.df_out.apply(self.creation_year_calc, axis=1)\n self.df_out['List Price'] = (self.df_out['price'] * 1.5 + self.df_out['uscan_postage_web']) * US_EXCHANGE\n self.df_out = self.df_out.apply(self.calculate_dimensions, axis=1)\n self.df_out = self.df_out.apply(self.populate_images, axis=1)\n print(self.df_out)\n\n def save_csv(self):\n self.df_out.to_csv('./out/first_dibs_bulk.csv', index=False)\n\n def creation_year_calc(self, row):\n if len(row['date']) == 4 and row['date'].isdigit():\n row['Creation Year'] = row['date']\n return row['Creation Year']\n\n def medium_vlookup(self, value):\n return self.df_media[self.df_media['sulis'] == value]['1stdibs'].values[0]\n\n def subject1_vlookup(self, value):\n return self.df_media[self.df_media['sulis'] == value]['category1'].values[0]\n\n def subject2_vlookup(self, value):\n return self.df_subject[self.df_subject['sulis'] == value]['category2'].values[0]\n\n def period_vlookup(self, value):\n return self.df_periods[self.df_periods['sulis'] == value]['1stdibs'].values[0]\n\n def category_builder(self, row):\n if row['cat_1'] in self.shallow_cats:\n return 'Art > ' + row['cat_1']\n elif row['cat_1'] == 'Drawings and Watercolor Paintings':\n return 'Art > {}{} Drawings and Watercolors'.format(row['cat_1'], row['cat_2'])\n else:\n return 'Art > {}{} {}'.format(row['cat_1'], row['cat_2'], row['cat_1'])\n\n def calculate_dimensions(self, row):\n m = re.search('(.+:|^)(.+)cm.+$', row['size_text'])\n row['Height'] = m.group(2).split('x')[0].strip()\n row['Width'] = m.group(2).split('x')[1].strip()\n return row\n\n def populate_images(self, row):\n images = [x.strip() for x in row['images'].split(',')]\n for i in range(min(5, len(images))):\n row['Image {}'.format(i+1)] = \"https://www.sulisfineart.com/pub/media/catalog/product\" + images[i]\n return row\n\n def drop_columns(self):\n self.df_out.drop(['cat_1', 'cat_2', 'size_text', 'subject', 'style', 'date', 'framed', 'price', 'age', 'uscan_postage_web', 'size', 'images'],\n axis=1, inplace=True)\n\n def create_status_csv(self):\n df_status = pd.DataFrame()\n df_status['sku'] = self.df_out['Seller Ref No.']\n df_status['on_1st_dibs'] = 1\n df_status.to_csv('./out/first_dibs_status.csv', index=False)\n\n\n\n\n\nif __name__ == '__main__':\n dibs = FirstDibsBulk()\n", "sub_path": "first_dibs_bulk.py", "file_name": "first_dibs_bulk.py", "file_ext": "py", "file_size_in_byte": 4019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "rollbar.init", "line_number": 13, "usage_type": "call"}, {"api_name": "my_config.config", "line_number": 13, "usage_type": "attribute"}, {"api_name": "my_sheets.gc", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 26, "usage_type": "call"}, {"api_name": "my_dfurl.DfInit", "line_number": 28, "usage_type": "call"}, {"api_name": "re.search", "line_number": 77, 
"usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 93, "usage_type": "call"}]} +{"seq_id": "562278385", "text": "# -*- coding: utf-8 -*-\n#################################################################################\n# Author : Acespritech Solutions Pvt. Ltd. ()\n# Copyright(c): 2012-Present Acespritech Solutions Pvt. Ltd.\n# All Rights Reserved.\n#\n# This program is copyright property of the author mentioned above.\n# You can`t redistribute it and/or modify it.\n#\n#################################################################################\n\nfrom odoo import netsvc, tools, models, fields, api, _\nimport time\nfrom datetime import datetime, timedelta\nimport psycopg2\nfrom pytz import timezone\nfrom odoo.tools import float_is_zero\nimport os\nimport json\nimport logging\n_logger = logging.getLogger(__name__)\nfrom odoo.tools.profiler import profile\n\nclass pos_order(models.Model):\n _inherit = \"pos.order\"\n\n def create_picking(self):\n \"\"\"Create a picking for each order and validate it.\"\"\"\n Picking = self.env['stock.picking']\n Move = self.env['stock.move']\n StockWarehouse = self.env['stock.warehouse']\n for order in self:\n if not order.lines.filtered(lambda l: l.product_id.type in ['product', 'consu']):\n continue\n address = order.partner_id.address_get(['delivery']) or {}\n picking_type = order.picking_type_id\n return_pick_type = order.picking_type_id.return_picking_type_id or order.picking_type_id\n order_picking = Picking\n return_picking = Picking\n moves = Move\n location_id = order.location_id.id\n if order.partner_id:\n destination_id = order.partner_id.property_stock_customer.id\n else:\n if (not picking_type) or (not picking_type.default_location_dest_id):\n customerloc, supplierloc = StockWarehouse._get_partner_locations()\n destination_id = customerloc.id\n else:\n destination_id = picking_type.default_location_dest_id.id\n\n if picking_type:\n message = _(\"This transfer has been created from the point of sale session: %s\") % (order.id, order.name)\n picking_vals = {\n 'origin': order.name,\n 'partner_id': address.get('delivery', False),\n 'date_done': order.date_order,\n 'picking_type_id': picking_type.id,\n 'company_id': order.company_id.id,\n 'move_type': 'direct',\n 'note': order.note or \"\",\n 'location_id': location_id,\n 'location_dest_id': destination_id,\n }\n pos_qty = any([x.qty > 0 for x in order.lines if x.product_id.type in ['product', 'consu']])\n if pos_qty:\n order_picking = Picking.create(picking_vals.copy())\n order_picking.message_post(body=message)\n neg_qty = any([x.qty < 0 for x in order.lines if x.product_id.type in ['product', 'consu']])\n if neg_qty:\n return_vals = picking_vals.copy()\n return_vals.update({\n 'location_id': destination_id,\n 'location_dest_id': return_pick_type != picking_type and return_pick_type.default_location_dest_id.id or location_id,\n 'picking_type_id': return_pick_type.id\n })\n return_picking = Picking.create(return_vals)\n return_picking.message_post(body=message)\n\n for line in order.lines.filtered(lambda l: l.product_id.type in ['product', 'consu'] and not float_is_zero(l.qty, precision_digits=l.product_id.uom_id.rounding) and not l.operation_product):\n moves |= Move.create({\n 'name': line.name,\n 'restrict_lot_id':line.prodlot_id.id,\n 'product_uom': line.product_id.uom_id.id,\n 'picking_id': order_picking.id if line.qty >= 0 else return_picking.id,\n 'picking_type_id': picking_type.id if line.qty >= 0 else return_pick_type.id,\n 'product_id': line.product_id.id,\n 
'product_uom_qty': abs(line.qty),\n 'state': 'draft',\n 'location_id': location_id if line.qty >= 0 else destination_id,\n 'location_dest_id': destination_id if line.qty >= 0 else return_pick_type != picking_type and return_pick_type.default_location_dest_id.id or location_id,\n })\n # prefer associating the regular order picking, not the return\n order.write({'picking_id': order_picking.id or return_picking.id})\n\n if return_picking:\n order._force_picking_done(return_picking)\n return_picking.action_done()\n\n if order_picking:\n order._force_picking_done(order_picking)\n order_picking.action_done()\n\n # when the pos.config has no picking_type_id set only the moves will be created\n if moves and not return_picking and not order_picking:\n tracked_moves = moves.filtered(lambda move: move.product_id.tracking != 'none')\n untracked_moves = moves - tracked_moves\n tracked_moves.action_confirm()\n untracked_moves.action_assign()\n moves.filtered(lambda m: m.state in ['confirmed', 'waiting']).force_assign()\n moves.filtered(lambda m: m.product_id.tracking == 'none').action_done()\n\n return True\n\n @api.multi\n # @api.depends('lines')\n def _return_status(self):\n for order in self:\n if not order.back_order:\n full = [ line for line in order.lines if line.return_qty > 0 and not line.product_id.non_refundable ]\n if not full:\n order.return_status = 'full'\n continue\n partial = [ line for line in order.lines if line.return_qty < line.qty and not line.product_id.non_refundable ]\n if partial:\n order.return_status = 'partial'\n continue\n if full and not partial:\n order.return_status = 'nothing'\n continue\n\n return_status = fields.Selection([('nothing', ''), ('partial', 'Partially Returned'), ('full', 'Fully Returned')], \"Return Status\", compute=\"_return_status\",default=\"nothing\")\n return_process = fields.Boolean('Return Process')\n back_order = fields.Char('Back Order', size=256, default=False, copy=False)\n\n def _order_fields(self, ui_order):\n res = super(pos_order, self)._order_fields(ui_order)\n res.update({\n 'back_order': ui_order.get('back_order', '') or False,\n })\n return res\n \n @api.model\n def _process_order(self, order):\n \"\"\"\"\n [{\n id: 6187591311, \n to_invoice: false, \n data: {partner_id: false, \n back_order: , \n uid: 6187591311, \n amount_return: 28, \n number: false, \n fiscal_position_id: false, \n lines: [[0, 0, {exchange_product: false, \n return_qty: 1, \n id: 33, \n pack_lot_ids: [[0, 0, {lot_name: 123456qw}]], \n stock_income: false, \n tax_ids: [[6, false, [1]]], \n discount: 0, qty: 1, \n operation_product: false, \n prodlot_id: 9, \n product_id: 11,\n price_unit: 62}]], \n pos_session_id: 1, \n invoice_journal: false, \n user_id: 1, \n amount_total: 62, \n sale_order_name: false, \n sequence_number: 0, \n statement_ids: [[0, 0, {statement_id: 1, \n account_id: 1541, \n amount: 90, \n name: 2018-12-30 16:33:22, \n journal_id: 6}]], \n amount_tax: 9.457627, \n amount_paid: 90, \n creation_date: 2018-12-30T16:33:22.132Z, \n pricelist_id: 1, \n name: Pedido6187591311\n }\n }\n ]\n \"\"\"\n order_id = self.with_context(from_pos=True).create(self._order_fields(order))\n os.system('echo \"%s\"'%(\"_process_order\"))\n os.system('echo \"%s\"'%(order_id.id))\n for payments in order['statement_ids']:\n if not order.get('sale_mode') and order.get('parent_return_order', ''):\n payments[2]['amount'] = payments[2]['amount'] or 0.0\n order_id.add_payment(self._payment_fields(payments[2]))\n\n os.system('echo \"%s\"'%(\"add_payment\"))\n os.system('echo 
\"%s\"'%(order_id.id))\n\n session = self.env['pos.session'].browse(order['pos_session_id'])\n if session.sequence_number <= order['sequence_number']:\n session.write({'sequence_number': order['sequence_number'] + 1})\n session.refresh()\n\n os.system('echo \"%s\"'%(\"set_session\"))\n os.system('echo \"%s\"'%(order_id.id))\n\n if not order.get('parent_return_order', '') and not float_is_zero(order['amount_return'], self.env['decimal.precision'].precision_get('Account')):\n cash_journal = session.cash_journal_id\n if not cash_journal:\n cash_journal_ids = list(filter(lambda st: st.journal_id.type == 'cash', session.statement_ids))\n if not len(cash_journal_ids):\n raise Warning(_('error!'),\n _(\"No cash statement found for this session. Unable to record returned cash.\"))\n cash_journal = cash_journal_ids[0].journal_id\n order_id.add_payment({\n 'amount':-order['amount_return'],\n 'payment_date': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'payment_name': _('return'),\n 'journal': cash_journal.id,\n })\n \n if order.get('parent_return_order', '') and not float_is_zero(order['amount_return'], self.env['decimal.precision'].precision_get('Account')):\n cash_journal = session.cash_journal_id\n if not cash_journal:\n cash_journal_ids = list(filter(lambda st: st.journal_id.type == 'cash', session.statement_ids))\n if not len(cash_journal_ids):\n raise Warning(_('error!'),\n _(\"No cash statement found for this session. Unable to record returned cash.\"))\n cash_journal = cash_journal_ids[0].journal_id\n order_id.add_payment({\n 'amount':-order['amount_return'],\n 'payment_date': time.strftime('%Y-%m-%d %H:%M:%S'),\n 'payment_name': _('return'),\n 'journal': cash_journal.id,\n })\n return order_id\n\n @api.model\n def create_from_ui(self, orders):\n # Keep only new orders\n os.system('echo \"%s\"'%(json.dumps(orders)))\n submitted_references = [o['data']['name'] for o in orders]\n pos_order = self.search([('pos_reference', 'in', submitted_references)])\n existing_orders = pos_order.read(['pos_reference'])\n existing_references = set([o['pos_reference'] for o in existing_orders])\n orders_to_save = [o for o in orders if o['data']['name'] not in existing_references]\n order_ids = []\n\n for tmp_order in orders_to_save:\n to_invoice = tmp_order['to_invoice']\n order = tmp_order['data']\n if to_invoice:\n self._match_payment_to_invoice(order)\n pos_order = self._process_order(order)\n\n os.system('echo \"%s\"'%(pos_order.id))\n if pos_order :\n to_be_returned_items = {}\n for line in order.get('lines'):\n if line[2].get('return_process'):\n if line[2].get('product_id') in to_be_returned_items:\n to_be_returned_items[line[2].get('product_id')] = to_be_returned_items[line[2].get('product_id')] + line[2].get('qty')\n else:\n to_be_returned_items.update({line[2].get('product_id'):line[2].get('qty')})\n for line in order.get('lines'):\n for item_id in to_be_returned_items:\n return_lines = []\n if line[2].get('return_process'):\n return_lines = self.browse([line[2].get('return_process')[0]]).lines\n for origin_line in return_lines:\n if to_be_returned_items[item_id] == 0:\n continue\n if origin_line.return_qty > 0 and item_id == origin_line.product_id.id:\n if (to_be_returned_items[item_id] * -1) >= origin_line.return_qty:\n ret_from_line_qty = 0\n to_be_returned_items[item_id] = to_be_returned_items[item_id] + origin_line.return_qty\n else:\n ret_from_line_qty = to_be_returned_items[item_id] + origin_line.return_qty\n to_be_returned_items[item_id] = 0\n\n origin_line.write({'return_qty': ret_from_line_qty})\n 
order_ids.append(pos_order.id)\n os.system('echo \"%s\"'%(str(order_ids)))\n try:\n pos_order.action_pos_order_paid()\n except psycopg2.OperationalError:\n # do not hide transactional errors, the order(s) won't be saved!\n raise\n except Exception as e:\n _logger.error('Could not fully process the POS Order: %s', tools.ustr(e))\n os.system('echo \"%s\"'%(\"after paying\"))\n if to_invoice:\n pos_order.action_pos_order_invoice()\n pos_order.invoice_id.sudo().action_invoice_open()\n pos_order.account_move = pos_order.invoice_id.move_id\n\n return order_ids\n\n @api.model\n def add_payment(self, data):\n \"\"\"Create a new payment for the order\"\"\"\n if data['amount'] == 0.0:\n return\n return super(pos_order, self).add_payment(data)\n\n @api.model\n def ac_pos_search_read(self, domain):\n search_vals = self.search_read(domain)\n user_id = self.env['res.users'].browse(self._uid)\n tz = False\n result = []\n if self._context and self._context.get('tz'):\n tz = timezone(self._context.get('tz'))\n elif user_id and user_id.tz:\n tz = timezone(user_id.tz)\n if tz:\n c_time = datetime.now(tz)\n hour_tz = int(str(c_time)[-5:][:2])\n min_tz = int(str(c_time)[-5:][3:])\n sign = str(c_time)[-6][:1]\n for val in search_vals:\n if sign == '-':\n val.update({\n 'date_order':(datetime.strptime(val.get('date_order'), '%Y-%m-%d %H:%M:%S') - timedelta(hours=hour_tz, minutes=min_tz)).strftime('%Y-%m-%d %H:%M:%S')\n })\n elif sign == '+':\n val.update({\n 'date_order':(datetime.strptime(val.get('date_order'), '%Y-%m-%d %H:%M:%S') + timedelta(hours=hour_tz, minutes=min_tz)).strftime('%Y-%m-%d %H:%M:%S')\n })\n result.append(val)\n return result\n else:\n return search_vals\n\n @profile\n def _action_create_invoice_line(self, line=False, invoice_id=False):\n InvoiceLine = self.env['account.invoice.line']\n inv_name = line.product_id.name_get()[0][1]\n inv_line = {\n 'invoice_id': invoice_id,\n 'product_id': line.product_id.id,\n 'quantity': line.qty,\n 'account_analytic_id': self._prepare_analytic_account(line),\n 'name': inv_name,\n 'prodlot_id': line.prodlot_id.id\n }\n # Oldlin trick\n invoice_line = InvoiceLine.sudo().new(inv_line)\n invoice_line._onchange_product_id()\n invoice_line.invoice_line_tax_ids = invoice_line.invoice_line_tax_ids.filtered(lambda t: t.company_id.id == line.order_id.company_id.id).ids\n fiscal_position_id = line.order_id.fiscal_position_id\n if fiscal_position_id:\n invoice_line.invoice_line_tax_ids = fiscal_position_id.map_tax(invoice_line.invoice_line_tax_ids, line.product_id, line.order_id.partner_id)\n invoice_line.invoice_line_tax_ids = invoice_line.invoice_line_tax_ids.ids\n # We convert a new id object back to a dictionary to write to\n # bridge between old and new api\n inv_line = invoice_line._convert_to_write({name: invoice_line[name] for name in invoice_line._cache})\n inv_line.update({\"price_unit\":line.price_unit, \"discount\":line.discount})\n return InvoiceLine.sudo().create(inv_line)\n\nclass pos_order_line(models.Model):\n _inherit = \"pos.order.line\"\n\n return_qty = fields.Float(\"Return Quantity\")\n return_process = fields.Char('Return Process')\n back_order = fields.Char('Back Order', size=256, default=False, copy=False)\n prodlot_id = fields.Many2one('stock.production.lot', \"Serial No.\")\n exchange_product = fields.Boolean(\"Exchange Product\")\n operation_product = fields.Boolean(\"Operation Product\")\n stock_income = fields.Boolean(\"Stock Income\")\n\nclass PosConfig(models.Model):\n _inherit = \"pos.config\"\n\n so_operation_draft = 
fields.Boolean(\"SO Operation Quotation\")\n so_operation_confirm = fields.Boolean(\"SO Operation Confirm\")\n so_operation_paid = fields.Boolean(\"SO Operation Paid\")\n enable_add_product = fields.Boolean(\"Enable Product Operations\")\n enable_pos_serial = fields.Boolean(\"Enable POS serials\")\n\n last_days = fields.Char(\"Last Days\")\n load_current_session_order = fields.Boolean(\"Load Order Of Current Session Only\")\n specified_orders = fields.Boolean(\"Load Orders From Past Upto Specified Number Of Days\")\n so_sequence = fields.Many2one('ir.sequence',\"Sale Order sequence\")\n\n\nclass AccountBankStatementLine(models.Model):\n _inherit = \"account.bank.statement.line\"\n \n @api.one\n @api.constrains('amount')\n def _check_amount(self):\n if not self._context.get('from_pos'):\n super(AccountBankStatementLine, self)._check_amount()\n\n @api.one\n @api.constrains('amount', 'amount_currency')\n def _check_amount_currency(self):\n if not self._context.get('from_pos'):\n super(AccountBankStatementLine, self)._check_amount_currency()\n\nclass product_product(models.Model):\n _inherit=\"product.product\"\n\n non_refundable = fields.Boolean(\"Non Refundable\")\n\n\nclass AccountInvoiceLine(models.Model):\n _inherit = \"account.invoice.line\"\n\n prodlot_id = fields.Many2one('stock.production.lot', \"Serial No.\")\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:", "sub_path": "addons/aspl_pos/models/pos.py", "file_name": "pos.py", "file_ext": "py", "file_size_in_byte": 19624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 24, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 24, "usage_type": "name"}, {"api_name": "odoo._", "line_number": 52, "usage_type": "call"}, {"api_name": "odoo.tools.float_is_zero", "line_number": 79, "usage_type": "call"}, {"api_name": "odoo.api.multi", "line_number": 114, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 114, "usage_type": "name"}, {"api_name": "odoo.fields.Selection", "line_number": 131, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 131, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 132, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 132, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 133, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 133, "usage_type": "name"}, {"api_name": "os.system", "line_number": 186, "usage_type": "call"}, {"api_name": "os.system", "line_number": 187, "usage_type": "call"}, {"api_name": "os.system", "line_number": 193, "usage_type": "call"}, {"api_name": "os.system", "line_number": 194, "usage_type": "call"}, {"api_name": "os.system", "line_number": 201, "usage_type": "call"}, {"api_name": "os.system", "line_number": 202, "usage_type": "call"}, {"api_name": "odoo.tools.float_is_zero", "line_number": 204, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 209, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 210, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 214, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 215, "usage_type": "call"}, {"api_name": "odoo.tools.float_is_zero", "line_number": 219, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 224, "usage_type": "call"}, {"api_name": "odoo._", 
"line_number": 225, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 229, "usage_type": "call"}, {"api_name": "odoo._", "line_number": 230, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 142, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 142, "usage_type": "name"}, {"api_name": "os.system", "line_number": 238, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 238, "usage_type": "call"}, {"api_name": "os.system", "line_number": 253, "usage_type": "call"}, {"api_name": "os.system", "line_number": 280, "usage_type": "call"}, {"api_name": "psycopg2.OperationalError", "line_number": 283, "usage_type": "attribute"}, {"api_name": "odoo.tools.ustr", "line_number": 287, "usage_type": "call"}, {"api_name": "odoo.tools", "line_number": 287, "usage_type": "name"}, {"api_name": "os.system", "line_number": 288, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 235, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 235, "usage_type": "name"}, {"api_name": "odoo.api.model", "line_number": 296, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 296, "usage_type": "name"}, {"api_name": "pytz.timezone", "line_number": 310, "usage_type": "call"}, {"api_name": "pytz.timezone", "line_number": 312, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 314, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 314, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 321, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 321, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 321, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 325, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 325, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 325, "usage_type": "call"}, {"api_name": "odoo.api.model", "line_number": 303, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 303, "usage_type": "name"}, {"api_name": "odoo.tools.profiler.profile", "line_number": 332, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 358, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 358, "usage_type": "name"}, {"api_name": "odoo.fields.Float", "line_number": 361, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 361, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 362, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 362, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 363, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 363, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 364, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 364, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 365, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 365, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 366, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 366, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 367, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 367, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 369, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 369, 
"usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 372, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 372, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 373, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 373, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 374, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 374, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 375, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 375, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 376, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 376, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 378, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 378, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 379, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 379, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 380, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 380, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 381, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 381, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 384, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 384, "usage_type": "name"}, {"api_name": "odoo.api.one", "line_number": 387, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 387, "usage_type": "name"}, {"api_name": "odoo.api.constrains", "line_number": 388, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 388, "usage_type": "name"}, {"api_name": "odoo.api.one", "line_number": 393, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 393, "usage_type": "name"}, {"api_name": "odoo.api.constrains", "line_number": 394, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 394, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 399, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 399, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 402, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 402, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 405, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 405, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 408, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 408, "usage_type": "name"}]} +{"seq_id": "168380814", "text": "from django.urls import path, include\nfrom . 
import views\n\napp_name = 'home'\n\nurlpatterns = [\n path('', views.index),\n path('homepage', views.index),\n path('develop_log', views.get_develop_log),\n path('login',views.get_login_page),\n path('signup', views.get_signup_page),\n\n path('save_data', views.post_user_info),\n path('authen', views.post_user_login),\n path('logout', views.user_logout),\n\n path('board', views.get_board_list),\n path('write-post', views.get_write_post),\n path('posting', views.post_document),\n path('postview', views.postview, name='view_post'),\n\n path('comment/new', views.save_new_comment, name='new_comment'),\n path('comment/delete', views.delete_comment)\n]", "sub_path": "home/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 721, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "628687037", "text": "import logging, time\nfrom modules import util\nfrom modules.util import Failed\n\nlogger = logging.getLogger(\"Plex Meta Manager\")\n\nbuilders = [\"letterboxd_list\", \"letterboxd_list_details\"]\nbase_url = \"https://letterboxd.com\"\n\nclass Letterboxd:\n def __init__(self, config):\n self.config = config\n\n def _parse_list(self, list_url, language):\n response = self.config.get_html(list_url, headers=util.header(language))\n letterboxd_ids = response.xpath(\"//li[contains(@class, 'poster-container')]/div/@data-film-id\")\n items = []\n for letterboxd_id in letterboxd_ids:\n slugs = response.xpath(f\"//div[@data-film-id='{letterboxd_id}']/@data-film-slug\")\n items.append((letterboxd_id, slugs[0]))\n next_url = response.xpath(\"//a[@class='next']/@href\")\n if len(next_url) > 0:\n time.sleep(2)\n items.extend(self._parse_list(f\"{base_url}{next_url[0]}\", language))\n return items\n\n def _tmdb(self, letterboxd_url, language):\n response = self.config.get_html(letterboxd_url, headers=util.header(language))\n ids = response.xpath(\"//a[@data-track-action='TMDb']/@href\")\n if len(ids) > 0 and ids[0]:\n if \"themoviedb.org/movie\" in ids[0]:\n return util.regex_first_int(ids[0], \"TMDB Movie ID\")\n raise Failed(f\"Letterboxd Error: TMDb Movie ID not found in {ids[0]}\")\n raise Failed(f\"Letterboxd Error: TMDb Movie ID not found at {letterboxd_url}\")\n\n def get_list_description(self, list_url, language):\n response = self.config.get_html(list_url, headers=util.header(language))\n descriptions = response.xpath(\"//meta[@property='og:description']/@content\")\n return 
descriptions[0] if len(descriptions) > 0 and len(descriptions[0]) > 0 else None\n\n def validate_letterboxd_lists(self, letterboxd_lists, language):\n valid_lists = []\n for letterboxd_list in util.get_list(letterboxd_lists, split=False):\n list_url = letterboxd_list.strip()\n if not list_url.startswith(base_url):\n raise Failed(f\"Letterboxd Error: {list_url} must begin with: {base_url}\")\n elif len(self._parse_list(list_url, language)) > 0:\n valid_lists.append(list_url)\n else:\n raise Failed(f\"Letterboxd Error: {list_url} failed to parse\")\n return valid_lists\n\n def get_tmdb_ids(self, method, data, language):\n if method == \"letterboxd_list\":\n logger.info(f\"Processing Letterboxd List: {data}\")\n items = self._parse_list(data, language)\n total_items = len(items)\n if total_items > 0:\n ids = []\n for i, item in enumerate(items, 1):\n letterboxd_id, slug = item\n util.print_return(f\"Finding TMDb ID {i}/{total_items}\")\n tmdb_id = None\n expired = None\n if self.config.Cache:\n tmdb_id, expired = self.config.Cache.query_letterboxd_map(letterboxd_id)\n if not tmdb_id or expired is not False:\n try:\n tmdb_id = self._tmdb(f\"{base_url}{slug}\", language)\n except Failed as e:\n logger.error(e)\n continue\n if self.config.Cache:\n self.config.Cache.update_letterboxd_map(expired, letterboxd_id, tmdb_id)\n ids.append((tmdb_id, \"tmdb\"))\n logger.info(util.adjust_space(f\"Processed {total_items} TMDb IDs\"))\n return ids\n else:\n raise Failed(f\"Letterboxd Error: No List Items found in {data}\")\n else:\n raise Failed(f\"Letterboxd Error: Method {method} not supported\")\n", "sub_path": "modules/letterboxd.py", "file_name": "letterboxd.py", "file_ext": "py", "file_size_in_byte": 3881, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "modules.util.header", "line_number": 15, "usage_type": "call"}, {"api_name": "modules.util", "line_number": 15, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "modules.util.header", "line_number": 28, "usage_type": "call"}, {"api_name": "modules.util", "line_number": 28, "usage_type": "name"}, {"api_name": "modules.util.regex_first_int", "line_number": 32, "usage_type": "call"}, {"api_name": "modules.util", "line_number": 32, "usage_type": "name"}, {"api_name": "modules.util.Failed", "line_number": 33, "usage_type": "call"}, {"api_name": "modules.util.Failed", "line_number": 34, "usage_type": "call"}, {"api_name": "modules.util.header", "line_number": 37, "usage_type": "call"}, {"api_name": "modules.util", "line_number": 37, "usage_type": "name"}, {"api_name": "modules.util.get_list", "line_number": 43, "usage_type": "call"}, {"api_name": "modules.util", "line_number": 43, "usage_type": "name"}, {"api_name": "modules.util.Failed", "line_number": 46, "usage_type": "call"}, {"api_name": "modules.util.Failed", "line_number": 50, "usage_type": "call"}, {"api_name": "modules.util.print_return", "line_number": 62, "usage_type": "call"}, {"api_name": "modules.util", "line_number": 62, "usage_type": "name"}, {"api_name": "modules.util.Failed", "line_number": 70, "usage_type": "name"}, {"api_name": "modules.util.adjust_space", "line_number": 76, "usage_type": "call"}, {"api_name": "modules.util", "line_number": 76, "usage_type": "name"}, {"api_name": "modules.util.Failed", "line_number": 79, "usage_type": "call"}, {"api_name": "modules.util.Failed", 
"line_number": 81, "usage_type": "call"}]} +{"seq_id": "646698197", "text": "from django.conf.urls import url\n\nfrom .views import (\n DocumentView, DocumentDCView, DocumentJSONView, PageTextView\n)\n\napp_name = 'filingcabinet'\n\nurlpatterns = [\n url(r\"^(?P\\d+)\\-(?P[-\\w]+)/$\", DocumentView.as_view(),\n name=\"document-detail\"),\n url(r\"^(?P\\d+)/$\", DocumentView.as_view(),\n name=\"document-detail_short\"),\n url(r\"^(?P\\d+)\\.json$\", DocumentJSONView.as_view(),\n name=\"document_json\"),\n url(r\"^(?P\\d+)/dc/$\", DocumentDCView.as_view(),\n name=\"document-detail_dc\"),\n url(r\"^(?P\\d+)/text/(?P\\d+)/$\", PageTextView.as_view(),\n name=\"document_page_text\"),\n]\n", "sub_path": "filingcabinet/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 650, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "views.DocumentView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "views.DocumentView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "views.DocumentView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "views.DocumentView", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 14, "usage_type": "call"}, {"api_name": "views.DocumentJSONView.as_view", "line_number": 14, "usage_type": "call"}, {"api_name": "views.DocumentJSONView", "line_number": 14, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "views.DocumentDCView.as_view", "line_number": 16, "usage_type": "call"}, {"api_name": "views.DocumentDCView", "line_number": 16, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "views.PageTextView.as_view", "line_number": 18, "usage_type": "call"}, {"api_name": "views.PageTextView", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "108114182", "text": "from time import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import (manifold, datasets, random_projection)\nimport scipy.io\n\n\n# 加载数据\nusps = scipy.io.loadmat('data/USPS.mat')\nfea = usps['fea']\ngnd = usps['gnd']\n\n# 数据截断\nfea = fea[:2000, :]\ngnd = gnd[:2000, :]\n\nsource_data = np.hstack((fea, gnd))\nprint(source_data.shape)\n\n\n#%%\n# 将降维后的数据可视化,2维\ndef plot_embedding_2d(X, title=None):\n y = X[:, -1]\n x = X[:, :-1]\n # 坐标缩放到[0,1]区间\n x_min, x_max = np.min(x, axis=0), np.max(x, axis=0)\n x = (x - x_min) / (x_max - x_min)\n\n # 降维后的坐标为(X[i, 0], X[i, 1]),在该位置画出对应的digits\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n for i in range(x.shape[0]):\n ax.text(x[i, 0], x[i, 1], str(int(y[i])),\n color=plt.cm.Set1(y[i] / 10.),\n fontdict={'weight': 'bold', 'size': 4})\n\n if title is not None:\n plt.title(title)\n\n\n#%%\n# 将降维后的数据可视化,3维\ndef plot_embedding_3d(X, title=None):\n y = X[:, -1]\n x = X[:, :-1]\n # 坐标缩放到[0,1]区间\n x_min, x_max = np.min(x, axis=0), np.max(x, axis=0)\n x = (x - x_min) / (x_max - x_min)\n\n # 降维后的坐标为(X[i, 0], X[i, 1],X[i,2]),在该位置画出对应的digits\n fig = plt.figure()\n ax = Axes3D(fig)\n for i in range(x.shape[0]):\n ax.text(x[i, 0], x[i, 1], x[i, 2], str(int(y[i])),\n color=plt.cm.Set1(y[i] / 10.),\n fontdict={'weight': 'bold', 'size': 4})\n\n if title is not None:\n 
plt.title(title)\n\n\n#%%\n# t-SNE\nprint(\"Computing t-SNE embedding\")\ntsne = manifold.TSNE(n_components=3, init='pca', random_state=0)\nt0 = time()\nX_tsne = tsne.fit_transform(source_data[:, :-1])\nprint(X_tsne.shape)\ntsne_data = np.hstack((X_tsne, gnd))\nplot_embedding_2d(tsne_data, \"t-SNE 2D\")\nplot_embedding_3d(tsne_data, \"t-SNE 3D\")\n\nplt.show()\n", "sub_path": "T-SNE/tsne_usps.py", "file_name": "tsne_usps.py", "file_ext": "py", "file_size_in_byte": 1980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "scipy.io.io.loadmat", "line_number": 10, "usage_type": "call"}, {"api_name": "scipy.io.io", "line_number": 10, "usage_type": "attribute"}, {"api_name": "scipy.io", "line_number": 10, "usage_type": "name"}, {"api_name": "numpy.hstack", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm.Set1", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 36, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.min", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.Axes3D", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.Set1", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 57, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}, {"api_name": "sklearn.manifold.TSNE", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.manifold", "line_number": 67, "usage_type": "name"}, {"api_name": "time.time", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "280104747", "text": "from __future__ import print_function\nimport json\nimport random\nimport time\nimport logging\n\nfrom invoke import task, run\nfrom kafka.common import UnknownTopicOrPartitionError\nfrom kafka.client import KafkaClient\nfrom kafka.producer import SimpleProducer\nfrom six.moves import range\nfrom streamparse.ext.invoke import *\n\n\nlogging.basicConfig(format='%(asctime)-15s %(module)s %(name)s %(message)s')\nlog = logging.getLogger()\n\ndef retry(tries, delay=3, backoff=2, safe_exc_types=None):\n \"\"\"Retry a function call.\"\"\"\n if safe_exc_types is None:\n # By default, all exception types are \"safe\" and retried\n safe_exc_types = (Exception,)\n\n def decorator(func):\n def wrapper(*args, 
**kwargs):\n mtries, mdelay = tries, delay\n\n while mtries > 0:\n try:\n return func(*args, **kwargs)\n except Exception as e:\n if not isinstance(e, safe_exc_types):\n raise e\n\n\n mtries -= 1\n time.sleep(mdelay)\n mdelay *= backoff\n wrapper.__doc__ = func.__doc__\n wrapper.__name__ = func.__name__\n return wrapper\n return decorator\n\n\ndef random_pixel_generator():\n urls = (\n \"http://example.com/\",\n \"http://example.com/article1\",\n \"http://example.com/article2\",\n \"http://example.com/article3\")\n while True:\n ip = \"192.168.0.{}\".format(random.randint(0, 255))\n url = random.choice(urls)\n ts = int(time.time() + random.randint(0, 30))\n yield {\n \"ip\": ip,\n \"url\": url,\n \"ts\": ts,\n }\n\n\n@task\n@retry(2, safe_exc_types=(UnknownTopicOrPartitionError,))\ndef seed_kafka(kafka_hosts=None, topic_name=None, num_pixels=100000):\n \"\"\"Seed the local Kafka cluster's \"pixels\" topic with sample pixel data.\"\"\"\n topic_name = topic_name or \"pixels\"\n kafka_hosts = kafka_hosts or \"streamparse-box:9092\"\n\n kafka = KafkaClient(kafka_hosts)\n producer = SimpleProducer(kafka)\n # producer = SimpleProducer(kafka, batch_send=True, batch_send_every_n=1000,\n # batch_send_every_t=5)\n\n print(\"Seeding Kafka ({}) topic '{}' with {:,} fake pixels.\"\n .format(kafka_hosts, topic_name, num_pixels))\n pixels = random_pixel_generator()\n for i in range(num_pixels):\n pixel = json.dumps(next(pixels)).encode(\"utf-8\", \"ignore\")\n producer.send_messages(topic_name, pixel)\n print(\"Done.\")\n\n", "sub_path": "examples/kafka-jvm/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 2524, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.basicConfig", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 37, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 52, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 53, "usage_type": "call"}, {"api_name": "time.time", "line_number": 54, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 54, "usage_type": "call"}, {"api_name": "kafka.common", "line_number": 69, "usage_type": "name"}, {"api_name": "kafka.client.KafkaClient", "line_number": 69, "usage_type": "call"}, {"api_name": "kafka.producer.SimpleProducer", "line_number": 70, "usage_type": "call"}, {"api_name": "kafka.common", "line_number": 70, "usage_type": "argument"}, {"api_name": "six.moves.range", "line_number": 77, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 78, "usage_type": "call"}, {"api_name": "invoke.task", "line_number": 62, "usage_type": "name"}, {"api_name": "kafka.common.UnknownTopicOrPartitionError", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "338996828", "text": "from flask import Flask\nfrom config import *\nfrom flask import *\nimport requests\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello():\n return \"Welcome to Ty redis page!\"\n\n\n@app.route(\"/index\" , methods=['POST', 'GET'])\ndef index():\n res = \"tingyun\"\n if request.method == 'POST':\n operate_type = request.form['type']\n if operate_type == \"get\":\n ret = requests.post('http://127.0.0.1:18888/snapchat/snapchat' , data = { 'Type': 'get'})\n if ret.status_code == 200:\n res = ret.content.decode()\n else:\n res = \"operator get banned_key 【sc:hash:banned:user】 failed\"\n\n elif operate_type == 
\"add\":\n uid = request.form['uid']\n res = requests.post('http://127.0.0.1:18888/snapchat/snapchat' , data = { 'Type': 'add' , 'uid' : uid}) \n if res.status_code == 200:\n res = \"operator add uid 【%s】 ok\"%(uid)\n else:\n res = \"operator add uid 【%s】 failed\"%(uid)\n\n\n elif operate_type == \"remove\":\n uid = request.form['uid']\n res = requests.post('http://127.0.0.1:18888/snapchat/snapchat' , data = { 'Type': 'remove' , 'uid' : uid})\n if res.status_code == 200:\n res = \"operator remove uid 【%s】 ok\"%(uid)\n else:\n res = \"operator remove uid 【%s】 failed\"%(uid)\n\n elif operate_type == \"search\":\n uid = request.form['uid']\n ret = requests.post('http://127.0.0.1:18888/snapchat/snapchat' , data = { 'Type': 'search' , 'uid' : uid})\n if ret.status_code == 200:\n res = ret.content.decode()\n else:\n res = \"operator search uid 【%s】 failed\"%(uid)\n\n else:\n res = \"method not valid\"\n\n return render_template('index.html', data=res)\n\n\nif __name__ == '__main__':\n app.jinja_env.auto_reload = True\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n app.run(host='127.0.0.1', port=18889 ,debug=True)\n\n", "sub_path": "Ty_redis.py", "file_name": "Ty_redis.py", "file_ext": "py", "file_size_in_byte": 2038, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 36, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "226841387", "text": "#!/usr/bin/env python\n\"\"\"Uploader demo for Zenodo.\n\nTo Use\n------\nYou must set / export two environment variables for access to Zenodo;\n\n```\nexport ZENODO_TOKEN_PROD=\nexport ZENODO_TOKEN_DEV=\n```\n\nNote: This script will yell loudly if the requested token is unset.\n\nNow, you can then upload the sample data to the development site:\n```\n$ python scripts/archive_collection.py \\\n data/proceedings.json \\\n data/conferences.json \\\n dev \\\n --verbose 50 \\\n --num_cpus -2 \\\n --max_items 10\n```\n\"\"\"\nimport argparse\nimport io\nfrom joblib import Parallel, delayed\nimport json\nimport logging\nimport os\nimport random\nimport requests\nimport sys\n\nimport zen\n\nlogger = logging.getLogger(\"fetch\")\n\nMETA = {\n \"license\": \"CC-BY-4.0\",\n \"access_right\": \"open\",\n \"description\": \"

    \",\n \"communities\": [{\"identifier\": \"ismir\"}],\n \"imprint_publisher\": \"ISMIR\",\n \"upload_type\": \"publication\",\n \"publication_type\": \"conferencepaper\"\n}\n\n\ndef build_record(record, conferences):\n record = dict(**record)\n key = record['conference_acronym']\n meta = META.copy()\n meta.update(**conferences[key])\n meta[\"partof_title\"] = meta.pop('conference_title')\n meta['conference_title'] = meta[\"partof_title\"].split(\"the \")[-1]\n meta[\"imprint_place\"] = meta[\"conference_place\"]\n res = requests.get(record.pop('pdf_url'))\n fp = io.BytesIO(res.content)\n meta.update(**record)\n return os.path.basename(res.url), fp, meta\n\n\ndef upload(record, conferences, stage):\n \"\"\"Upload a file / metadata pair to a Zenodo stage.\n\n Parameters\n ----------\n filename : str\n Path to a local file on disk.\n TODO: Could be a generic URI, to allow webscraping at the same time.\n\n metadata : dict\n Metadata associated with the resource.\n\n stage : str\n One of [dev, prod]; defines the deployment area to use.\n\n zid : str, default=None\n If provided, attempts to update the resource for the given Zenodo ID.\n \"\"\"\n if not record['pdf_url'].lower().endswith('.pdf'):\n return\n\n fname, fp, meta = build_record(record, conferences)\n zid = zen.create_id(stage=stage)\n zen.upload_file(zid, fname, fp=fp, stage=stage)\n zen.update_metadata(zid, meta, stage=stage)\n return zen.publish(zid, stage=stage).get('submitted', False)\n\n\ndef archive(proceedings, conferences, stage, num_cpus=-2, verbose=0):\n pool = Parallel(n_jobs=num_cpus, verbose=verbose)\n fx = delayed(upload)\n return all(pool(fx(rec, conferences, stage) for rec in proceedings))\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n parser = argparse.ArgumentParser(description=__doc__)\n\n # Inputs\n parser.add_argument(\"proceedings\",\n metavar=\"proceedings\", type=str,\n help=\"Path to proceedings records.\")\n parser.add_argument(\"conferences\",\n metavar=\"conferences\", type=str,\n help=\"Path to a JSON file of conference metadata.\")\n parser.add_argument(\"stage\",\n metavar=\"stage\", type=str,\n help=\"Stage to execute.\")\n parser.add_argument(\"--num_cpus\",\n metavar=\"num_cpus\", type=int, default=-2,\n help=\"Number of CPUs to use in parallel.\")\n parser.add_argument(\"--verbose\",\n metavar=\"verbose\", type=int, default=0,\n help=\"Verbosity level for joblib.\")\n parser.add_argument(\"--max_items\",\n metavar=\"max_items\", type=int, default=None,\n help=\"Maximum number of items to upload.\")\n args = parser.parse_args()\n proceedings = [rec for year in json.load(open(args.proceedings))\n for rec in year]\n conferences = json.load(open(args.conferences))\n\n if args.max_items is not None:\n random.shuffle(proceedings)\n proceedings = proceedings[:args.max_items]\n\n success = archive(proceedings, conferences, args.stage,\n args.num_cpus, args.verbose)\n sys.exit(0 if success else 1)\n", "sub_path": "scripts/archive_collection.py", "file_name": "archive_collection.py", "file_ext": "py", "file_size_in_byte": 4134, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 38, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 59, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": 
"attribute"}, {"api_name": "zen.create_id", "line_number": 87, "usage_type": "call"}, {"api_name": "zen.upload_file", "line_number": 88, "usage_type": "call"}, {"api_name": "zen.update_metadata", "line_number": 89, "usage_type": "call"}, {"api_name": "zen.publish", "line_number": 90, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 94, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 95, "usage_type": "call"}, {"api_name": "logging.basicConfig", "line_number": 100, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 100, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 101, "usage_type": "call"}, {"api_name": "json.load", "line_number": 123, "usage_type": "call"}, {"api_name": "json.load", "line_number": 125, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 128, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "470409811", "text": "import logging\nimport warnings\nfrom typing import Union\n\nfrom _pytest.config import Config\nfrom pydantic import ValidationError\n\nfrom pytest_zebrunner.hooks import PytestHooks, PytestXdistHooks\nfrom pytest_zebrunner.settings import load_settings\n\nlogger = logging.getLogger(__name__)\n\n\ndef pytest_configure(config: Config) -> None:\n\n try:\n settings = load_settings()\n except ValidationError as exc:\n field_errors = \"\\n\".join([f\"\\033[93m {e['loc'][0]}\\033[0m - {e['msg']}\" for e in exc.errors()])\n warnings.warn(\n UserWarning(\n \"\\033[1;31m Zebrunner plugin not configured properly because missing required config options.\\n\"\n \"Add it to environment variables or .env file.\\n\" + field_errors + \"\\n\" * 3\n )\n )\n return\n\n if settings.enabled:\n hooks: Union[PytestHooks, PytestXdistHooks]\n if config.pluginmanager.has_plugin(\"xdist\") and any([x == \"-n\" for x in config.invocation_params.args]):\n hooks = PytestXdistHooks()\n else:\n hooks = PytestHooks()\n\n config.pluginmanager.register(hooks)\n config.addinivalue_line(\"markers\", \"maintainer(name): Email or nickname of test maintainer\")\n config.addinivalue_line(\"markers\", \"label(name, value): Test label\")\n", "sub_path": "src/pytest_zebrunner/plugin.py", "file_name": "plugin.py", "file_ext": "py", "file_size_in_byte": 1310, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 11, "usage_type": "call"}, {"api_name": "_pytest.config.Config", "line_number": 14, "usage_type": "name"}, {"api_name": "pytest_zebrunner.settings.load_settings", "line_number": 17, "usage_type": "call"}, {"api_name": "pydantic.ValidationError", "line_number": 18, "usage_type": "name"}, {"api_name": "warnings.warn", "line_number": 20, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 29, "usage_type": "name"}, {"api_name": "pytest_zebrunner.hooks.PytestHooks", "line_number": 29, "usage_type": "name"}, {"api_name": "pytest_zebrunner.hooks.PytestXdistHooks", "line_number": 29, "usage_type": "name"}, {"api_name": "pytest_zebrunner.hooks.PytestXdistHooks", "line_number": 31, "usage_type": "call"}, {"api_name": "pytest_zebrunner.hooks.PytestHooks", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "367369053", "text": "# -*- coding: utf8 -*-\n\nimport os\nfrom time import sleep\nimport inspect\n\nimport common.HTMLTestRunner as HTMLTestRunner\nimport common.xutils as xutils\n\nimport unittest\nfrom appium import 
webdriver\n\n\ndirectory = '%s/' % os.getcwd()\n\nclass AndroidSimulatorTestCase(unittest.TestCase):\n \"Android simulator test cases\"\n\n def setUp(self):\n \"Setup for the test\"\n desired_caps = {}\n desired_caps['platformName'] = 'Android'\n desired_caps['platformVersion'] = '4.4.2'\n desired_caps['deviceName'] = 'emulator-5554'\n desired_caps['udid'] = 'emulator-5554' ### Very important: must be set when multiple android devices are attached, otherwise several test cases may all run on the same device\n #desired_caps['app'] = os.path.abspath('./apps/TestApp.app')\n desired_caps['appPackage'] = 'com.android.calculator2'\n desired_caps['appActivity'] = '.Calculator'\n self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)\n self.driver.implicitly_wait(10)\n \n elements = self.driver.find_elements_by_id(\"com.android.calculator2:id/clear\")\n num_digits = len(self.driver.find_element_by_class_name(\"android.widget.EditText\").text)\n if(len(elements) == 1): #has clear button, clear it\n elements[0].click()\n elif(len(elements) == 0 and num_digits > 0): #already has digits displayed, then clear it\n for i in range(num_digits):\n self.driver.find_element_by_id(\"com.android.calculator2:id/del\").click()\n \n def tearDown(self):\n \"Tear down the test\"\n self.driver.quit()\n\n def test_delete(self):\n \"Test the delete function\"\n self.driver.find_element_by_id(\"com.android.calculator2:id/digit1\").click()\n self.driver.find_element_by_id(\"com.android.calculator2:id/digit2\").click()\n self.driver.find_element_by_id(\"com.android.calculator2:id/digit3\").click()\n pic_path1 = directory + 'result/image/' + inspect.stack()[0][3] + '_before.png'\n self.driver.save_screenshot(pic_path1) \n\n self.driver.find_element_by_id(\"com.android.calculator2:id/del\").click()\n self.assertEqual('12', self.driver.find_element_by_class_name(\"android.widget.EditText\").text)\n pic_path2 = directory + 'result/image/' + inspect.stack()[0][3] + '_after.png'\n self.driver.save_screenshot(pic_path2)\n \n # Merge the two screenshots into one new image for display in the report\n new_img_path = directory+'result/image/' + inspect.stack()[0][3] + '_combined.png'\n # The path must be printed so HTMLTestRunner can capture it and render the image; result/image/*.png is the required directory pattern\n print(new_img_path) \n xutils.combine_images([pic_path1,pic_path2], new_img_path)\n\n def test_plus(self):\n \"Test the addition function\"\n self.driver.find_element_by_id(\"com.android.calculator2:id/digit9\").click()\n self.driver.find_element_by_id(\"com.android.calculator2:id/digit5\").click()\n self.driver.find_element_by_id(\"com.android.calculator2:id/plus\").click()\n self.driver.find_element_by_id(\"com.android.calculator2:id/digit5\").click()\n pic_path1 = directory + 'result/image/' + inspect.stack()[0][3] + '_before.png'\n self.driver.save_screenshot(pic_path1) \n\n self.driver.find_element_by_id(\"com.android.calculator2:id/equal\").click()\n self.assertEqual('100', self.driver.find_element_by_class_name(\"android.widget.EditText\").text)\n pic_path2 = directory + 'result/image/' + inspect.stack()[0][3] + '_after.png'\n self.driver.save_screenshot(pic_path2)\n\n # Merge the two screenshots into one new image for display in the report\n new_img_path = directory+'result/image/' + inspect.stack()[0][3] + '_combined.png' \n # The path must be printed so HTMLTestRunner can capture it and render the image; result/image/*.png is the required directory pattern\n print (new_img_path)\n xutils.combine_images([pic_path1,pic_path2], new_img_path)\n \n\n#---START OF SCRIPT\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(AndroidSimulatorTestCase)\n #unittest.TextTestRunner(verbosity=2).run(suite)\n \n # Determine the output path of the report\n filePath = directory + \"AppiumTestsResult_\" + os.path.basename(__file__).split('.')[0] + \".html\"\n 
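# Open the report file in binary mode; HTMLTestRunner writes encoded bytes to this stream.\n 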
fp = open(filePath,'wb')\n # Title and description of the generated report\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp,title='Test Report',description='Test Results')\n runner.run(suite)\n", "sub_path": "tests/appium_test_android_simulator.py", "file_name": "appium_test_android_simulator.py", "file_ext": "py", "file_size_in_byte": 4575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.getcwd", "line_number": 14, "usage_type": "call"}, {"api_name": "unittest.TestCase", "line_number": 16, "usage_type": "attribute"}, {"api_name": "appium.webdriver.Remote", "line_number": 29, "usage_type": "call"}, {"api_name": "appium.webdriver", "line_number": 29, "usage_type": "name"}, {"api_name": "inspect.stack", "line_number": 49, "usage_type": "call"}, {"api_name": "inspect.stack", "line_number": 54, "usage_type": "call"}, {"api_name": "inspect.stack", "line_number": 58, "usage_type": "call"}, {"api_name": "common.xutils.combine_images", "line_number": 61, "usage_type": "call"}, {"api_name": "common.xutils", "line_number": 61, "usage_type": "name"}, {"api_name": "inspect.stack", "line_number": 69, "usage_type": "call"}, {"api_name": "inspect.stack", "line_number": 74, "usage_type": "call"}, {"api_name": "inspect.stack", "line_number": 78, "usage_type": "call"}, {"api_name": "common.xutils.combine_images", "line_number": 81, "usage_type": "call"}, {"api_name": "common.xutils", "line_number": 81, "usage_type": "name"}, {"api_name": "unittest.TestLoader", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "common.HTMLTestRunner.HTMLTestRunner", "line_number": 93, "usage_type": "call"}, {"api_name": "common.HTMLTestRunner", "line_number": 93, "usage_type": "name"}]}
+{"seq_id": "537543072", "text": "\"\"\"\nTests for METADB ETL jobs\n\"\"\"\n\nimport os\nfrom mock import patch\nimport json\nfrom deepdiff import DeepDiff\nfrom django.test import TestCase\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom beagle_etl.models import SMILEMessage\nfrom beagle_etl.models import JobGroup, JobGroupNotifier, Notifier\nfrom beagle_etl.jobs.metadb_jobs import update_request_job, update_sample_job, new_request\nfrom django.core.management import call_command\nfrom file_system.models import Request, Sample, Patient, FileMetadata\nfrom file_system.repository import FileRepository\nfrom study.objects import StudyObject\n\n\nclass TestNewRequest(TestCase):\n fixtures = [\n \"file_system.filegroup.json\",\n \"file_system.filetype.json\",\n \"file_system.storage.json\",\n \"beagle_etl.operator.json\",\n \"runner.pipeline.json\",\n ]\n\n def setUp(self):\n self.request_keys = [\n \"piEmail\",\n \"projectManagerName\",\n \"labHeadName\",\n \"labHeadEmail\",\n \"investigatorName\",\n \"investigatorEmail\",\n \"dataAnalystName\",\n \"dataAnalystEmail\",\n \"otherContactEmails\",\n \"dataAccessEmails\",\n \"qcAccessEmails\",\n settings.REQUEST_ID_METADATA_KEY,\n settings.PROJECT_ID_METADATA_KEY,\n settings.RECIPE_METADATA_KEY,\n ]\n self.notifier = Notifier.objects.create(notifier_type=\"JIRA\", board=\"TEST\")\n self.job_group = JobGroup.objects.create()\n self.job_group_notifier = JobGroupNotifier.objects.create(job_group=self.job_group, notifier_type=self.notifier)\n self.file_keys = [\"R\"]\n test_files_fixture = os.path.join(settings.TEST_FIXTURE_DIR, \"10075_D_2.file.json\")\n call_command(\"loaddata\", 
test_files_fixture, verbosity=0)\n test_files_fixture = os.path.join(settings.TEST_FIXTURE_DIR, \"10075_D_2.filemetadata.json\")\n call_command(\"loaddata\", test_files_fixture, verbosity=0)\n new_request_json_path = os.path.join(settings.TEST_FIXTURE_DIR, \"10075_D_2.update.json\")\n call_command(\"loaddata\", test_files_fixture, verbosity=0)\n update_sample_json_path = os.path.join(settings.TEST_FIXTURE_DIR, \"10075_D_2.update_sample.json\")\n call_command(\"loaddata\", test_files_fixture, verbosity=0)\n update_request_json_path = os.path.join(settings.TEST_FIXTURE_DIR, \"10075_D_request_update.json\")\n with open(update_sample_json_path) as new_sample_json_file:\n self.new_sample_data = json.load(new_sample_json_file)\n self.new_sample_data_str = json.dumps(self.new_sample_data)\n with open(new_request_json_path) as new_request_json_file:\n self.new_request_data = json.load(new_request_json_file)\n\n self.request_data_str = json.dumps(self.new_request_data)\n with open(update_request_json_path) as update_request_json_file:\n self.update_request_data = json.load(update_request_json_file)\n self.update_request_str = json.dumps(self.update_request_data)\n\n test_new_request_08944_B = os.path.join(settings.TEST_FIXTURE_DIR, \"08944_B_new_request.json\")\n with open(test_new_request_08944_B) as new_request_08944_B:\n self.new_request = json.load(new_request_08944_B)\n self.new_request_str = json.dumps(self.new_request)\n\n test_new_request_14269_C = os.path.join(settings.TEST_FIXTURE_DIR, \"14269_C_new_request.json\")\n with open(test_new_request_14269_C) as new_request_14269_C:\n self.new_request_14269_C = json.load(new_request_14269_C)\n self.new_request_14269_C_str = json.dumps(self.new_request_14269_C)\n\n test_14269_C_1_update_sample = os.path.join(settings.TEST_FIXTURE_DIR, \"14269_C_1_update_sample.json\")\n with open(test_14269_C_1_update_sample) as update_sample_14269_C_1:\n self.update_sample_14269_C_1 = json.load(update_sample_14269_C_1)\n self.update_sample_14269_C_1_str = json.dumps(self.update_sample_14269_C_1)\n\n test_14269_C_1_update_sample_new_files = os.path.join(\n settings.TEST_FIXTURE_DIR, \"14269_C_1_update_sample_new_files.json\"\n )\n with open(test_14269_C_1_update_sample_new_files) as update_sample_14269_C_1_new_files:\n self.update_sample_14269_C_1_new_files = json.load(update_sample_14269_C_1_new_files)\n self.update_sample_14269_C_1_new_files_str = json.dumps(self.update_sample_14269_C_1_new_files)\n\n self.etl_user = User.objects.create_superuser(\"ETL\", \"voyager-etl@mskcc.org\", \"password\")\n settings.ETL_USER = self.etl_user.username\n\n @patch(\"beagle_etl.jobs.metadb_jobs.check_files_permissions\")\n @patch(\"notifier.tasks.notifier_start\")\n @patch(\"notifier.tasks.send_notification.delay\")\n @patch(\"notifier.models.JobGroupNotifier.objects.get\")\n @patch(\"file_system.tasks.populate_job_group_notifier_metadata.delay\")\n def test_new_request(\n self,\n populate_job_group_notifier_metadata,\n job_group_notifier_get,\n send_notification,\n notifier_start,\n check_files_permissions,\n ):\n populate_job_group_notifier_metadata.return_value = None\n job_group_notifier_get.return_value = self.job_group_notifier\n notifier_start.return_value = True\n send_notification.return_value = True\n check_files_permissions.return_value = True\n settings.NOTIFIER_ACTIVE = False\n msg = SMILEMessage.objects.create(topic=\"new-request\", message=self.new_request_str)\n new_request(str(msg.id))\n request = Request.objects.filter(request_id=\"08944_B\")\n sample_1 = 
Sample.objects.filter(sample_id=\"08944_B_1\")\n sample_2 = Sample.objects.filter(sample_id=\"08944_B_2\")\n sample_3 = Sample.objects.filter(sample_id=\"08944_B_3\")\n sample_4 = Sample.objects.filter(sample_id=\"08944_B_4\")\n patient_1 = Patient.objects.filter(patient_id=\"C-MP76JR\")\n patient_2 = Patient.objects.filter(patient_id=\"C-4LM16H\")\n self.assertEqual(request.count(), 1)\n self.assertEqual(sample_1.count(), 1)\n self.assertEqual(sample_2.count(), 1)\n self.assertEqual(sample_3.count(), 1)\n self.assertEqual(sample_4.count(), 1)\n self.assertTrue(patient_1.count(), 1)\n self.assertTrue(patient_2.count(), 1)\n study = StudyObject.get_by_request(\"08944_B\")\n self.assertIsNotNone(study)\n self.assertListEqual(list(study[0].requests), list(request.all()))\n files = FileRepository.filter(metadata={settings.REQUEST_ID_METADATA_KEY: \"08944_B\"})\n self.assertEqual(files.count(), 8)\n request = request.first()\n samples = Sample.objects.filter(sample_id__startswith=\"08944_B\").order_by(\"created_date\").all()\n for sample in samples:\n self.assertEqual(sample.request_id, request.request_id)\n self.assertListEqual(study[0].samples, list(samples))\n\n @patch(\"notifier.models.JobGroupNotifier.objects.get\")\n @patch(\"notifier.tasks.send_notification.delay\")\n @patch(\"file_system.tasks.populate_job_group_notifier_metadata.delay\")\n @patch(\"beagle_etl.jobs.metadb_jobs.create_request_callback_instance\")\n def test_update_request_metadata(\n self, request_callback, populate_job_group, send_notification, jobGroupNotifierObjectGet\n ):\n \"\"\"\n Test if request metadata update is properly updating fields\n \"\"\"\n request_keys = [\n \"piEmail\",\n \"projectManagerName\",\n \"labHeadName\",\n \"labHeadEmail\",\n \"investigatorName\",\n \"investigatorEmail\",\n \"dataAnalystName\",\n \"dataAnalystEmail\",\n \"otherContactEmails\",\n \"dataAccessEmails\",\n \"qcAccessEmails\",\n ]\n with self.settings(ETL_USER=str(self.etl_user.username)):\n request_callback.return_value = None\n populate_job_group.return_value = None\n jobGroupNotifierObjectGet.return_value = None\n send_notification.return_value = None\n msg = SMILEMessage.objects.create(topic=\"update_request\", message=self.update_request_str)\n update_request_job(str(msg.id))\n files = FileRepository.filter(metadata={settings.REQUEST_ID_METADATA_KEY: \"10075_D_2\"})\n for file in files:\n self.assertEqual(\n file.metadata[settings.REQUEST_ID_METADATA_KEY],\n json.loads(self.update_request_data[-1][\"requestMetadataJson\"])[\"requestId\"],\n )\n self.assertEqual(\n file.metadata[settings.PROJECT_ID_METADATA_KEY],\n json.loads(self.update_request_data[-1][\"requestMetadataJson\"])[\"projectId\"],\n )\n self.assertEqual(\n file.metadata[settings.RECIPE_METADATA_KEY],\n json.loads(self.update_request_data[-1][\"requestMetadataJson\"])[settings.LIMS_RECIPE_METADATA_KEY],\n )\n for single_request_key in request_keys:\n self.assertEqual(\n file.metadata[single_request_key],\n json.loads(self.update_request_data[-1][\"requestMetadataJson\"])[single_request_key],\n )\n\n @patch(\"notifier.models.JobGroupNotifier.objects.get\")\n @patch(\"notifier.tasks.send_notification.delay\")\n @patch(\"file_system.tasks.populate_job_group_notifier_metadata.delay\")\n @patch(\"beagle_etl.jobs.metadb_jobs.create_request_callback_instance\")\n @patch(\"beagle_etl.jobs.metadb_jobs._generate_ticket_description\")\n def test_update_request_ticket(\n self, ticket_description, request_callback, populate_job_group, send_notification, 
jobGroupNotifierObjectGet\n ):\n \"\"\"\n Test that generate ticket is called properly in update request\n \"\"\"\n with self.settings(ETL_USER=str(self.etl_user.username)):\n request_callback.return_value = None\n populate_job_group.return_value = None\n jobGroupNotifierObjectGet.return_value = None\n send_notification.return_value = None\n msg = SMILEMessage.objects.create(topic=\"update_request\", message=self.update_request_str)\n update_request_job(str(msg.id))\n files = FileRepository.filter(metadata={settings.REQUEST_ID_METADATA_KEY: \"10075_D_2\"})\n sample_names = []\n for file in files:\n sample_name = file.metadata[settings.SAMPLE_ID_METADATA_KEY]\n if sample_name not in sample_names:\n sample_names.append(sample_name)\n ticket_description.assert_called_once()\n call_args = ticket_description.call_args[0]\n self.assertEqual(call_args[0], \"10075_D_2\")\n self.assertEqual(len(call_args[3]), len(sample_names))\n request_metadata = call_args[5]\n for single_request_key in self.request_keys:\n self.assertEqual(file.metadata[single_request_key], request_metadata[single_request_key])\n\n @patch(\"notifier.models.JobGroupNotifier.objects.get\")\n @patch(\"notifier.tasks.send_notification.delay\")\n @patch(\"file_system.tasks.populate_job_group_notifier_metadata.delay\")\n @patch(\"beagle_etl.jobs.metadb_jobs.create_request_callback_instance\")\n def test_update_request_sample(\n self, request_callback, populate_job_group, send_notification, jobGroupNotifierObjectGet\n ):\n \"\"\"\n Test that the samples metadata are not updated when only update request is called\n \"\"\"\n with self.settings(ETL_USER=str(self.etl_user.username)):\n request_callback.return_value = None\n populate_job_group.return_value = None\n jobGroupNotifierObjectGet.return_value = None\n send_notification.return_value = None\n sample_metadata = {}\n sample_files = FileRepository.filter(metadata={settings.REQUEST_ID_METADATA_KEY: \"10075_D_2\"})\n for single_file in sample_files:\n sample_name = single_file.metadata[settings.SAMPLE_ID_METADATA_KEY]\n if sample_name not in sample_metadata:\n sample_metadata[sample_name] = single_file.metadata\n msg = SMILEMessage.objects.create(topic=\"update_request\", message=self.update_request_str)\n update_request_job(str(msg.id))\n files = FileRepository.filter(metadata={settings.REQUEST_ID_METADATA_KEY: \"10075_D_2\"})\n for file in files:\n metadata_keys = file.metadata.keys()\n sample_name = file.metadata[settings.SAMPLE_ID_METADATA_KEY]\n for single_metadata_key in metadata_keys:\n current_value = file.metadata[single_metadata_key]\n if single_metadata_key in self.file_keys:\n continue\n if single_metadata_key in self.request_keys:\n if single_metadata_key == settings.REQUEST_ID_METADATA_KEY:\n expected_value = json.loads(self.update_request_data[-1][\"requestMetadataJson\"])[\n \"requestId\"\n ]\n elif single_metadata_key == settings.PROJECT_ID_METADATA_KEY:\n expected_value = json.loads(self.update_request_data[-1][\"requestMetadataJson\"])[\n \"projectId\"\n ]\n elif single_metadata_key == settings.RECIPE_METADATA_KEY:\n expected_value = json.loads(self.update_request_data[-1][\"requestMetadataJson\"])[\n settings.LIMS_RECIPE_METADATA_KEY\n ]\n else:\n expected_value = json.loads(self.update_request_data[-1][\"requestMetadataJson\"])[\n single_metadata_key\n ]\n else:\n expected_value = sample_metadata[sample_name][single_metadata_key]\n self.assertEqual(current_value, expected_value)\n\n @patch(\"notifier.models.JobGroupNotifier.objects.get\")\n 
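# The stacked patch decorators below swap notifier lookups and async task dispatch for mocks so the ETL update runs without external side effects.\n 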
@patch(\"notifier.tasks.send_notification.delay\")\n @patch(\"file_system.tasks.populate_job_group_notifier_metadata.delay\")\n @patch(\"beagle_etl.jobs.metadb_jobs.create_request_callback_instance\")\n def test_update_sample_preserve(\n self, request_callback, populate_job_group, send_notification, jobGroupNotifierObjectGet\n ):\n \"\"\"\n Test that other samples are not modified\n \"\"\"\n with self.settings(ETL_USER=str(self.etl_user.username)):\n request_callback.return_value = None\n populate_job_group.return_value = None\n jobGroupNotifierObjectGet.return_value = None\n send_notification.return_value = None\n sample_metadata = {}\n msg = SMILEMessage.objects.create(topic=\"update_sample\", message=self.new_sample_data_str)\n update_sample_job(str(msg.id))\n sample_files = FileRepository.filter(metadata={settings.SAMPLE_ID_METADATA_KEY: \"10075_D_2_3\"})\n for f in sample_files:\n self.assertEqual(f.metadata[\"sampleName\"], \"XXX002_P3_12345_L1\")\n\n @patch(\"beagle_etl.jobs.metadb_jobs.check_files_permissions\")\n @patch(\"notifier.tasks.notifier_start\")\n @patch(\"notifier.tasks.send_notification.delay\")\n @patch(\"notifier.models.JobGroupNotifier.objects.get\")\n @patch(\"file_system.tasks.populate_job_group_notifier_metadata.delay\")\n def test_update_sample(\n self, populate_job_group, job_group_notifier_get, send_notification, notifier_start, check_files_permissions\n ):\n \"\"\"\n Test that sample metadata is updated properly\n \"\"\"\n populate_job_group.return_value = None\n send_notification.return_value = None\n job_group_notifier_get.return_value = self.job_group_notifier\n notifier_start.return_value = True\n send_notification.return_value = True\n check_files_permissions.return_value = True\n settings.NOTIFIER_ACTIVE = False\n\n new_request_msg = SMILEMessage.objects.create(request_id=\"14269_C\", message=self.new_request_14269_C_str)\n update_sample_msg = SMILEMessage.objects.create(\n request_id=\"14269_C_1\", message=self.update_sample_14269_C_1_str\n )\n\n new_request(new_request_msg.id)\n tumor_or_normal = FileRepository.filter(\n metadata={settings.SAMPLE_ID_METADATA_KEY: \"14269_C_1\"},\n values_metadata=settings.TUMOR_OR_NORMAL_METADATA_KEY,\n ).first()\n self.assertEqual(tumor_or_normal, \"Normal\")\n update_sample_job(update_sample_msg.id)\n tumor_or_normal = FileRepository.filter(\n metadata={settings.SAMPLE_ID_METADATA_KEY: \"14269_C_1\"},\n values_metadata=settings.TUMOR_OR_NORMAL_METADATA_KEY,\n ).first()\n self.assertEqual(tumor_or_normal, \"Tumor\")\n\n files = FileRepository.filter(metadata={settings.SAMPLE_ID_METADATA_KEY: \"14269_C_1\"})\n self.assertEqual(len(files), 2)\n\n for file in files:\n old_file = FileMetadata.objects.get(file__path=file.file.path, latest=False)\n ddiff = DeepDiff(old_file.metadata, file.metadata, ignore_order=True)\n print(ddiff)\n self.assertIsNotNone(ddiff)\n self.assertEqual(ddiff[\"values_changed\"][\"root['tumorOrNormal']\"][\"new_value\"], \"Tumor\")\n self.assertEqual(ddiff[\"values_changed\"][\"root['tumorOrNormal']\"][\"old_value\"], \"Normal\")\n self.assertEqual(list(ddiff.keys()), [\"values_changed\"])\n\n @patch(\"beagle_etl.jobs.metadb_jobs.check_files_permissions\")\n @patch(\"notifier.tasks.notifier_start\")\n @patch(\"notifier.tasks.send_notification.delay\")\n @patch(\"notifier.models.JobGroupNotifier.objects.get\")\n @patch(\"file_system.tasks.populate_job_group_notifier_metadata.delay\")\n def test_update_sample_new_fastqs(\n self, populate_job_group, job_group_notifier_get, send_notification, 
notifier_start, check_files_permissions\n ):\n \"\"\"\n Test sample updates for new fastqs\n \"\"\"\n populate_job_group.return_value = None\n send_notification.return_value = None\n job_group_notifier_get.return_value = self.job_group_notifier\n notifier_start.return_value = True\n send_notification.return_value = True\n check_files_permissions.return_value = True\n settings.NOTIFIER_ACTIVE = False\n\n new_request_msg = SMILEMessage.objects.create(request_id=\"14269_C\", message=self.new_request_14269_C_str)\n update_sample_msg = SMILEMessage.objects.create(\n request_id=\"14269_C_1\", message=self.update_sample_14269_C_1_new_files_str\n )\n\n new_request(new_request_msg.id)\n tumor_or_normal = FileRepository.filter(\n metadata={settings.SAMPLE_ID_METADATA_KEY: \"14269_C_1\"},\n values_metadata=settings.TUMOR_OR_NORMAL_METADATA_KEY,\n ).first()\n self.assertEqual(tumor_or_normal, \"Normal\")\n update_sample_job(update_sample_msg.id)\n tumor_or_normal = FileRepository.filter(\n metadata={settings.SAMPLE_ID_METADATA_KEY: \"14269_C_1\"},\n values_metadata=settings.TUMOR_OR_NORMAL_METADATA_KEY,\n ).first()\n self.assertEqual(tumor_or_normal, \"Tumor\")\n\n files = FileRepository.filter(metadata={settings.SAMPLE_ID_METADATA_KEY: \"14269_C_1\"})\n self.assertEqual(len(files), 2)\n\n for file in files:\n self.assertTrue(file.file.path.endswith(\"new.fastq.gz\"))\n\n # @patch(\"notifier.models.JobGroupNotifier.objects.get\")\n # @patch(\"notifier.tasks.send_notification.delay\")\n # @patch(\"file_system.tasks.populate_job_group_notifier_metadata.delay\")\n # def test_update_sample_update(self, populate_job_group, send_notification, jobGroupNotifierObjectGet):\n # \"\"\"\n # Test that samples metadata is properly updated\n # \"\"\"\n # populate_job_group.return_value = None\n # jobGroupNotifierObjectGet.return_value = None\n # send_notification.return_value = None\n # sample_metadata = {}\n # file = FileRepository.filter(metadata={settings.REQUEST_ID_METADATA_KEY: \"10075_D_2\"}).first()\n # fastq = file.file.path\n # for single_key in file.metadata:\n # sample_metadata[single_key] = file.metadata[single_key]\n # request_data = copy.deepcopy(self.new_request_data)\n # sample_update_list = request_data[\"samples\"]\n # first_sample_update = sample_update_list[0]\n # first_sample_update[\"libraries\"][0][\"runs\"][0][\"fastqs\"] = [fastq]\n # request_data[\"samples\"] = [first_sample_update]\n # for single_key in request_data:\n # if single_key != \"samples\":\n # sample_metadata[single_key] = request_data[single_key]\n # sample_data = request_data[\"samples\"][0]\n # for single_key in sample_data:\n # sample_metadata[single_key] = sample_data[single_key]\n # sample_metadata.update(sample_data[\"libraries\"][0])\n # sample_metadata.update(sample_data[\"libraries\"][0][\"runs\"][0])\n # request_data_str = json.dumps(request_data)\n # update_sample_job(request_data_str)\n # file = FileRepository.filter(path=fastq).first()\n # for single_key in file.metadata:\n # if single_key in self.file_keys or single_key not in sample_metadata:\n # continue\n # if single_key == \"ciTag\":\n # sample_name = sample_metadata[\"cmoSampleName\"]\n # expected_value = \"s_\" + sample_name.replace(\"-\", \"_\")\n # else:\n # expected_value = sample_metadata[single_key]\n # current_value = file.metadata[single_key]\n # self.assertEqual(current_value, expected_value)\n", "sub_path": "beagle_etl/tests/jobs/test_metadb.py", "file_name": "test_metadb.py", "file_ext": "py", "file_size_in_byte": 22047, "program_lang": "python", 
"lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.test.TestCase", "line_number": 21, "usage_type": "name"}, {"api_name": "django.conf.settings.REQUEST_ID_METADATA_KEY", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 43, "usage_type": "name"}, {"api_name": "django.conf.settings.PROJECT_ID_METADATA_KEY", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 44, "usage_type": "name"}, {"api_name": "django.conf.settings.RECIPE_METADATA_KEY", "line_number": 45, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 45, "usage_type": "name"}, {"api_name": "beagle_etl.models.Notifier.objects.create", "line_number": 47, "usage_type": "call"}, {"api_name": "beagle_etl.models.Notifier.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.Notifier", "line_number": 47, "usage_type": "name"}, {"api_name": "beagle_etl.models.JobGroup.objects.create", "line_number": 48, "usage_type": "call"}, {"api_name": "beagle_etl.models.JobGroup.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.JobGroup", "line_number": 48, "usage_type": "name"}, {"api_name": "beagle_etl.models.JobGroupNotifier.objects.create", "line_number": 49, "usage_type": "call"}, {"api_name": "beagle_etl.models.JobGroupNotifier.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.JobGroupNotifier", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.conf.settings.TEST_FIXTURE_DIR", "line_number": 51, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 51, "usage_type": "name"}, {"api_name": "django.core.management.call_command", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.conf.settings.TEST_FIXTURE_DIR", "line_number": 53, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 53, "usage_type": "name"}, {"api_name": "django.core.management.call_command", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.conf.settings.TEST_FIXTURE_DIR", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 55, "usage_type": "name"}, {"api_name": "django.core.management.call_command", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.conf.settings.TEST_FIXTURE_DIR", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 57, "usage_type": "name"}, {"api_name": "django.core.management.call_command", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.conf.settings.TEST_FIXTURE_DIR", "line_number": 59, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 
59, "usage_type": "name"}, {"api_name": "json.load", "line_number": 61, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 62, "usage_type": "call"}, {"api_name": "json.load", "line_number": 64, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 66, "usage_type": "call"}, {"api_name": "json.load", "line_number": 68, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.conf.settings.TEST_FIXTURE_DIR", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 71, "usage_type": "name"}, {"api_name": "json.load", "line_number": 73, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django.conf.settings.TEST_FIXTURE_DIR", "line_number": 76, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 76, "usage_type": "name"}, {"api_name": "json.load", "line_number": 78, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.conf.settings.TEST_FIXTURE_DIR", "line_number": 81, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 81, "usage_type": "name"}, {"api_name": "json.load", "line_number": 83, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.conf.settings.TEST_FIXTURE_DIR", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 87, "usage_type": "name"}, {"api_name": "json.load", "line_number": 90, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 91, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.create_superuser", "line_number": 93, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 93, "usage_type": "name"}, {"api_name": "django.conf.settings.ETL_USER", "line_number": 94, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 94, "usage_type": "name"}, {"api_name": "django.conf.settings.NOTIFIER_ACTIVE", "line_number": 114, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 114, "usage_type": "name"}, {"api_name": "beagle_etl.models.SMILEMessage.objects.create", "line_number": 115, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects", "line_number": 115, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.SMILEMessage", "line_number": 115, "usage_type": "name"}, {"api_name": "beagle_etl.jobs.metadb_jobs.new_request", "line_number": 116, "usage_type": "call"}, {"api_name": "file_system.models.Request.objects.filter", "line_number": 117, "usage_type": "call"}, {"api_name": "file_system.models.Request.objects", "line_number": 117, "usage_type": "attribute"}, {"api_name": 
"file_system.models.Request", "line_number": 117, "usage_type": "name"}, {"api_name": "file_system.models.Sample.objects.filter", "line_number": 118, "usage_type": "call"}, {"api_name": "file_system.models.Sample.objects", "line_number": 118, "usage_type": "attribute"}, {"api_name": "file_system.models.Sample", "line_number": 118, "usage_type": "name"}, {"api_name": "file_system.models.Sample.objects.filter", "line_number": 119, "usage_type": "call"}, {"api_name": "file_system.models.Sample.objects", "line_number": 119, "usage_type": "attribute"}, {"api_name": "file_system.models.Sample", "line_number": 119, "usage_type": "name"}, {"api_name": "file_system.models.Sample.objects.filter", "line_number": 120, "usage_type": "call"}, {"api_name": "file_system.models.Sample.objects", "line_number": 120, "usage_type": "attribute"}, {"api_name": "file_system.models.Sample", "line_number": 120, "usage_type": "name"}, {"api_name": "file_system.models.Sample.objects.filter", "line_number": 121, "usage_type": "call"}, {"api_name": "file_system.models.Sample.objects", "line_number": 121, "usage_type": "attribute"}, {"api_name": "file_system.models.Sample", "line_number": 121, "usage_type": "name"}, {"api_name": "file_system.models.Patient.objects.filter", "line_number": 122, "usage_type": "call"}, {"api_name": "file_system.models.Patient.objects", "line_number": 122, "usage_type": "attribute"}, {"api_name": "file_system.models.Patient", "line_number": 122, "usage_type": "name"}, {"api_name": "file_system.models.Patient.objects.filter", "line_number": 123, "usage_type": "call"}, {"api_name": "file_system.models.Patient.objects", "line_number": 123, "usage_type": "attribute"}, {"api_name": "file_system.models.Patient", "line_number": 123, "usage_type": "name"}, {"api_name": "study.objects", "line_number": 131, "usage_type": "name"}, {"api_name": "study.objects.StudyObject.get_by_request", "line_number": 131, "usage_type": "call"}, {"api_name": "study.objects.StudyObject", "line_number": 131, "usage_type": "name"}, {"api_name": "study.objects", "line_number": 132, "usage_type": "argument"}, {"api_name": "study.objects", "line_number": 133, "usage_type": "name"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 134, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 134, "usage_type": "name"}, {"api_name": "django.conf.settings.REQUEST_ID_METADATA_KEY", "line_number": 134, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 134, "usage_type": "name"}, {"api_name": "file_system.models.Sample.objects.filter", "line_number": 137, "usage_type": "call"}, {"api_name": "file_system.models.Sample.objects", "line_number": 137, "usage_type": "attribute"}, {"api_name": "file_system.models.Sample", "line_number": 137, "usage_type": "name"}, {"api_name": "study.objects", "line_number": 140, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 96, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 97, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 98, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 99, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 100, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects.create", "line_number": 170, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects", "line_number": 170, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.SMILEMessage", "line_number": 170, 
"usage_type": "name"}, {"api_name": "beagle_etl.jobs.metadb_jobs.update_request_job", "line_number": 171, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 172, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 172, "usage_type": "name"}, {"api_name": "django.conf.settings.REQUEST_ID_METADATA_KEY", "line_number": 172, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 172, "usage_type": "name"}, {"api_name": "django.conf.settings.REQUEST_ID_METADATA_KEY", "line_number": 175, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 175, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 176, "usage_type": "call"}, {"api_name": "django.conf.settings.PROJECT_ID_METADATA_KEY", "line_number": 179, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 179, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 180, "usage_type": "call"}, {"api_name": "django.conf.settings.RECIPE_METADATA_KEY", "line_number": 183, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 183, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 184, "usage_type": "call"}, {"api_name": "django.conf.settings.LIMS_RECIPE_METADATA_KEY", "line_number": 184, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 184, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 189, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 142, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 143, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 144, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 145, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects.create", "line_number": 208, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects", "line_number": 208, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.SMILEMessage", "line_number": 208, "usage_type": "name"}, {"api_name": "beagle_etl.jobs.metadb_jobs.update_request_job", "line_number": 209, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 210, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 210, "usage_type": "name"}, {"api_name": "django.conf.settings.REQUEST_ID_METADATA_KEY", "line_number": 210, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 210, "usage_type": "name"}, {"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 213, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 213, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 192, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 193, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 194, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 195, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 196, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 240, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 240, "usage_type": "name"}, {"api_name": "django.conf.settings.REQUEST_ID_METADATA_KEY", "line_number": 240, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 240, "usage_type": "name"}, 
{"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 242, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 242, "usage_type": "name"}, {"api_name": "beagle_etl.models.SMILEMessage.objects.create", "line_number": 245, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects", "line_number": 245, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.SMILEMessage", "line_number": 245, "usage_type": "name"}, {"api_name": "beagle_etl.jobs.metadb_jobs.update_request_job", "line_number": 246, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 247, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 247, "usage_type": "name"}, {"api_name": "django.conf.settings.REQUEST_ID_METADATA_KEY", "line_number": 247, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 247, "usage_type": "name"}, {"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 250, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 250, "usage_type": "name"}, {"api_name": "django.conf.settings.REQUEST_ID_METADATA_KEY", "line_number": 256, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 256, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 257, "usage_type": "call"}, {"api_name": "django.conf.settings.PROJECT_ID_METADATA_KEY", "line_number": 260, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 260, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 261, "usage_type": "call"}, {"api_name": "django.conf.settings.RECIPE_METADATA_KEY", "line_number": 264, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 264, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 265, "usage_type": "call"}, {"api_name": "django.conf.settings.LIMS_RECIPE_METADATA_KEY", "line_number": 266, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 266, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 269, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 224, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 225, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 226, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 227, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects.create", "line_number": 292, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects", "line_number": 292, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.SMILEMessage", "line_number": 292, "usage_type": "name"}, {"api_name": "beagle_etl.jobs.metadb_jobs.update_sample_job", "line_number": 293, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 294, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 294, "usage_type": "name"}, {"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 294, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 294, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 276, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 277, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 278, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 279, "usage_type": "call"}, 
{"api_name": "django.conf.settings.NOTIFIER_ACTIVE", "line_number": 315, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 315, "usage_type": "name"}, {"api_name": "beagle_etl.models.SMILEMessage.objects.create", "line_number": 317, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects", "line_number": 317, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.SMILEMessage", "line_number": 317, "usage_type": "name"}, {"api_name": "beagle_etl.models.SMILEMessage.objects.create", "line_number": 318, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects", "line_number": 318, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.SMILEMessage", "line_number": 318, "usage_type": "name"}, {"api_name": "beagle_etl.jobs.metadb_jobs.new_request", "line_number": 322, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 323, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 323, "usage_type": "name"}, {"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 324, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 324, "usage_type": "name"}, {"api_name": "django.conf.settings.TUMOR_OR_NORMAL_METADATA_KEY", "line_number": 325, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 325, "usage_type": "name"}, {"api_name": "beagle_etl.jobs.metadb_jobs.update_sample_job", "line_number": 328, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 329, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 329, "usage_type": "name"}, {"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 330, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 330, "usage_type": "name"}, {"api_name": "django.conf.settings.TUMOR_OR_NORMAL_METADATA_KEY", "line_number": 331, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 331, "usage_type": "name"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 335, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 335, "usage_type": "name"}, {"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 335, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 335, "usage_type": "name"}, {"api_name": "file_system.models.FileMetadata.objects.get", "line_number": 339, "usage_type": "call"}, {"api_name": "file_system.models.FileMetadata.objects", "line_number": 339, "usage_type": "attribute"}, {"api_name": "file_system.models.FileMetadata", "line_number": 339, "usage_type": "name"}, {"api_name": "deepdiff.DeepDiff", "line_number": 340, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 298, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 299, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 300, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 301, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 302, "usage_type": "call"}, {"api_name": "django.conf.settings.NOTIFIER_ACTIVE", "line_number": 364, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 364, "usage_type": "name"}, {"api_name": "beagle_etl.models.SMILEMessage.objects.create", "line_number": 366, "usage_type": "call"}, 
{"api_name": "beagle_etl.models.SMILEMessage.objects", "line_number": 366, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.SMILEMessage", "line_number": 366, "usage_type": "name"}, {"api_name": "beagle_etl.models.SMILEMessage.objects.create", "line_number": 367, "usage_type": "call"}, {"api_name": "beagle_etl.models.SMILEMessage.objects", "line_number": 367, "usage_type": "attribute"}, {"api_name": "beagle_etl.models.SMILEMessage", "line_number": 367, "usage_type": "name"}, {"api_name": "beagle_etl.jobs.metadb_jobs.new_request", "line_number": 371, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 372, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 372, "usage_type": "name"}, {"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 373, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 373, "usage_type": "name"}, {"api_name": "django.conf.settings.TUMOR_OR_NORMAL_METADATA_KEY", "line_number": 374, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 374, "usage_type": "name"}, {"api_name": "beagle_etl.jobs.metadb_jobs.update_sample_job", "line_number": 377, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 378, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 378, "usage_type": "name"}, {"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 379, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 379, "usage_type": "name"}, {"api_name": "django.conf.settings.TUMOR_OR_NORMAL_METADATA_KEY", "line_number": 380, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 380, "usage_type": "name"}, {"api_name": "file_system.repository.FileRepository.filter", "line_number": 384, "usage_type": "call"}, {"api_name": "file_system.repository.FileRepository", "line_number": 384, "usage_type": "name"}, {"api_name": "django.conf.settings.SAMPLE_ID_METADATA_KEY", "line_number": 384, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 384, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 347, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 348, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 349, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 350, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 351, "usage_type": "call"}]} +{"seq_id": "116192407", "text": "from typing import List, Callable\nimport numpy as np\n\nfrom .base import NeuralNet, NeuralLayerFactory, NeuralLayer\n\n\nclass Sequential(NeuralNet):\n def __init__(self, isize: np.ndarray) -> None:\n self.isz = isize\n self.layers: List[NeuralLayer] = []\n\n def add(self, factory: NeuralLayerFactory) -> None:\n isize = self.isz\n if len(self.layers) > 0:\n isize = self.layers[-1].osize()\n layer = factory.set_isize(isize)\n self.layers.append(layer)\n\n def isize(self) -> np.ndarray:\n return self.isz\n\n def osize(self) -> np.ndarray:\n if len(self.layers) == 0:\n return np.array([0])\n else:\n return self.layers[-1].osize()\n\n def calc(self, X: np.ndarray) -> np.ndarray:\n for layer in self.layers:\n X, _ = layer.calc(X)\n return X\n\n def update_eta(self, update: Callable[[float], float]):\n for layer in self.layers:\n layer.update_eta(update)\n\n def train(self, X: np.ndarray, Y: np.ndarray) -> 
None:\n save = []\n for layer in self.layers:\n out, net = layer.calc(X)\n assert np.isfinite(out).all()\n assert np.isfinite(net).all()\n save.append((X, net, out))\n X = out\n assert Y.shape == X.shape\n dE = Y - X\n for layer in reversed(self.layers):\n X, net, out = save.pop()\n dE = layer.train(dE, out, net, X)\n assert np.isfinite(dE).all()\n", "sub_path": "src/ann/nnlib/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 1499, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "base.NeuralNet", "line_number": 7, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 8, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 10, "usage_type": "name"}, {"api_name": "base.NeuralLayer", "line_number": 10, "usage_type": "name"}, {"api_name": "base.NeuralLayerFactory", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 19, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 28, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 33, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 37, "usage_type": "attribute"}, {"api_name": "numpy.isfinite", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.isfinite", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "53705625", "text": "import utils.load_data as datama\nimport utils.attacker as attacker\nimport utils.tools as helper\nimport logging\nimport numpy as np\nimport foolbox\nfrom keras.utils import to_categorical\nimport argparse\nimport os\n\ndef compute_shuffle_idx(size):\n (x_train, y_train), (_, _), (_, _, num_class) = datama.getData('mnist')\n idx_mnist = np.arange(x_train.shape[0])\n np.random.shuffle(idx_mnist)\n\n (x_train, y_train), (_, _), (_, _, num_class) = datama.getData('cifar10')\n idx_cifar10 = np.arange(x_train.shape[0])\n np.random.shuffle(idx_cifar10)\n data = {'mnist':idx_mnist[:size], 'fashion_mnist':idx_mnist[:size], 'cifar10': idx_cifar10[:size]}\n if not os.path.isdir('./adv_data/'):\n os.mkdir('./adv_data/')\n np.save('./adv_data/data_index.npy', data)\n return idx_mnist[:size], idx_cifar10[:size]\n\n\n\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument('-m','--model', type=str,default='mlp')\n ap.add_argument('-d', '--dataset', type=str, default='mnist')\n #ap.add_argument('-s', '--size', type=int, default=10000)\n ap.add_argument('-a','--attacker', nargs='+')\n args = vars(ap.parse_args())\n\n #size = args['size']\n dataname = args['dataset']\n attack_methods = args['attacker']\n logger = logging.getLogger(__name__)\n logger.setLevel(level=logging.INFO)\n file_handler = logging.FileHandler('./attack_logs/%s_%s_attack.log'%(args['model'], dataname))\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n logger.info(\"Model %s, Dataset %s\"%(args['model'], dataname))\n #model_cifar10 = ['dense_net', 'netinnet', 'resnet', 'vgg']\n path = './cluster_results/model/'\n cifarmodelweights = {'dense_net': 'densenet_cifar10.h5',\n 'netinnet':'NetInNet_cifar10.h5', 'resnet':'ResNet_cifar10.h5',\n 'vgg':'vgg_cifar10.h5'}\n mnistmodelweights = 
{'mnist':{'deepxplore':'deepxplore_mnist.hdf5',\n 'lenet':'lenet_mnist.h5', 'mlp':'mlp_mnist.h5'},\n 'fashion_mnist':{'deepxplore':'deepxplore_fashion_mnist.hdf5',\n 'lenet':'lenet_fashion_mnist.h5',\n 'mlp':'mlp_fashion_mnist.h5'}}\n #model_mnist = ['mlp','deepxplore', 'lenet']\n datalist = ['mnist', 'fashion_mnist','cifar10']\n\n if 'cifar' in dataname:\n bounds = (0, 255.)\n weightspaths = cifarmodelweights\n else:\n bounds = (0, 1.0)\n weightspaths = mnistmodelweights[dataname]\n\n name = args['model']\n\n (x_train, _), (x_test, y_test), (_, _, num_class) = datama.getData(dataname)\n if name == 'mlp':\n x_train = x_train.reshape(x_train.shape[0], -1)\n x_test = x_test.reshape(x_test.shape[0], -1)\n bestModelName = path + weightspaths[name]\n if name == 'mlp':\n x_train = x_train.reshape(x_train.shape[0], -1)\n # x_test = x_test.reshape(x_test.shape[0], -1)\n model = helper.load_model(name, bestModelName, x_train.shape[1:], num_class, isDrop=False)\n\n del x_train\n y_test = np.argmax(y_test, axis=1)\n if not os.path.isfile('./adv_data/slectedTest300ImgsIdx_%s_%s'%(name, dataname)):\n y = model.predict(x_test)\n plabels = np.argmax(y, axis=1)\n b = plabels == y_test\n #idx = np.arange(x_test.shape[0])\n idx = np.nonzero(b)[0]\n np.random.shuffle(idx)\n cx_train, cy_train, selectedIndex = x_test[idx[:300]], y_test[idx[:300]], idx[:300]\n cidx = selectedIndex\n np.save('./adv_data/slectedTest300ImgsIdx_%s_%s'%(name, dataname), {\"idx\":selectedIndex})\n else:\n data = np.load('./adv_data/slectedTest300ImgsIdx_%s_%s'%(name, dataname)).item()\n selectedIndex = data.get(\"idx\")\n (_, _), (x_test, y_test), (_, _, num_class) = datama.getData(dataname)\n y_test = np.argmax(y_test, axis=1)[..., np.newaxis]\n cx_train, cy_train, cidx = x_test[selectedIndex], y_test[selectedIndex], selectedIndex\n\n\n\n\n if \"FGSM\" in attack_methods:\n print(\"FGSM Attacking\")\n if not os.path.isdir('./FGSM_%s_%s/' % (name, dataname)):\n os.mkdir('./FGSM_%s_%s/' % (name, dataname))\n x_fgsm, s_fgsm = attacker.attackFGSM(model, cx_train, cy_train, bounds,svaeMedianImages='./FGSM_%s_%s/'%(name, dataname))\n #idx_fgsm = cidx[s_fgsm]\n score = model.evaluate(x_fgsm, to_categorical(cy_train[s_fgsm],num_classes=10),verbose=0, batch_size=1)\n logger.info(\"FGSM {}, {}\".format(name, score))\n data = {'fgsm': {'x_adv': x_fgsm, 'y_adv': cy_train[s_fgsm], 'idx': selectedIndex[s_fgsm]}}\n np.save('./adv_data/%s_%s_fgsm.np' % (name, dataname), data)\n del x_fgsm,data\n\n # if \"DF\" in attack_methods:\n # print(\"DF Attacking\")\n # x_df, s_df = attacker.attackDeepFool(model, cx_train, cy_train, bounds)\n # idx_df = cidx[s_df]\n # score = model.evaluate(x_df, to_categorical(cy_train[s_df]),verbose=0, batch_size=1)\n # logger.info(\"DF {}, {}\".format(name, score))\n # data = { 'df': {'x_adv': x_df, 'y_adv': cy_train[s_df], 'idx': selectedIndex[idx_df]}}\n # np.save('./adv_data/%s_%s_df.np' % (name, dataname), data)\n # del x_df,idx_df, data\n #\n # if \"BIM\" in attack_methods:\n # print(\"BIM Attacking\")\n # x_bim, s_bim = attacker.attackBIM(model, cx_train, cy_train, bounds)\n # idx_bim = cidx[s_bim]\n # score = model.evaluate(x_bim, to_categorical(cy_train[s_bim]),verbose=0, batch_size=1)\n # logger.info(\"BIM {}, {}\".format(name, score))\n # data = {'bim': {'x_adv': x_bim, 'y_adv': cy_train[s_bim], 'idx': selectedIndex[idx_bim]}}\n # np.save('./adv_data/%s_%s_bim.np' % (name, dataname), data)\n # del x_bim,s_bim,data\n #\n # if 'JSMA' in attack_methods:\n # print(\"JSMA Attacking\")\n # x_jsma, s_jsma = 
attacker.attackJSMA(model, cx_train, cy_train, bounds)\n # idx_jsma = cidx[s_jsma]\n # score = model.evaluate(x_jsma, to_categorical(cy_train[s_jsma]),verbose=0, batch_size=1)\n # logger.info(\"JSMA {}, {}\".format(name, score))\n # data = {'jsma': {'x_adv': x_jsma, 'y_adv': cy_train[s_jsma], 'idx': selectedIndex[idx_jsma]}}\n # np.save('./adv_data/%s_%s_jsma.np'%(name, dataname), data)\n # del x_jsma,idx_jsma,data\n\n if 'CW' in attack_methods:\n print(\"CW Attacking\")\n if not os.path.isdir('./CW_%s_%s/' % (name, dataname)):\n os.mkdir('./CW_%s_%s/' % (name, dataname))\n x_cw, s_cw = attacker.attackCWl2(model, cx_train, cy_train, bounds,svaeMedianImages='./CW_%s_%s/'%(name, dataname))\n #idx_cw = cidx[s_cw]\n score = model.evaluate(x_cw, to_categorical(cy_train[s_cw],num_classes=10), verbose=0, batch_size=1)\n logger.info(\"CW {}, {}\".format(name, score))\n data = {'cw':{'x_adv':x_cw, 'y_adv':cy_train[s_cw], 'idx': selectedIndex[s_cw]}}\n np.save('./adv_data/%s_%s_cw.np'%(name, dataname), data)\n\n\n\n", "sub_path": "generateAdvImgs.py", "file_name": "generateAdvImgs.py", "file_ext": "py", "file_size_in_byte": 7145, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "utils.load_data.getData", "line_number": 12, "usage_type": "call"}, {"api_name": "utils.load_data", "line_number": 12, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 14, "usage_type": "attribute"}, {"api_name": "utils.load_data.getData", "line_number": 16, "usage_type": "call"}, {"api_name": "utils.load_data", "line_number": 16, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 22, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 38, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 39, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 40, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 41, "usage_type": "call"}, {"api_name": "utils.load_data.getData", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.load_data", "line_number": 67, "usage_type": "name"}, {"api_name": "utils.tools.load_model", "line_number": 75, "usage_type": "call"}, {"api_name": "utils.tools", "line_number": 75, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.nonzero", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 85, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 88, 
"usage_type": "call"}, {"api_name": "numpy.load", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.load_data.getData", "line_number": 92, "usage_type": "call"}, {"api_name": "utils.load_data", "line_number": 92, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path", "line_number": 101, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 102, "usage_type": "call"}, {"api_name": "utils.attacker.attackFGSM", "line_number": 103, "usage_type": "call"}, {"api_name": "utils.attacker", "line_number": 103, "usage_type": "name"}, {"api_name": "keras.utils.to_categorical", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 144, "usage_type": "call"}, {"api_name": "utils.attacker.attackCWl2", "line_number": 145, "usage_type": "call"}, {"api_name": "utils.attacker", "line_number": 145, "usage_type": "name"}, {"api_name": "keras.utils.to_categorical", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 150, "usage_type": "call"}]} +{"seq_id": "473452521", "text": "import os\r\nimport pymongo\r\nimport json\r\n\r\ndef dummy(request):\r\n \"\"\"Responds to any HTTP request.\r\n Args:\r\n request (flask.Request): HTTP request object.\r\n Returns:\r\n The response text or any set of values that can be turned into a\r\n Response object using\r\n `make_response `.\r\n \"\"\"\r\n if request.method == 'OPTIONS':\r\n # Allows GET requests from origin https://mydomain.com with\r\n # Authorization header\r\n headers = {\r\n 'Access-Control-Allow-Origin': '*',\r\n 'Access-Control-Allow-Methods': 'POST',\r\n 'Access-Control-Allow-Headers': '*',\r\n 'Access-Control-Max-Age': '3600',\r\n 'Access-Control-Allow-Credentials': 'true'\r\n }\r\n return ('', 204, headers)\r\n\r\n # Set CORS headers for main requests\r\n headers = {\r\n 'Access-Control-Allow-Origin': '*',\r\n 'Access-Control-Allow-Credentials': 'true'\r\n }\r\n\r\n # request_json = request.get_json()\r\n mongostr = os.environ.get('MONGOSTR')\r\n client = pymongo.MongoClient(mongostr)\r\n db = client[\"safewalk\"]\r\n col = db.locations\r\n results = []\r\n maxid = 0\r\n \r\n for x in col.find():\r\n item = {}\r\n item['id'] = str(x['id'])\r\n # item[\"emoji\"] = str(x[\"emoji\"])\r\n item[\"description\"] = x[\"description\"]\r\n # item[\"name\"] = x[\"name\"]\r\n item[\"lat\"] = x[\"lat\"]\r\n item[\"long\"] = x[\"long\"]\r\n item[\"img_url\"] = x[\"img_url\"]\r\n \r\n results.append(item)\r\n maxid +=1\r\n \r\n \r\n retjson = {}\r\n\r\n retjson['locations'] = results\r\n retjson['mongoresult'] = str(maxid)\r\n\r\n return json.dumps(retjson)\r\n\r\n\r\n retstr = \"action not done\"\r\n\r\n if request.args and 'message' in request.args:\r\n return request.args.get('message')\r\n elif request_json and 'message' in request_json:\r\n return request_json['message']\r\n else:\r\n return retstr\r\n", "sub_path": "backend/getAllMapPins.py", "file_name": "getAllMapPins.py", "file_ext": "py", "file_size_in_byte": 2016, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": 
"os.environ.get", "line_number": 33, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 34, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "361209781", "text": "#\n# cogs/info/info.py\n#\n# mawabot - Maware's selfbot\n# Copyright (c) 2017 Ma-wa-re, Ammon Smith\n#\n# mawabot is available free of charge under the terms of the MIT\n# License. You are free to redistribute and/or modify it under those\n# terms. It is distributed in the hopes that it will be useful, but\n# WITHOUT ANY WARRANTY. See the LICENSE file for more details.\n#\n\n''' Contains in-depth commands that get information '''\nimport re\nimport unicodedata\n\nimport discord\nfrom discord.ext import commands\n\n__all__ = [\n 'Info',\n]\n\nCHANNEL_REGEX = re.compile(r'<#([0-9]+)>')\nEMOJI_REGEX = re.compile(r'<:([A-Za-z~\\-0-9]+):([0-9]+)>')\n\nclass Info:\n __slots__ = (\n 'bot',\n )\n\n def __init__(self, bot):\n self.bot = bot\n\n def _get_users(self, names):\n if not names:\n return [self.bot.user]\n\n users = []\n for name in names:\n if name == 'me':\n users.append(self.bot.user)\n continue\n\n try:\n id = int(name)\n user = self.bot.get_user(id)\n if user:\n users.append(user)\n except ValueError:\n for user in self.bot.users:\n if user.name == name:\n users.append(user)\n break\n return users\n\n def _get_members(self, guild, names):\n if not names:\n return [guild.me]\n\n members = []\n for name in names:\n if name == 'me':\n members.append(guild.me)\n continue\n\n try:\n id = int(name)\n member = guild.get_member(id)\n if member:\n members.append(member)\n except ValueError:\n member = guild.get_member_named(name)\n if member:\n members.append(member)\n return members\n\n @commands.command()\n async def uinfo(self, ctx, *names: str):\n ''' Gets information about the given user(s) '''\n\n if ctx.guild is None:\n users = self._get_users(names)\n else:\n users = self._get_members(ctx.guild, names)\n\n for user in users:\n profile = None\n if not user.bot and not isinstance(user, discord.ClientUser):\n # Get user profile info\n profile = await user.profile()\n\n lines = [user.mention]\n\n if profile is not None:\n if profile.premium:\n since = profile.premium_since.strftime('%x @ %X')\n lines.append(f'Nitro user since `{since}`')\n\n if isinstance(user, discord.Member):\n if user.game:\n if user.game.type == 1:\n lines.append(f'Streaming [{user.game.name}]({user.game.url})')\n else:\n lines.append(f'Playing `{user.game.name}`')\n\n if user.voice:\n mute = user.voice.mute or user.voice.self_mute\n deaf = user.voice.deaf or user.voice.self_deaf\n\n states = []\n if mute:\n states.append('muted')\n if deaf:\n states.append('deafened')\n\n if states:\n state = ', '.join(states)\n else:\n state = 'active'\n\n lines.append(f'Voice: {state}')\n\n if user.nick:\n lines.append(f'Nickname: {user.nick}')\n\n roles = ' '.join(map(lambda x: x.mention, user.roles[1:]))\n if roles:\n lines.append(f'Roles: {roles}')\n\n # For embed.color\n # pylint: disable=assigning-non-slot\n\n embed = discord.Embed(type='rich', description='\\n'.join(lines))\n embed.timestamp = user.created_at\n if hasattr(user, 'color'):\n embed.color = user.color\n\n name = f'{user.name}#{user.discriminator}'\n embed.set_author(name=name)\n embed.set_thumbnail(url=user.avatar_url)\n if isinstance(user, discord.Member):\n embed.add_field(name='Status:', value=f'`{user.status}`')\n embed.add_field(name='ID:', 
value=f'`{user.id}`')\n\n # Get connected accounts\n if profile is not None:\n if profile.connected_accounts:\n accounts = []\n\n for account in profile.connected_accounts:\n if account['type'] == 'steam':\n url = f'https://steamcommunity.com/profiles/{account[\"id\"]}'\n accounts.append(f'[{account[\"name\"]}]({url})')\n elif account['type'] == 'twitch':\n url = f'https://www.twitch.tv/{account[\"name\"]}'\n accounts.append(f'[{account[\"name\"]}]({url})')\n\n if accounts:\n embed.add_field(name='Connected Accounts:', value=', '.join(accounts))\n\n await ctx.send(embed=embed)\n\n @commands.command()\n async def cinfo(self, ctx, *names: str):\n ''' Gets information about a given channel '''\n\n if not names:\n await self._cinfo(ctx, None)\n for name in names:\n await self._cinfo(ctx, name)\n\n async def _cinfo(self, ctx, name):\n # Read argument\n if name is None:\n channel = ctx.channel\n else:\n channel = None\n match = CHANNEL_REGEX.match(name)\n if match:\n cid = int(match[1])\n elif name.isdigit():\n cid = int(name)\n elif ctx.guild:\n channel = discord.utils.find(lambda chan: name == chan.name, ctx.guild.channels)\n\n # Retrieve channel from ID\n if channel is None and cid:\n channel = self.bot.get_channel(cid)\n\n # Couldn't find it\n if channel is None:\n embed = discord.Embed(description=f'No channel found that matched {name}', color=discord.Color.red())\n embed.set_author(name='Error')\n await ctx.send(embed=embed)\n else:\n embed = discord.Embed()\n embed.timestamp = channel.created_at\n desc = [f'ID: `{channel.id}`']\n\n # Check if it is a guild channel\n if isinstance(channel, discord.abc.GuildChannel):\n embed.set_author(name=channel.name)\n desc.append(f'Guild: `{channel.guild.name}`')\n\n if isinstance(channel, discord.TextChannel):\n if channel.is_default():\n embed.set_author(name=f'{channel.name} [DEFAULT]')\n\n desc.append('Type: `Text`')\n desc.append(f'Mention: {channel.mention}')\n desc.append(f'NSFW: `{channel.is_nsfw()}`')\n desc.append(f'Members: `{len(channel.members)}`')\n\n if channel.topic is not None:\n embed.add_field(name='Topic:', value=channel.topic)\n else:\n desc.append('Type: `Voice`')\n desc.append(f'Bitrate: `{channel.bitrate}`')\n connected = len(channel.members)\n limit = channel.user_limit\n\n if limit == 0:\n connstr = f'{connected}'\n else:\n connstr = f'{connected}/{limit}'\n desc.append(f'Connected: `{connstr}`')\n\n else:\n # Must be a DM otherwise\n if isinstance(channel, discord.DMChannel):\n desc.append('Type: `DM`')\n embed.set_author(name=channel.recipient.name)\n else:\n desc.append('Type: `DM Group`')\n embed.set_author(name=channel.name)\n desc.append(f'Owner: `{channel.owner.name}`')\n\n embed.description = '\\n'.join(desc)\n await ctx.send(embed=embed)\n\n @commands.command()\n async def id(self, ctx, *ids: int):\n ''' Gets information about the given snowflake(s) '''\n\n for id in ids:\n embed = discord.Embed(type='rich')\n embed.set_author(name=f'Snowflake {id}')\n embed.timestamp = discord.utils.snowflake_time(id)\n\n guild = self.bot.get_guild(id)\n if guild:\n embed.add_field(name='Guild:', value=guild.name)\n embed.set_thumbnail(url=guild.icon_url)\n\n channel = self.bot.get_channel(id)\n if channel:\n text = channel.mention\n if channel.guild != guild:\n text += f' from \"{channel.guild.name}\"'\n embed.add_field(name='Channel:', value=text)\n\n user = self.bot.get_user(id)\n if user:\n embed.add_field(name='User:', value=user.mention)\n\n emoji = self.bot.get_emoji(id)\n if emoji:\n text = f'{emoji} ({emoji.name}) from 
\"{channel.guild.name}\"'\n embed.add_field(name='Emoji:', value=text)\n\n # Can't do get_message() since we're not a true bot\n\n await ctx.send(embed=embed)\n\n @commands.command()\n async def emoji(self, ctx, *emojis: str):\n ''' Gets information about the given emoji(s) '''\n\n for emoji in emojis:\n match = EMOJI_REGEX.match(emoji)\n lines = [emoji]\n if match:\n lines.append(f'Emoji: `{match[1]}`')\n lines.append(f'ID: `{match[2]}`')\n else:\n try:\n name = unicodedata.name(emoji)\n lines.append(f'Unicode name: `{name}`')\n try:\n lines.append(f'Ord: `{ord(name)}`')\n except:\n pass\n except TypeError:\n lines.append('Not an emoji')\n\n await ctx.send(content='\\n'.join(lines))\n", "sub_path": "mawabot/cogs/info/info.py", "file_name": "info.py", "file_ext": "py", "file_size_in_byte": 10178, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "re.compile", "line_number": 24, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 25, "usage_type": "call"}, {"api_name": "discord.ClientUser", "line_number": 89, "usage_type": "attribute"}, {"api_name": "discord.Member", "line_number": 100, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 134, "usage_type": "call"}, {"api_name": "discord.Member", "line_number": 142, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 78, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 78, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 164, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 164, "usage_type": "name"}, {"api_name": "discord.utils.find", "line_number": 185, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 185, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 193, "usage_type": "call"}, {"api_name": "discord.Color.red", "line_number": 193, "usage_type": "call"}, {"api_name": "discord.Color", "line_number": 193, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 197, "usage_type": "call"}, {"api_name": "discord.abc", "line_number": 202, "usage_type": "attribute"}, {"api_name": "discord.TextChannel", "line_number": 206, "usage_type": "attribute"}, {"api_name": "discord.DMChannel", "line_number": 231, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 247, "usage_type": "call"}, {"api_name": "discord.utils.snowflake_time", "line_number": 249, "usage_type": "call"}, {"api_name": "discord.utils", "line_number": 249, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.command", "line_number": 242, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 242, "usage_type": "name"}, {"api_name": "unicodedata.name", "line_number": 288, "usage_type": "call"}, {"api_name": "discord.ext.commands.command", "line_number": 276, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 276, "usage_type": "name"}]} +{"seq_id": "23876739", "text": "import tempfile\nfrom unittest import mock\n\nimport pytest\nfrom pathlib import Path\nimport xarray as xr\n\nfrom finch.processes import BCCAQV2HeatWave\nfrom finch.processes.wps_xclim_indices import make_nc_input\nfrom tests.utils import wps_literal_input, execute_process\n\n\n@mock.patch(\"finch.processes.utils.get_bccaqv2_opendap_datasets\")\n@mock.patch.object(BCCAQV2HeatWave, \"subset\")\n@mock.patch.object(BCCAQV2HeatWave, \"compute_indices\")\ndef 
test_bccaqv2_heatwave(\n mock_compute_indices, mock_bccaq_subset, mock_datasets, client\n):\n identifier = \"BCCAQv2_heat_wave_frequency_gridpoint\"\n inputs = [\n wps_literal_input(\"output_format\", \"netcdf\"),\n wps_literal_input(\"lat\", \"2\"),\n wps_literal_input(\"lon\", \"3\"),\n wps_literal_input(\"thresh_tasmin\", \"22.0 degC\"),\n wps_literal_input(\"thresh_tasmax\", \"30 degC\"),\n wps_literal_input(\"window\", \"3\"),\n wps_literal_input(\"freq\", \"YS\"),\n ]\n\n metalink = mock.MagicMock()\n tmp = Path(__file__).parent / \"tmp\"\n tmp.mkdir(exist_ok=True)\n\n metalink_file = mock.MagicMock()\n metalink_file.file = tmp / \"tasmin_some_file.nc\"\n metalink_file.file.write_text(\"dummy data\")\n metalink_file2 = mock.MagicMock()\n metalink_file2.file = tmp / \"tasmax_some_file.nc\"\n metalink_file2.file.write_text(\"dummy data\")\n\n metalink.files = [metalink_file, metalink_file2]\n\n mock_datasets.return_value = [\"dataset1\", \"dataset2\"]\n mock_bccaq_subset.return_value = metalink\n\n def write_dummy_data(filename):\n Path(filename).write_text(\"dummy data\")\n\n mock_computed = mock.MagicMock()\n mock_compute_indices.return_value = mock_computed\n mock_computed.to_netcdf.side_effect = write_dummy_data\n\n outputs = execute_process(client, identifier, inputs, output_names=[\"output\"])\n\n output_file = outputs[0]\n assert len(outputs) == 1\n assert output_file.endswith(\"zip\")\n assert Path(output_file).exists()\n\n assert len(mock_bccaq_subset.call_args[0][0][\"resource\"]) == 4\n\n\ndef test_bccaqv2_heat_wave_frequency_sample_data():\n here = Path(__file__).parent\n folder = here / \"data\" / \"bccaqv2_single_cell\"\n tasmin = list(sorted(folder.glob(\"tasmin*.nc\")))[0]\n tasmax = list(sorted(folder.glob(\"tasmax*.nc\")))[0]\n\n tasmin_input = make_nc_input(\"tasmin\")\n tasmin_input.file = tasmin\n tasmax_input = make_nc_input(\"tasmax\")\n tasmax_input.file = tasmax\n\n inputs = {\n \"tasmin\": [tasmin_input],\n \"tasmax\": [tasmax_input],\n }\n process = BCCAQV2HeatWave()\n process.workdir = tempfile.mkdtemp()\n out = process.compute_indices(process.indices_process.xci, inputs)\n\n input_attrs = xr.open_dataset(tasmin).attrs\n del input_attrs[\"creation_date\"]\n output_attrs = out.attrs\n del output_attrs[\"creation_date\"]\n\n assert output_attrs == input_attrs\n\n\n@pytest.mark.online\ndef test_bccaqv2_heatwave_online(client):\n identifier = \"BCCAQv2_heat_wave_frequency_gridpoint\"\n up_right = 45.507485, -73.541295\n\n inputs = [\n wps_literal_input(\"output_format\", \"netcdf\"),\n wps_literal_input(\"lat\", str(up_right[0])),\n wps_literal_input(\"lon\", str(up_right[1])),\n wps_literal_input(\"thresh_tasmin\", \"22.0 degC\"),\n wps_literal_input(\"thresh_tasmax\", \"30 degC\"),\n wps_literal_input(\"window\", \"3\"),\n wps_literal_input(\"freq\", \"YS\"),\n ]\n\n outputs = execute_process(client, identifier, inputs, output_names=[\"output\"])\n\n print(outputs)\n", "sub_path": "tests/test_wps_bccaqv2_heatwave.py", "file_name": "test_wps_bccaqv2_heatwave.py", "file_ext": "py", "file_size_in_byte": 3465, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "tests.utils.wps_literal_input", "line_number": 21, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 22, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 23, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 24, "usage_type": "call"}, 
{"api_name": "tests.utils.wps_literal_input", "line_number": 25, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 26, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 27, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 30, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 30, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 31, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 34, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 34, "usage_type": "name"}, {"api_name": "unittest.mock.MagicMock", "line_number": 37, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 37, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 47, "usage_type": "call"}, {"api_name": "unittest.mock.MagicMock", "line_number": 49, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 49, "usage_type": "name"}, {"api_name": "tests.utils.execute_process", "line_number": 53, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 58, "usage_type": "call"}, {"api_name": "unittest.mock.patch", "line_number": 13, "usage_type": "call"}, {"api_name": "unittest.mock", "line_number": 13, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 14, "usage_type": "call"}, {"api_name": "finch.processes.BCCAQV2HeatWave", "line_number": 14, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 14, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 14, "usage_type": "name"}, {"api_name": "unittest.mock.patch.object", "line_number": 15, "usage_type": "call"}, {"api_name": "finch.processes.BCCAQV2HeatWave", "line_number": 15, "usage_type": "argument"}, {"api_name": "unittest.mock.patch", "line_number": 15, "usage_type": "attribute"}, {"api_name": "unittest.mock", "line_number": 15, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 64, "usage_type": "call"}, {"api_name": "finch.processes.wps_xclim_indices.make_nc_input", "line_number": 69, "usage_type": "call"}, {"api_name": "finch.processes.wps_xclim_indices.make_nc_input", "line_number": 71, "usage_type": "call"}, {"api_name": "finch.processes.BCCAQV2HeatWave", "line_number": 78, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 79, "usage_type": "call"}, {"api_name": "xarray.open_dataset", "line_number": 82, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 96, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 97, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 98, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 99, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 100, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 101, "usage_type": "call"}, {"api_name": "tests.utils.wps_literal_input", "line_number": 102, "usage_type": "call"}, {"api_name": "tests.utils.execute_process", "line_number": 105, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 90, "usage_type": "attribute"}]} +{"seq_id": "204747468", "text": "import json \nimport os \nimport sys, getopt\nfrom datetime import datetime \n\nconfig = {\"input\":\"txt\", \"output\":\"glm\", \"type\":[]}\n\ndef help():\n\treturn \"\"\"txt-cyme2glm.py -i|--ifile [,[,...]] 
-o|--ofile \n -i|--ifile : [REQUIRED] txt input file name\n -o|--ofile : [REQUIRED] glm output file name\n \"\"\"\n\ndef convert(input_file=None, output_file=None, output_type=None):\n\n\tif not input_file:\n\t\traise Exception(f\"missing input file name\")\n\n\tif not output_file:\n\t\traise Exception(f\"missing output file name\")\n\n\tsections = get_sections(input_file)\n\tprint(json.dumps(sections,indent=4))\n\n\twith open(output_file,\"w\") as glm:\n\n\t\tglm.write(f\"#warning CYME conversion from {input_file} incomplete\\n\")\n\ndef get_sections(input_file):\n\tsections = {}\n\twith open(input_file,\"r\") as txt:\n\t\ttext = txt.read().split(\"\\n\\n\")\n\t\tfor section in text:\n\t\t\tlines = section.splitlines()\n\t\t\tif len(lines) == 0:\n\t\t\t\tcontinue\n\t\t\theading = lines[0]\n\t\t\tif len(heading) == 0:\n\t\t\t\tcontinue\n\t\t\tif heading[0] != \"[\" or heading[-1] != \"]\":\n\t\t\t\traise Exception(f\"{heading} is an invalid section heading\")\n\t\t\theading_name = heading[1:-1]\n\t\t\tif len(lines) > 1:\n\t\t\t\tdata = {}\n\t\t\t\tif lines[1][0:6] == \"FORMAT\":\n\t\t\t\t\t# CSV data\n\t\t\t\t\theader = lines[1].split(\"=\")[1].split(\",\")\n\t\t\t\t\ttags = header[1:]\n\t\t\t\t\tvalues = {}\n\t\t\t\t\tfor line in lines[2:]:\n\t\t\t\t\t\tvalues[line[0]] = dict(zip(header[1:],line.split()))\n\t\t\t\t\tdata[\"values\"] = values\n\t\t\t\t\tsections[heading_name] = data\n\t\t\t\telse:\n\t\t\t\t\t# tuples\n\t\t\t\t\tsections[heading_name] = []\n\t\t\telse:\n\t\t\t\tsections[heading_name] = []\n\treturn sections\n\nif __name__ == '__main__':\n\t\n\topts, args = getopt.getopt(sys.argv[1:],\"hi:o:t:\",[\"help\",\"ifile=\",\"ofile=\",\"type=\"]);\n\n\tif not opts : \n\t\tprint(help())\n\t\tsys.exit(0)\n\tfor opt, arg in opts:\n\t\tif opt in (\"-h\",\"--help\"):\n\t\t\tprint(help())\n\t\t\tsys.exit(0);\n\t\telif opt in (\"-i\", \"--ifile\"):\n\t\t\tinput_file = arg.strip()\n\t\telif opt in (\"-o\", \"--ofile\"):\n\t\t\toutput_file = arg.strip()\n\t\telif opt in (\"-t\", \"--type\"):\n\t\t\toutput_type = arg.strip()\n\t\telse:\n\t\t\traise Exception(f\"'{opt}' is an invalid command line option\");\n\n\tconvert(input_file=input_file,output_file=output_file)\n\t", "sub_path": "converters/txt-cyme2glm.py", "file_name": "txt-cyme2glm.py", "file_ext": "py", "file_size_in_byte": 2154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "json.dumps", "line_number": 23, "usage_type": "call"}, {"api_name": "getopt.getopt", "line_number": 63, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 63, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 67, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 71, "usage_type": "call"}]} +{"seq_id": "643938767", "text": "import os\nimport sys\nimport pygame\nimport requests\nimport requests\nimport json\n\n\n#Дворцовая набережная - Малая Нева - Финский залив - Петргоф\ngeocoder_request = \"http://geocode-maps.yandex.ru/1.x/?apikey=40d1649f-0493-4b70-98ba-98533de7710b&geocode=Санкт Петербург&format=json\"\nresponse = requests.get(geocoder_request)\na = response.json()\nb = a['response']['GeoObjectCollection']['featureMember'][0]['GeoObject']['Point']['pos'].split()\nresponse = None\nmap_request = 'http://static-maps.yandex.ru/1.x/?ll=' + b[0] + ',' + b[1] + '&spn=2.00,2.00&l=map'\nresponse = requests.get(map_request)\n\nif not response:\n print(\"Ошибка выполнения запроса:\")\n print(map_request)\n print(\"Http статус:\", response.status_code, 
\"(\", response.reason, \")\")\n sys.exit(1)\n\n# Запишем полученное изображение в файл.\nmap_file = \"map.png\"\nwith open(map_file, \"wb\") as file:\n file.write(response.content)\n\n# Инициализируем pygame\npygame.init()\nscreen = pygame.display.set_mode((600, 450))\n# Рисуем картинку, загружаемую из только что созданного файла.\nscreen.blit(pygame.image.load(map_file), (0, 0))\n# Переключаем экран и ждем закрытия окна.\npygame.display.flip()\nwhile pygame.event.wait().type != pygame.QUIT:\n pass\npygame.quit()\n\n# Удаляем за собой файл с изображением.\nos.remove(map_file)", "sub_path": "2.py", "file_name": "2.py", "file_ext": "py", "file_size_in_byte": 1535, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "requests.get", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 35, "usage_type": "attribute"}, {"api_name": "pygame.event.wait", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 38, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "310540832", "text": "#!C:\\Users\\Evol\\Desktop\\python\\webapp-python\n#-*- coding: utf-8 -*-\n\n__author__ = 'Lu Sunping'\n\n'''\nasync web application\n'''\n\n#由于Web app是建立在asyncio的基础上的,所以用aiohttp写一个基本的app.py\n\nimport logging; logging.basicConfig(level=logging.INFO)\nimport asyncio, os, json, time\nfrom datetime import datetime\nfrom aiohttp import web\n\ndef index(request):\n\treturn web.Response(body=b'

    Awesome\n\n    ')\n\t\ndef index_home(request):\n\treturn web.Response(body=b'\n\n    Home\n\n    ')\n\t \n@asyncio.coroutine\ndef init(loop):\n\tapp = web.Application(loop = loop)\n\tapp.router.add_route('GET','/',index)\n\tapp.router.add_route('GET','/home',index_home)\n\tsrv = yield from loop.create_server(app.make_handler(),'127.0.0.1',9000)\n\tlogging.info(\"Server started at http://127.0.0.1...\")\n\treturn srv\n\t\nloop = asyncio.get_event_loop()\nloop.run_until_complete(init(loop))\nloop.run_forever()\n\t", "sub_path": "www/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 895, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.basicConfig", "line_number": 12, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 12, "usage_type": "attribute"}, {"api_name": "aiohttp.web.Response", "line_number": 18, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 18, "usage_type": "name"}, {"api_name": "aiohttp.web.Response", "line_number": 21, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 21, "usage_type": "name"}, {"api_name": "aiohttp.web.Application", "line_number": 25, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 25, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 29, "usage_type": "call"}, {"api_name": "asyncio.coroutine", "line_number": 23, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "371833675", "text": "import os \nimport sys\nimport vss_cls\nimport serch_cls\nimport argparse\n\n\ndef searc_by_file_name(filename,path):\n    sech = serch_cls.search()\n    find_file = sech.find_files(filename,path)\n    return find_file\n\ndef searc_by_hash(hash,path):\n    sech = serch_cls.search()\n    find_file = sech.find_hash(hash,path)\n    return find_file\n\ndef searc_by_folder(path,root_dir):\n    path = path[3:]\n    path = os.path.join(root_dir, path)\n    if os.path.exists(path):\n        print(\"Exsiting Path: \"+path)\n        sech = serch_cls.search()\n        find_file = sech.find_folders_files(path)\n        return find_file\n    else:\n        print(\"Path Doesn't exists in \" + root_dir)\n\n\n\ndef main(argv=[]):\n    parser = argparse.ArgumentParser(description=\"Explore Volume Shadow Copies\")\n    parser.add_argument(\"-p\", \"--Path\", type=str, help=\"Type path\")\n    parser.add_argument(\"-s\", \"--Hash\", type=str, help=\"Type Hash\")\n    parser.add_argument(\"-f\", \"--FileName\", type=str, help=\"Type File Name\")\n    parser.add_argument(\"-c\", \"--CopyFile\", type=str, help=\"Type File Name\")\n\n    args = parser.parse_args()\n    cs_vss = vss_cls.vss()\n    IDs = cs_vss.get_devicesIDs()\n    \n    if args.Hash:\n        for id in IDs:\n            hashes = args.Hash\n            if \",\" in hashes:\n                hashex = hashes.split(',')\n                for hash in hashex:\n                    files = searc_by_hash(hash, id['ID'])\n                    print(files)\n            else:\n                files = searc_by_hash(hashes, id['ID'])\n                print(files)\n    \n    if args.FileName:\n        for id in IDs:\n            files = searc_by_file_name(args.FileName, id['ID'])\n            print(files)\n    \n    if args.Path:\n        for id in IDs:\n            files = searc_by_folder(args.Path, id['ID'])\n            print(files)\n    \n\nif __name__ == '__main__':\n    main(sys.argv)", "sub_path": "vss.py", "file_name": "vss.py", "file_ext": "py", "file_size_in_byte": 1854, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "serch_cls.search", "line_number": 9, "usage_type": "call"}, {"api_name": "serch_cls.search", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "serch_cls.search", "line_number": 23, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 32, "usage_type": "call"}, {"api_name": "vss_cls.vss", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 66, "usage_type": "attribute"}]} +{"seq_id": "482997766", "text": "import logging\nimport os\nimport time\n\nfrom pip._internal.utils.logging import IndentingFormatter\n\n\nclass TestIndentingFormatter(object):\n \"\"\"\n Test `pip._internal.utils.logging.IndentingFormatter`.\n \"\"\"\n\n def setup(self):\n # Robustify the tests below to the ambient timezone by setting it\n # explicitly here.\n self.old_tz = getattr(os.environ, 'TZ', None)\n os.environ['TZ'] = 'UTC'\n # time.tzset() is not implemented on some platforms (notably, Windows).\n if hasattr(time, 'tzset'):\n time.tzset()\n\n def teardown(self):\n if self.old_tz:\n os.environ['TZ'] = self.old_tz\n else:\n del os.environ['TZ']\n if 'tzset' in dir(time):\n time.tzset()\n\n def test_format(self, tmpdir):\n record = logging.makeLogRecord(dict(\n created=1547704837.4,\n msg='hello\\nworld',\n ))\n f = IndentingFormatter(fmt=\"%(message)s\")\n assert f.format(record) == 'hello\\nworld'\n\n def test_format_with_timestamp(self, tmpdir):\n record = logging.makeLogRecord(dict(\n created=1547704837.4,\n msg='hello\\nworld',\n ))\n f = IndentingFormatter(fmt=\"%(message)s\", add_timestamp=True)\n expected = '2019-01-17T06:00:37 hello\\n2019-01-17T06:00:37 world'\n assert f.format(record) == expected\n", "sub_path": "tests/unit/test_logging.py", "file_name": "test_logging.py", "file_ext": "py", "file_size_in_byte": 1378, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 17, "usage_type": "attribute"}, {"api_name": "time.tzset", "line_number": 20, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "time.tzset", "line_number": 28, "usage_type": "call"}, {"api_name": "logging.makeLogRecord", "line_number": 31, "usage_type": "call"}, {"api_name": "pip._internal.utils.logging.IndentingFormatter", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.makeLogRecord", "line_number": 39, "usage_type": "call"}, {"api_name": "pip._internal.utils.logging.IndentingFormatter", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "66651610", "text": "import keras\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nfrom keras.layers import Input, Dense, Lambda\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras import objectives\nfrom keras.datasets import mnist\nfrom keras.layers.core import Reshape\nfrom __future__ import print_function\nimport numpy as np\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Merge\nfrom keras.layers import Convolution2D, MaxPooling2D,Highway\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D\nfrom keras.utils import np_utils\nfrom 
keras.layers.normalization import BatchNormalization\nfrom keras.callbacks import ModelCheckpoint,LearningRateScheduler\nimport os\nfrom keras.optimizers import SGD\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\n\nnoise_factor = 0.4\nx_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape) \nx_test_noisy = x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape) \n\nx_train_noisy = np.clip(x_train_noisy, 0., 1.)\nx_test_noisy = np.clip(x_test_noisy, 0., 1.)\n\nzero=np.where(y_train==0)\n\nx_train_orig=x_train[zero][0:20]\nx_train=x_train_noisy[zero][0:20]\n\nshape=28\nbatch_size = 30\nnb_classes = 10\nimg_rows, img_cols = shape, shape\nnb_filters = 32\npool_size = (2, 2)\nkernel_size = (3, 3)\ninput_shape=(shape,shape,1)\noriginal_dim = 784\nlatent_dim = 2\nintermediate_dim = 256\nepsilon_std = 1.0\n\nlearning_rate = 0.07\ndecay_rate = 5e-5\nmomentum = 0.9\nsgd = SGD(lr=learning_rate,momentum=momentum, decay=decay_rate, nesterov=True)\n\ndef norm(x):\n return (x-np.min(x))/(np.max(x)-np.min(x))\n\npart=8\nthre=1\n\nrecog=Sequential()\nrecog.add(Dense(64,activation='relu',input_shape=(784,),init='glorot_uniform'))\n\nrecog_left=recog\nrecog_left.add(Dense(64,input_shape=(64,),activation='relu'))\n\nrecog_right=recog\nrecog_right.add(Dense(64,input_shape=(64,),activation='relu'))\nrecog_right.add(Lambda(lambda x: x + K.exp(x / 2) * K.random_normal(shape=(1, 64), mean=0.,\n std=epsilon_std), output_shape=(64,)))\nrecog_right.add(Highway())\nrecog_right.add(Activation('sigmoid'))\n\nrecog1=Sequential()\nrecog1.add(Merge([recog_left,recog_right],mode = 'ave'))\nrecog1.add(Dense(784))\nrecog1.add(Activation('relu'))\n\n#### GATE***\nrecog11=Sequential()\nlayer=Dense(2,init='glorot_uniform',input_shape=(784,))\nlayer.trainable=False\nrecog11.add(layer)\nlayer2=Dense(784, activation='sigmoid',init='glorot_uniform')\nlayer2.trainable=True\nrecog11.add(layer2)\nrecog11.layers[0].W.set_value(np.ones((784,2)).astype(np.float32))\n\nrecog11.compile(loss='mean_squared_error', optimizer=sgd,metrics = ['mae'])\n\nrecog11.get_weights()[0].shape\n\ngan_input = Input(batch_shape=(1,784))\n\ngan_level2 = recog11(recog1(gan_input))\n\nGAN = Model(gan_input, gan_level2)\nGAN.compile(loss='mean_squared_error', optimizer='adam',metrics = ['mae'])\n\nGAN.fit(x_train_orig[0].reshape(1,784), x_train_orig[0].reshape((1,784)), \n batch_size=30, nb_epoch=100,verbose=1)\n\n### UNIQUE BLUEPRINT\na=GAN.predict(x_train_orig[0].reshape(1,784),verbose=1)\n\nplt.figure(figsize=(10, 10))\nax = plt.subplot(1, 2, 1)\nplt.imshow(x_train_orig[0].reshape(28, 28))\nplt.gray()\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\nax = plt.subplot(1, 2, 2)\nplt.imshow(a.reshape(28, 28))\nplt.gray()\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_visible(False)\nplt.show()", "sub_path": "Autoencoder Dimensionality.py", "file_name": "Autoencoder Dimensionality.py", "file_ext": "py", "file_size_in_byte": 3593, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "keras.datasets.mnist.load_data", "line_number": 24, "usage_type": "call"}, {"api_name": "keras.datasets.mnist", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.random.normal", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": 
"numpy.random.normal", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 36, "usage_type": "call"}, {"api_name": "keras.optimizers.SGD", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 65, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 69, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 72, "usage_type": "call"}, {"api_name": "keras.layers.Lambda", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.backend.exp", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 73, "usage_type": "name"}, {"api_name": "keras.backend.random_normal", "line_number": 73, "usage_type": "call"}, {"api_name": "keras.layers.Highway", "line_number": 75, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 76, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 78, "usage_type": "call"}, {"api_name": "keras.layers.Merge", "line_number": 79, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 80, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 81, "usage_type": "call"}, {"api_name": "keras.models.Sequential", "line_number": 84, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 85, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 91, "usage_type": "attribute"}, {"api_name": "keras.layers.Input", "line_number": 97, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gray", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}]} +{"seq_id": "430589151", "text": "from dl.algorithms import 
QLearning\nfrom dl.util import logger, find_monitor\nimport gin, time\nimport torch\nimport numpy as np\n\nfrom deepassist.util import similarities, mask_helipad, rl_evaluate_\n\n\n@gin.configurable\nclass DeepAssist(QLearning):\n def __init__(self, *args, tolerance=0.8, nfqi=True, choose_best=True, break_sim_ties=False, max_grad_norm = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.pilot_policy = self.env.unwrapped.pilot_policy\n self.tolerance = tolerance\n self.nfqi = nfqi\n self.prev_t = self.t\n self.choose_best = choose_best\n self.break_sim_ties = break_sim_ties\n self.max_grad_norm = max_grad_norm\n\n def act(self):\n idx = self.buffer.store_frame(self._ob)\n x = self.buffer.encode_recent_observation()\n with torch.no_grad():\n x = torch.from_numpy(x).to(self.device)\n masked_x = mask_helipad(x)\n qvals = self.net(masked_x[None]).qvals\n qvals -= qvals.min(dim=-1).values\n maxq = qvals.max(dim=-1).values\n \n pilot_ac = self.pilot_policy(x[:9])\n f = similarities(qvals, pilot_ac, self.break_sim_ties).to(self.device)\n mask = torch.zeros(f.shape).to(self.device) - 1\n actions = torch.where(\n qvals >= (1 - self.tolerance) * maxq, \n f,\n mask\n )\n \n if self.choose_best:\n most_sim = actions.max(dim=-1).values\n most_sims_ind = (f == most_sim).nonzero().squeeze(-1).unsqueeze(0)\n inds = torch.gather(qvals, -1, most_sims_ind)\n if self.eps_schedule.value(self.t) > np.random.rand():\n ind = np.random.randint(low=0, high=inds.numel())\n else: \n ind = inds.argmax(-1)\n ac = most_sims_ind[:, ind]\n else:\n ac = actions.argmax(dim=-1)\n\n ac = ac.cpu().numpy()[0]\n\n self._ob, r, done, _ = self.env.step(ac)\n self.buffer.store_effect(idx, ac, r, done)\n if done:\n self._ob = self.env.reset()\n self.t += 1\n\n return done\n\n def loss(self, batch):\n if self.prioritized_replay:\n idx = batch[-1]\n ob, ac, rew, next_ob, done, weight = [torch.from_numpy(x).to(self.device) for x in batch[:-1]]\n else:\n ob, ac, rew, next_ob, done = [torch.from_numpy(x).to(self.device) for x in batch]\n\n ob = mask_helipad(ob)\n next_ob = mask_helipad(next_ob)\n q = self.net(ob, ac).value\n\n with torch.no_grad():\n if self.double_dqn:\n next_ac = self.net(next_ob).max_a\n qtarg = self.target_net(next_ob, next_ac).value\n else:\n qtarg = self.target_net(next_ob).max_q\n assert rew.shape == qtarg.shape\n target = rew + (1.0 - done) * self.gamma * qtarg\n\n assert target.shape == q.shape\n err = self.criterion(target, q)\n\n if self.prioritized_replay:\n self.buffer.update_priorities(idx, err.detach().cpu().numpy() + 1e-6)\n assert err.shape == weight.shape\n err = weight * err\n loss = err.mean()\n self.losses.append(loss)\n if self.t % self.log_period == 0 and self.t > 0:\n logger.add_scalar('alg/maxq', torch.max(q).detach().cpu().numpy(), self.t, time.time())\n\n return loss\n\n def step(self):\n done = self.act()\n while self.buffer.num_in_buffer < min(self.learning_starts, self.buffer.size):\n done = self.act()\n if self.t % self.target_update_period == 0:\n self.target_net.load_state_dict(self.net.state_dict())\n\n if self.nfqi:\n if done:\n for _ in range(self.t - self.prev_t):\n if self.prioritized_replay:\n beta = self.beta_schedule.value(self.t)\n batch = self.buffer.sample(self.batch_size, beta)\n else:\n batch = self.buffer.sample(self.batch_size)\n\n self.opt.zero_grad()\n loss = self.loss(batch)\n loss.backward()\n if self.max_grad_norm:\n nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)\n self.opt.step()\n \n self.prev_t = self.t\n\n else: \n if self.t % 
self.update_period == 0:\n if self.prioritized_replay:\n beta = self.beta_schedule.value(self.t)\n batch = self.buffer.sample(self.batch_size, beta)\n else:\n batch = self.buffer.sample(self.batch_size)\n\n self.opt.zero_grad()\n loss = self.loss(batch)\n loss.backward()\n self.opt.step()\n\n if self.t % self.log_period == 0 and self.t > 0:\n if len(self.losses) != 0:\n self.log()\n\n def evaluate(self):\n self.net.train(False)\n env = self.env_fn(rank=1)\n if self.frame_stack > 1:\n eval_env = EpsilonGreedy(FrameStack(env, self.frame_stack), self.eval_eps)\n else:\n eval_env = EpsilonGreedy(env, self.eval_eps)\n\n os.makedirs(os.path.join(self.logdir, 'eval'), exist_ok=True)\n outfile = os.path.join(self.logdir, 'eval', self.ckptr.format.format(self.t) + '.json')\n stats = rl_evaluate_(eval_env, self.net, self.eval_nepisodes, outfile, self.device)\n logger.add_scalar('eval/mean_episode_reward', stats['mean_reward'], self.t, time.time())\n logger.add_scalar('eval/mean_episode_length', stats['mean_length'], self.t, time.time())\n\n if find_monitor(self.env):\n rl_plot(os.path.join(self.logdir, 'logs'), self.env.spec.id, self.t)\n self.net.train(True)", "sub_path": "deepassist/algorithms/copilot.py", "file_name": "copilot.py", "file_ext": "py", "file_size_in_byte": 5971, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "dl.algorithms.QLearning", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 26, "usage_type": "call"}, {"api_name": "deepassist.util.mask_helipad", "line_number": 27, "usage_type": "call"}, {"api_name": "deepassist.util.similarities", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.gather", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 68, "usage_type": "call"}, {"api_name": "deepassist.util.mask_helipad", "line_number": 70, "usage_type": "call"}, {"api_name": "deepassist.util.mask_helipad", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 74, "usage_type": "call"}, {"api_name": "dl.util.logger.add_scalar", "line_number": 93, "usage_type": "call"}, {"api_name": "dl.util.logger", "line_number": 93, "usage_type": "name"}, {"api_name": "torch.max", "line_number": 93, "usage_type": "call"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "deepassist.util.rl_evaluate_", "line_number": 149, "usage_type": "call"}, {"api_name": "dl.util.logger.add_scalar", "line_number": 150, "usage_type": "call"}, {"api_name": "dl.util.logger", "line_number": 150, "usage_type": "name"}, {"api_name": "time.time", "line_number": 150, "usage_type": "call"}, {"api_name": "dl.util.logger.add_scalar", "line_number": 151, "usage_type": "call"}, {"api_name": "dl.util.logger", "line_number": 151, "usage_type": "name"}, {"api_name": "time.time", "line_number": 151, "usage_type": "call"}, 
{"api_name": "dl.util.find_monitor", "line_number": 153, "usage_type": "call"}, {"api_name": "gin.configurable", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "532900531", "text": "# Import\r\nfrom pynput.keyboard import Key, Controller\r\nimport win32com.client\r\nimport pywintypes\r\nimport win32api\r\nimport win32con\r\nimport win32gui\r\nimport pickle\r\nimport time\r\nimport os\r\n\r\n\r\n# Clear Function\r\ndef cls():\r\n os.system('cls')\r\n\r\n\r\n# Fake Startup\r\nprint(\"Opening AutoSpam.\")\r\ntime.sleep(1)\r\ncls()\r\nprint(\"Opening AutoSpam..\")\r\ntime.sleep(1)\r\ncls()\r\nprint(\"Opening AutoSpam...\")\r\ntime.sleep(1)\r\ncls()\r\n\r\n# Variables/Settings\r\nkeyboard = Controller()\r\namount1 = 0\r\nkey = \"|\"\r\n\r\n\r\ndef AutoSpam():\r\n cls()\r\n global timer, word, inf, app_back, app, amount1, amount2, number, chose, key\r\n # Menu\r\n print(\"Welcome to AutoSpamv1.1\")\r\n chose = int(input(\"Main Menu\\n1 = Keyboard\\n2 = Other\\n:\"))\r\n if chose == 1:\r\n cls()\r\n saves = str(input(\"Use saved settings? [y/n]\\n[if you don't have saved settings it will use the default \"\r\n \"settings]\\n:\")).lower()\r\n if saves == \"y\":\r\n word, number, amount1, amount2, timer, app_back, app = pickle.load(open(\"settings.dat\", \"rb\"))\r\n cls()\r\n print(\"Settings Loaded\")\r\n time.sleep(0.5)\r\n elif saves == \"n\":\r\n cls()\r\n word = str(input(\"1.Type what you want to spam.\\n[Type the Keyword to add an newline]\\n[Current Keyword: \"\r\n \"\" + key + \"]\\n:\"))\r\n cls()\r\n number = str(input(\"2.Enable Increasing Number? [y/n]\\n:\")).lower()\r\n if number == \"y\":\r\n amount1 = int(input(\"2a.Type on what number the increasing number starts\\n:\"))\r\n amount2 = int(input(\"2b.Type on what number the increasing number starts\\n:\"))\r\n elif number == \"n\":\r\n amount2 = input(\"2b.Type how many times to repeat the word.\\n[Type inf for infinity]\\n:\")\r\n cls()\r\n timer = float(\r\n input(\"3.Type how long for word to be sent.\\n(Measured in Seconds)\\n:\"))\r\n cls()\r\n app_back = str(input(\"4.Enable instant tabbing and un tabbing to application? [y/n]\\n[If you disable this \"\r\n \"you have to manually select the application]\\n:\")).lower()\r\n if app_back == \"y\":\r\n app = str(input(\"4a.Type the name of your application\\n:\")).lower()\r\n elif app_back == \"n\":\r\n app = \"None\"\r\n cls()\r\n save = str(input(\"Save settings? [y/n]\\n:\")).lower()\r\n if save == \"y\":\r\n pickle.dump([word, number, amount1, amount2, timer, app_back, app], open(\"settings.dat\", \"wb\"))\r\n print(\"Settings Saved.\")\r\n cls()\r\n correction = str(\r\n input(\"Are you sure these are the correct settings? 
[y/n]\\n1.\" + str(word) + \" (Word)\\n2.\" + str(\r\n number) + \" (Increasing Number)\\n2a.\" + str(amount1) + \" (Starting Number)\\n2b.\" + str(\r\n amount2) + \" (Stopping Number)\\n3.\" + str(timer) + \" (Time between)\\n4.\" + str(app_back) + \" (Instant \"\r\n \"Select)\\n4a.\" + str(app) + \" (Selected Application)\\n:\")).lower()\r\n if correction == \"n\":\r\n AutoSpam()\r\n\r\n # Accurate Number Fix\r\n amount2 += 1\r\n\r\n # Inf Check\r\n inf = False\r\n if amount2 == \"inf\":\r\n amount2 = 1\r\n inf = True\r\n\r\n # Countdown Loop\r\n time.sleep(1)\r\n cls()\r\n input(\"Press Enter to Start\\n\")\r\n cls()\r\n print(\"AutoSpam is starting in\")\r\n s = 5\r\n while s != 0:\r\n print(s)\r\n s -= 1\r\n time.sleep(1)\r\n\r\n # Message\r\n cls()\r\n print(\"AutoSpam is Running...\")\r\n if inf:\r\n print(\"inf is enabled exit the program to stop it\")\r\n\r\n def windowEnumerationHandler(hwnd, top_windows):\r\n global amount2\r\n top_windows.append((hwnd, win32gui.GetWindowText(hwnd)))\r\n\r\n # While Loop\r\n while amount1 != amount2:\r\n if app_back == \"y\":\r\n if __name__ == \"__main__\":\r\n results = []\r\n top_windows = []\r\n win32gui.EnumWindows(windowEnumerationHandler, top_windows)\r\n for i in top_windows:\r\n if app in i[1].lower():\r\n win32gui.ShowWindow(i[0], win32con.SW_MAXIMIZE)\r\n shell = win32com.client.Dispatch(\"WScript.Shell\")\r\n shell.SendKeys('%')\r\n win32gui.SetForegroundWindow(i[0])\r\n if number == \"y\":\r\n keyboard.type(str(amount1))\r\n for c in word:\r\n if c == key:\r\n keyboard.press(Key.shift_l)\r\n keyboard.press(Key.enter)\r\n keyboard.release(Key.shift_l)\r\n keyboard.release(Key.enter)\r\n elif c != key:\r\n keyboard.press(c)\r\n keyboard.release(c)\r\n keyboard.press(Key.enter)\r\n keyboard.release(Key.enter)\r\n win32gui.ShowWindow(i[0], win32con.SW_MINIMIZE)\r\n time.sleep(timer)\r\n if not inf:\r\n amount1 += 1\r\n elif app_back == \"n\":\r\n if number == \"y\":\r\n keyboard.type(str(amount1))\r\n for c in word:\r\n if c == key:\r\n keyboard.press(Key.shift_l)\r\n keyboard.press(Key.enter)\r\n keyboard.release(Key.shift_l)\r\n keyboard.release(Key.enter)\r\n elif c != key:\r\n keyboard.press(c)\r\n keyboard.release(c)\r\n keyboard.press(Key.enter)\r\n keyboard.release(Key.enter)\r\n time.sleep(timer)\r\n if not inf:\r\n amount1 += 1\r\n\r\n elif chose == 2:\r\n cls()\r\n other = int(input(\"Other\\n1 = Keyword\\n2 = Back\\n:\"))\r\n if other == 1:\r\n cls()\r\n key = str(input(\"Type a letter when used will make an newline.\\n[If you make a keyword longer than 1 \"\r\n \"letter it will use the first letter instead]\\n[Current Keyword: \" + key + \"]\\n:\"))\r\n key = key[0:1]\r\n cls()\r\n AutoSpam()\r\n\r\n elif other == \"yeetus betus deletus\":\r\n print(\"yeet\")\r\n # Restart\r\n continues = str(input(\"Do you want to go back to the main menu? 
[y/n]\\n:\"))\r\n if continues == \"y\":\r\n cls()\r\n AutoSpam()\r\n\r\n\r\nAutoSpam()\r\n\r\n# Quit\r\ncls()\r\nprint(\"Closing AutoSpam.\")\r\ntime.sleep(1)\r\ncls()\r\nprint(\"Closing AutoSpam..\")\r\ntime.sleep(1)\r\ncls()\r\nprint(\"Closing AutoSpam...\")\r\ntime.sleep(1)\r\nquit()\r\n", "sub_path": "AutoSpam.py", "file_name": "AutoSpam.py", "file_ext": "py", "file_size_in_byte": 7091, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.system", "line_number": 15, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 26, "usage_type": "call"}, {"api_name": "pynput.keyboard.Controller", "line_number": 30, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 46, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 49, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 74, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 95, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "win32gui.GetWindowText", "line_number": 114, "usage_type": "call"}, {"api_name": "win32gui.EnumWindows", "line_number": 122, "usage_type": "call"}, {"api_name": "win32gui.ShowWindow", "line_number": 125, "usage_type": "call"}, {"api_name": "win32con.SW_MAXIMIZE", "line_number": 125, "usage_type": "attribute"}, {"api_name": "win32com.client.client.Dispatch", "line_number": 126, "usage_type": "call"}, {"api_name": "win32com.client.client", "line_number": 126, "usage_type": "attribute"}, {"api_name": "win32com.client", "line_number": 126, "usage_type": "name"}, {"api_name": "win32gui.SetForegroundWindow", "line_number": 128, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key.shift_l", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 133, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 134, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 134, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.shift_l", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 135, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 136, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 140, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 141, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 141, "usage_type": "name"}, {"api_name": "win32gui.ShowWindow", "line_number": 142, "usage_type": "call"}, {"api_name": "win32con.SW_MINIMIZE", "line_number": 142, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 143, "usage_type": "call"}, {"api_name": "pynput.keyboard.Key.shift_l", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 151, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 152, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.shift_l", "line_number": 153, "usage_type": 
"attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 153, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 154, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 158, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 158, "usage_type": "name"}, {"api_name": "pynput.keyboard.Key.enter", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pynput.keyboard.Key", "line_number": 159, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 160, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 189, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 192, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 195, "usage_type": "call"}]} +{"seq_id": "423662907", "text": "\"\"\" Script to execute benchmarking tests on various hash table implementations\n\n execute using\n python bench.py\n\n Nick Roseveare, Jan 2019\n\"\"\"\nimport sys\nimport subprocess\nimport numpy as np\nfrom tqdm import tqdm\nimport os\n\nprograms = [\n # 'std_unordered_map',\n 'boost_unordered_map',\n 'google_sparse_hash_map',\n 'google_dense_hash_map',\n 'google_dense_hash_map_mlf_0_9',\n 'libcuckoo_map',\n 'qt_qhash',\n 'spp_sparse_hash_map',\n # 'emilib_hash_map',\n # 'ska_flat_hash_map',\n # 'ska_flat_hash_map_power_of_two',\n 'tsl_sparse_map',\n 'tsl_hopscotch_map',\n 'tsl_hopscotch_map_mlf_0_5',\n 'tsl_hopscotch_map_store_hash',\n 'tsl_robin_map',\n 'tsl_robin_map_mlf_0_9',\n 'tsl_robin_map_store_hash',\n 'tsl_robin_pg_map',\n 'tsl_ordered_map',\n 'tsl_array_map',\n 'tsl_array_map_mlf_1_0'\n]\n\nnewcsvfile = True\n\nbase_size = int(1e5)\nminkeys = 2 * base_size\nmaxkeys = 30 * base_size\ninterval = 2 * base_size\nbest_out_of = 10\n\ndebug = 0\n# we have a small file for the minimum runtimes/usages found, and another for\n# with more statistics\n# outfile = 'modified_cuckoo_compare.csv'\n# outfile_all_stats = 'modified_cuckoo_compare.stats.csv'\noutfile = 'output_results.csv'\noutfile_all_stats = 'output_results.stats.csv'\n\nif len(sys.argv) > 1:\n benchtypes = sys.argv[1:]\nelse:\n benchtypes = [\n\n # 'insert_random_shuffle_range_reserve',\n 'insert_random_full_reserve',\n\n # 'read_random_shuffle_range_reserve',\n # 'read_random_full_reserve',\n # 'read_miss_random_full_reserve',\n # 'read_random_full_after_delete_reserve',\n\n # 'delete_random_full_reserve',\n\n # 'iteration_random_full_reserve'\n\n 'insert_random_shuffle_range',\n 'insert_random_full',\n\n 'read_random_shuffle_range',\n 'read_random_full',\n 'read_miss_random_full',\n 'read_random_full_after_delete',\n\n 'delete_random_full',\n\n 'iteration_random_full'\n\n\n 'insert_string',\n 'insert_string_reserve',\n 'insert_small_string',\n 'insert_small_string_reserve',\n\n 'read_string',\n 'read_miss_string',\n 'read_string_after_delete',\n 'read_small_string',\n 'read_miss_small_string',\n 'read_small_string_after_delete',\n\n 'delete_string',\n 'delete_small_string',\n\n ]\n\nnkeys_range = range(maxkeys, minkeys-1, -interval)\ntotal = (len(programs) * len(nkeys_range) * len(benchtypes))\nif debug == 0:\n progress_bar = tqdm(total=total)\ncount = 0\nif total > 0:\n if newcsvfile or not(os.path.isfile(outfile)):\n with open(outfile, 'w') as file:\n file.write(('test_type, nkeys, hash_table_algo, lf_min, mem_bytes_min, '\n 'runtime_sec_min'))\n file.write('\\n')\n if newcsvfile or 
not(os.path.isfile(outfile_all_stats)):\n with open(outfile_all_stats, 'w') as file:\n file.write(\n 'test_type, nkeys, hash_table_algo, lf_min, lf_avg, lf_std, lf_max, '\n 'mem_bytes_min, mem_bytes_avg, mem_bytes_std, mem_bytes_max, '\n 'runtime_sec_min, runtime_sec_avg, runtime_sec_std, runtime_sec_max')\n file.write('\\n')\n\nrt_attempts = np.nan * np.ones(best_out_of, dtype=float)\nmu_attempts = np.nan * np.ones(best_out_of, dtype=int)\nlf_attempts = np.nan * np.ones(best_out_of, dtype=float)\n\nfor nkey_idx, nkeys in enumerate(nkeys_range):\n for benchtype in benchtypes:\n for program in programs:\n if debug == 0:\n count += 1\n progress_bar.n = count\n progress_bar.set_description((\n f'nkeys[{nkeys}] '\n f'test[{benchtype}] '\n f'program[{program}]'))\n if program.startswith('tsl_array_map') and 'string' not in benchtype:\n continue\n\n fastest_attempt = np.inf\n fastest_attempt_data = ''\n\n rt_attempts[:] = np.nan\n mu_attempts[:] = np.nan\n lf_attempts[:] = np.nan\n for attempt in range(best_out_of):\n try:\n output = subprocess.check_output(\n ['./build/' + program, str(nkeys), benchtype])\n words = output.strip().split()\n\n runtime_seconds = float(words[0])\n memory_usage_bytes = int(words[1])\n load_factor = float(words[2])\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n if debug:\n print((f\"Error with {('./build/' + program)}\"\n f\"[{nkeys}: {benchtype}]\"))\n break\n\n rt_attempts[attempt] = np.round(runtime_seconds, decimals=7)\n mu_attempts[attempt] = memory_usage_bytes\n lf_attempts[attempt] = np.round(load_factor, decimals=3)\n\n if runtime_seconds < fastest_attempt:\n fastest_attempt = runtime_seconds\n fastest_attempt_data = ','.join(\n map(str, [benchtype, nkeys, program, load_factor,\n memory_usage_bytes, runtime_seconds]))\n\n if ~np.isinf(fastest_attempt):\n\n with open(outfile, 'a') as file:\n file.write(fastest_attempt_data)\n # Print blank line\n file.write('\\n')\n\n if debug:\n print(fastest_attempt_data)\n\n verbose_line = ','.join(map(str, [benchtype, nkeys, program]))\n # load factor\n nonnan = lf_attempts[~np.isnan(rt_attempts)]\n if nonnan.size:\n _min, mean, std, _max = (nonnan.min(), nonnan.mean(),\n nonnan.std(), nonnan.max())\n verbose_line += ',' + ','.join(map(str,\n [_min, mean, std, _max]))\n # memory used\n nonnan = mu_attempts[~np.isnan(rt_attempts)]\n if nonnan.size:\n _min, mean, std, _max = (nonnan.min(), nonnan.mean(),\n nonnan.std(), nonnan.max())\n verbose_line += ',' + ','.join(map(str,\n [_min, mean, std, _max]))\n # runtime seconds\n nonnan = rt_attempts[~np.isnan(rt_attempts)]\n if nonnan.size:\n _min, mean, std, _max = (nonnan.min(), nonnan.mean(),\n nonnan.std(), nonnan.max())\n verbose_line += ',' + ','.join(map(str,\n [_min, mean, std, _max]))\n\n with open(outfile_all_stats, 'a') as file:\n file.write(verbose_line)\n file.write('\\n')\n\n if debug:\n print('\\n')\n", "sub_path": "bench.py", "file_name": "bench.py", "file_ext": "py", "file_size_in_byte": 6901, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.argv", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", 
"line_number": 113, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 121, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 122, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 123, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 138, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 142, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 143, "usage_type": "attribute"}, {"api_name": "subprocess.check_output", "line_number": 146, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 161, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.isinf", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 190, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 197, "usage_type": "call"}]} +{"seq_id": "194936892", "text": "#!/usr/bin/env python\nimport logging\nimport os\nimport time\nimport unittest\n\nimport phpbb2slack\n\n\nclass TestPHPBB2Slack(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Set up environment.\"\"\"\n logging.basicConfig(level=logging.CRITICAL)\n self.logger = logging.getLogger()\n self.logger.disabled = True\n\n def test_format_message(self):\n \"\"\"Test format_message().\"\"\"\n test_data = [\n {\n 'url': 'http://www.example.com',\n 'attrs': {\n 'category': 'test',\n 'comments_cnt': 12,\n 'title': 'someTitle',\n },\n 'handle': 'someHandle',\n 'expected': '[someHandle-test] someTitle (12) | http://www.example.com\\n',\n },\n {\n 'url': 'http://www.example.com',\n 'attrs': {\n 'category': '',\n 'comments_cnt': 1,\n 'title': 'someTitle',\n },\n 'handle': 'someHandle',\n 'expected': '[someHandle] someTitle (1) | http://www.example.com\\n',\n },\n ]\n for data in test_data:\n message = phpbb2slack.format_message(\n data['url'], data['attrs'], data['handle']\n )\n self.assertEqual(message, data['expected'])\n\n def test_get_authors_from_file(self):\n \"\"\"Test get_authors_from_file().\"\"\"\n authors_file = os.path.join(\n os.path.dirname(__file__), 'files', 'authors.txt'\n )\n expected_authors = [\n 'author1',\n 'author2',\n ]\n authors = phpbb2slack.get_authors_from_file(self.logger, authors_file)\n self.assertEqual(authors, expected_authors)\n\n def test_get_authors_from_file_no_file(self):\n \"\"\"Test get_authors_from_file() when no file is given.\"\"\"\n authors_file = ''\n expected_authors = []\n authors = phpbb2slack.get_authors_from_file(self.logger, authors_file)\n self.assertEqual(authors, expected_authors)\n\n def test_scrub_cache(self):\n \"\"\"Test scrub_cache().\"\"\"\n item_expiration = int(time.time()) + 60\n test_cache = {\n 'foo': {\n 'expiration': item_expiration,\n },\n 'bar': {\n 'expiration': int(time.time()) - 3600,\n },\n 'lar': {\n 'abc': 'efg',\n },\n }\n expected = {\n 'foo': {\n 'expiration': item_expiration,\n }\n }\n phpbb2slack.scrub_cache(self.logger, test_cache)\n self.assertEqual(test_cache, expected)\n\n def test_update_cache(self):\n \"\"\"Test update_cache().\"\"\"\n item_expiration = 
int(time.time()) + 60\n news = {\n 'http://example.com': {\n 'comments_cnt': 2,\n },\n 'http://www.example.com': {\n 'comments_cnt': 20,\n },\n }\n cache = {\n 'http://example.com': {\n 'expiration': 0,\n 'comments_cnt': 1,\n },\n }\n expected_cache = {\n 'http://example.com': {\n 'expiration': item_expiration,\n 'comments_cnt': 2,\n },\n 'http://www.example.com': {\n 'expiration': item_expiration,\n 'comments_cnt': 20,\n },\n }\n phpbb2slack.update_cache(cache, news, item_expiration)\n self.assertEqual(cache, expected_cache)\n", "sub_path": "tests/test_phpbb2slack.py", "file_name": "test_phpbb2slack.py", "file_ext": "py", "file_size_in_byte": 3591, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.CRITICAL", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 15, "usage_type": "call"}, {"api_name": "phpbb2slack.format_message", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 50, "usage_type": "call"}, {"api_name": "os.path", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "phpbb2slack.get_authors_from_file", "line_number": 57, "usage_type": "call"}, {"api_name": "phpbb2slack.get_authors_from_file", "line_number": 64, "usage_type": "call"}, {"api_name": "time.time", "line_number": 69, "usage_type": "call"}, {"api_name": "time.time", "line_number": 75, "usage_type": "call"}, {"api_name": "phpbb2slack.scrub_cache", "line_number": 86, "usage_type": "call"}, {"api_name": "time.time", "line_number": 91, "usage_type": "call"}, {"api_name": "phpbb2slack.update_cache", "line_number": 116, "usage_type": "call"}]} +{"seq_id": "229256779", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.urls import reverse\nfrom django.views import generic\nfrom django.template import loader\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom .models import Category, Product\nfrom .forms import CategoryForm, ProductForm\nfrom django.utils import timezone\n\ndef category_list(request, pk=None):\n \"\"\" display the list of categories \"\"\"\n\n categoryList = Category.objects.filter(parentId=pk).order_by('id')\n category = None\n if pk:\n category = get_object_or_404(Category, pk=pk) \n\n page = request.GET.get('page', 1)\n paginator = Paginator(categoryList, 15)\n try:\n categories = paginator.page(page)\n except PageNotAnInteger:\n categories = paginator.page(1)\n except EmptyPage:\n categories = paginator.page(paginator.num_pages)\n\n return render(request, 'myTestApplication/category_list.html', {'categoryList':categoryList, 'pk':pk, 'category':category, 'categories':categories})\n\ndef category_new(request, pk=None):\n \"\"\" add new category \"\"\"\n\n category=None\n if pk:\n category = get_object_or_404(Category, pk=pk)\n\n if request.method == \"POST\":\n form = CategoryForm(request.POST)\n if form.is_valid():\n categoryAction = form.save(commit=False)\n categoryAction.save()\n if category is None:\n return 
redirect('myTestApplication:category_list')\n else:\n return redirect('myTestApplication:category_list', pk=pk)\n else:\n form = CategoryForm(initial={'parentId': pk})\n\n return render(request, 'myTestApplication/category_edit.html', {'form':form, 'pk':pk})\n\ndef category_edit(request, pk=None):\n \"\"\" edit existing category \"\"\"\n\n category = get_object_or_404(Category, pk=pk)\n parentId = category.parentId\n if request.method == \"POST\":\n form = CategoryForm(request.POST, instance=category)\n if form.is_valid():\n categoryAction = form.save(commit=False)\n categoryAction.save()\n return redirect('myTestApplication:category_list')\n else:\n form = CategoryForm(instance=category)\n\n return render(request, 'myTestApplication/category_edit.html', {'form':form, 'category':category})\n\ndef category_delete(request, pk):\n \"\"\" delete existing category \"\"\"\n\n category = get_object_or_404(Category, pk=pk)\n category.delete()\n\n return redirect('myTestApplication:category_list')\n\ndef product_list(request, pk):\n \"\"\" display the list of products \"\"\"\n\n productList = Product.objects.filter(categoryId=pk).order_by('id')\n category = get_object_or_404(Category, pk=pk)\n\n page = request.GET.get('page', 1)\n paginator = Paginator(productList, 15)\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n return render(request, 'myTestApplication/product_list.html', {'productList':productList, 'pk':pk, 'category':category, 'products':products})\n\ndef product_edit(request, categoryId, pk=None):\n \"\"\" add new product \"\"\"\n\n product = None\n if pk:\n product = get_object_or_404(Product, pk=pk)\n\n if request.method == \"POST\":\n form = ProductForm(request.POST, instance = product)\n if form.is_valid():\n product = form.save(commit=False)\n product.save() \n return redirect('myTestApplication:product_list', pk=categoryId)\n else:\n form = ProductForm(instance = product, initial={'categoryId': categoryId})\n\n return render(request, 'myTestApplication/product_edit.html', {'form':form, 'pk':pk, 'categoryId':categoryId, 'product':product})\n\ndef product_delete(request, categoryId, pk):\n \"\"\" delete product \"\"\"\n\n product = get_object_or_404(Product, pk=pk)\n product.delete()\n\n return redirect('myTestApplication:product_list', pk=categoryId)\n\n\nimport csv\ndef download_csv(request, queryset, exportType=None):\n \"\"\"\n function to download csv file\n exportType = 'category' to export categories\n exportType = 'product' to export products\n \"\"\"\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment;filename=export.csv'\n writer = csv.writer(response)\n\n if exportType == 'category':\n writer.writerow([str(queryset[0]),])\n writer.writerow([\n '---',\n str(u\"ID\"),\n str(u\"Name\"),\n ])\n print(queryset[1:])\n for obj in queryset[1:]:\n for item in obj:\n writer.writerow([\n '---',\n str(item.id),\n str(item.categoryName),\n str(item),\n ])\n elif exportType == 'product':\n writer.writerow([str(queryset[0]),])\n writer.writerow([\n '---',\n str(u\"Id\"),\n str(u\"productName\"),\n str(u\"Description\"),\n ])\n for obj in queryset[1:]:\n for item in obj:\n writer.writerow([\n '---',\n str(item.id),\n str(item.productName),\n str(item.productDescription),\n ])\n else:\n writer.writerow(queryset)\n\n return response\n\ndef export_csv_file(request, data, exportType=None):\n\n data = download_csv(request, data, 
exportType=exportType)\r\n    return HttpResponse(data, content_type='text/csv')\r\n\r\ndef export_categories(request, pk):\r\n    \"\"\"function to export categories\"\"\"\r\n\r\n    topLevelCategory = Category.objects.get(id=pk) \r\n    subcategory = topLevelCategory.category_set.all()\r\n    data = [topLevelCategory, subcategory]\r\n\r\n    return export_csv_file(request, data, exportType='category') \r\n\r\ndef export_products(request, pk):\r\n    \"\"\" function to export products\"\"\"\r\n\r\n    parentCategory = Category.objects.get(id=pk)\r\n    products = Product.objects.filter(categoryId=pk)\r\n    data = [parentCategory, products]\r\n\r\n    return export_csv_file(request, data, exportType='product')\r\n\r\nfrom django.db import connection\r\ndef export_total(request, pk):\r\n    \"\"\"ugly report =(\"\"\"\r\n\r\n    cursor = connection.cursor()\r\n    data = []\r\n    for p in Category.objects.raw('SELECT * FROM myTestApplication_product'\r\n        ' LEFT JOIN myTestApplication_category'\r\n        ' ON myTestApplication_product.categoryId_id = myTestApplication_category.id'\r\n        ' WHERE myTestApplication_category.parentId_id = %s', [pk]):\r\n        data.append(p.categoryName + ',' + str(p.id) + ',' + p.productName + ',' + p.productDescription + '\\n')\r\n\r\n    return export_csv_file(request, data)\r\n", "sub_path": "myTestApplication/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6881, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "models.Category.objects.filter", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 16, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 16, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.core.paginator.Paginator", "line_number": 22, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 25, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 27, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 37, "usage_type": "argument"}, {"api_name": "forms.CategoryForm", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 45, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "forms.CategoryForm", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 56, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 56, "usage_type": "argument"}, {"api_name": "forms.CategoryForm", "line_number": 59, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 63, "usage_type": "call"}, {"api_name": "forms.CategoryForm", "line_number": 65, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 72, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 72, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 75, "usage_type": "call"}, {"api_name": 
"models.Product.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 80, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 81, "usage_type": "call"}, {"api_name": "models.Category", "line_number": 81, "usage_type": "argument"}, {"api_name": "django.core.paginator.Paginator", "line_number": 84, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 87, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 89, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 92, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 99, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 99, "usage_type": "argument"}, {"api_name": "forms.ProductForm", "line_number": 102, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 106, "usage_type": "call"}, {"api_name": "forms.ProductForm", "line_number": 108, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 115, "usage_type": "call"}, {"api_name": "models.Product", "line_number": 115, "usage_type": "argument"}, {"api_name": "django.shortcuts.redirect", "line_number": 118, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 128, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 130, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 172, "usage_type": "call"}, {"api_name": "models.Category.objects.get", "line_number": 177, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 177, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 177, "usage_type": "name"}, {"api_name": "models.Category.objects.get", "line_number": 186, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 186, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 186, "usage_type": "name"}, {"api_name": "models.Product.objects.filter", "line_number": 187, "usage_type": "call"}, {"api_name": "models.Product.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "models.Product", "line_number": 187, "usage_type": "name"}, {"api_name": "django.db.connection.cursor", "line_number": 196, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 196, "usage_type": "name"}, {"api_name": "models.Category.objects.raw", "line_number": 198, "usage_type": "call"}, {"api_name": "models.Category.objects", "line_number": 198, "usage_type": "attribute"}, {"api_name": "models.Category", "line_number": 198, "usage_type": "name"}]} +{"seq_id": "449522212", "text": "from utils.io_utils import IOUtils\nfrom utils.string_utils import StringUtils\nfrom multiprocessing.pool import Pool\nimport os\n\n\nclass OneHotVectoriser:\n\n def __init__(self):\n self.voc = dict()\n self.input_dir = ''\n self.vocab_path = ''\n self.seq_dir = ''\n self.next_dir = ''\n self.out_dir = ''\n self.seq_length = 4\n self.sequences_step = 1\n\n def clean_files_for_full_stops(self):\n in_files = IOUtils.get_all_file_names_in_dir(self.input_dir, 'json')\n p = Pool(5)\n for in_file in in_files:\n in_json = IOUtils.get_json_from_json_file(os.path.join(self.input_dir, in_file))\n 
clean_data = list(p.map(StringUtils.clean_full_stops, in_json))\n IOUtils.write_json_data_to_file(\"{}/{}\".format(self.input_dir, in_file), clean_data)\n\n def remove_duplicates(self):\n import os\n duplicate_map = dict()\n in_files = IOUtils.get_all_file_names_in_dir(self.input_dir, 'json')\n for in_file in in_files:\n in_json = IOUtils.get_json_from_json_file(os.path.join(self.input_dir, in_file))\n clean_data = list(filter(lambda x: StringUtils.is_not_duplicate(x, duplicate_map), in_json))\n IOUtils.write_json_data_to_file(\"{}/{}\".format(self.input_dir, in_file), clean_data)\n\n def clean_data_to_remove_smaller_ones(self):\n import json\n in_files = IOUtils.get_all_file_names_in_dir(self.input_dir, 'json')\n for in_file in in_files:\n in_json_list = IOUtils.get_json_from_json_file(os.path.join(self.input_dir, in_file))\n out_json = []\n for in_json in in_json_list:\n k = list(map(lambda x: x.strip().lower(), in_json.split(\" \")))\n if len(k) > 10:\n list(map(self.add_to_dict, k))\n out_json.append(\" \".join(k))\n\n IOUtils.write_json_data_to_file(\"{}/{}\".format(self.input_dir, in_file), out_json, indent=2)\n with open(\"{}/{}\".format(self.out_dir, \"voc.json\"), \"w+\") as f:\n json.dump(self.voc, f)\n\n def clean_data_to_remove_un_frequent_words(self, median):\n import json\n with open(\"{}/{}\".format(self.out_dir, \"voc.json\"), \"r\") as f:\n voc_local = json.load(f)\n in_files = IOUtils.get_all_file_names_in_dir(self.input_dir, 'json')\n for in_file in in_files:\n in_json_list = IOUtils.get_json_from_json_file(os.path.join(self.input_dir, in_file))\n out_json = []\n for in_json in in_json_list:\n k = list(map(lambda x: x.strip().lower(), in_json.split(\" \")))\n fi = list(filter(lambda y: not (len(y) == 1 and y != \"a\" and y != \"i\" and y != \"*\"), k))\n fi = list(map(lambda y: y if voc_local[y] >= median else \"#ner\", fi))\n out_json.append(\" \".join(fi))\n IOUtils.write_json_data_to_file(\"{}/{}\".format(self.input_dir, in_file), out_json, indent=2)\n\n def remove_multiple_ners(self):\n in_files = IOUtils.get_all_file_names_in_dir(self.input_dir, 'json')\n for in_file in in_files:\n in_json = IOUtils.get_json_from_json_file(os.path.join(self.input_dir, in_file))\n clean_data = list(filter(lambda x: x.count(\"#ner\") <= 3 and len(x) > 50, in_json))\n clean_data = list(map(StringUtils.work_to_clean_up_ners, clean_data))\n IOUtils.write_json_data_to_file(\"{}/{}\".format(self.input_dir, in_file), clean_data)\n\n def create_sequences(self, word_list):\n sequences = []\n next_words = []\n for i in range(0, len(word_list) - self.seq_length, self.sequences_step):\n sequences.append(word_list[i: i + self.seq_length])\n next_words.append(word_list[i + self.seq_length])\n return sequences, next_words\n\n def create_sentence_corpus(self):\n total_seq = []\n total_next = []\n input_count = 1\n import random\n in_files = IOUtils.get_all_file_names_in_dir(self.input_dir, 'json')\n for in_file in in_files:\n in_json = IOUtils.get_json_from_json_file(os.path.join(self.input_dir, in_file))\n sentence_list = list(map(StringUtils.get_word_list, in_json))\n random.shuffle(sentence_list)\n sequences_list = list()\n next_words_list = list()\n input_count += len(sentence_list)\n\n for word_list in sentence_list:\n sequences, next_words = self.create_sequences(word_list)\n sequences_list += sequences\n next_words_list += next_words\n\n assert len(sequences_list) == len(next_words_list)\n IOUtils.write_json_data_to_file(\"{}/{}\".format(self.seq_dir, in_file), sequences_list, indent=2)\n 
IOUtils.write_json_data_to_file(\"{}/{}\".format(self.next_dir, in_file), next_words_list, indent=2)\n total_next += next_words_list\n total_seq += sequences_list\n print(\"Total input count is: {}\".format(input_count))\n return total_seq, total_next\n\n def create_vocabulary(self, word_list):\n import collections\n # count the number of words\n word_counts = collections.Counter(word_list)\n\n # Mapping from index to word : that's the vocabulary\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n\n # Mapping from word to index\n vocab = {x: i for i, x in enumerate(vocabulary_inv)}\n words = [x[0] for x in word_counts.most_common()]\n\n # size of the vocabulary\n vocab_size = len(words)\n print(\"vocab size: \", vocab_size)\n\n from six.moves import cPickle\n # save the words and vocabulary\n with open(os.path.join(self.vocab_path), 'wb') as f:\n cPickle.dump((words, vocab, vocabulary_inv), f)\n\n return vocab, vocab_size\n\n def create_vectors(self, vocab, vocab_size, sequences, next_words, seq_length=4):\n import numpy as np\n import pickle\n X = np.zeros((len(sequences), seq_length, vocab_size), dtype=np.bool)\n y = np.zeros((len(sequences), vocab_size), dtype=np.bool)\n for i, sentence in enumerate(sequences):\n for t, word in enumerate(sentence):\n X[i, t, vocab[word]] = 1\n y[i, vocab[next_words[i]]] = 1\n pickle.dump(X, self.out_dir + \"X.pk\")\n pickle.dump(y, self.out_dir + \"y.pk\")\n return X, y\n\n def add_to_dict(self, wd):\n if wd in self.voc:\n self.voc[wd] += 1\n else:\n self.voc[wd] = 1\n\n def median(self):\n import json\n import statistics\n with open(\"{}/{}\".format(self.input_dir, \"voc.json\"), \"r\") as f:\n d = json.load(f)\n ls = d.values()\n ls = list(filter(lambda x: x > 9, ls))\n print(statistics.median(ls))\n", "sub_path": "vectorizer/one_hot_vectorizer.py", "file_name": "one_hot_vectorizer.py", "file_ext": "py", "file_size_in_byte": 6902, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "utils.io_utils.IOUtils.get_all_file_names_in_dir", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 20, "usage_type": "name"}, {"api_name": "multiprocessing.pool.Pool", "line_number": 21, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils.get_json_from_json_file", "line_number": 23, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 23, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "utils.string_utils.StringUtils.clean_full_stops", "line_number": 24, "usage_type": "attribute"}, {"api_name": "utils.string_utils.StringUtils", "line_number": 24, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.write_json_data_to_file", "line_number": 25, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 25, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.get_all_file_names_in_dir", "line_number": 30, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 30, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.get_json_from_json_file", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 32, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, 
"usage_type": "attribute"}, {"api_name": "utils.string_utils.StringUtils.is_not_duplicate", "line_number": 33, "usage_type": "call"}, {"api_name": "utils.string_utils.StringUtils", "line_number": 33, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.write_json_data_to_file", "line_number": 34, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 34, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.get_all_file_names_in_dir", "line_number": 38, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.get_json_from_json_file", "line_number": 40, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "utils.io_utils.IOUtils.write_json_data_to_file", "line_number": 48, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 48, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 50, "usage_type": "call"}, {"api_name": "json.load", "line_number": 55, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils.get_all_file_names_in_dir", "line_number": 56, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 56, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.get_json_from_json_file", "line_number": 58, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "utils.io_utils.IOUtils.write_json_data_to_file", "line_number": 65, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 65, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.get_all_file_names_in_dir", "line_number": 68, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 68, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.get_json_from_json_file", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "utils.string_utils.StringUtils.work_to_clean_up_ners", "line_number": 72, "usage_type": "attribute"}, {"api_name": "utils.string_utils.StringUtils", "line_number": 72, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.write_json_data_to_file", "line_number": 73, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 73, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.get_all_file_names_in_dir", "line_number": 88, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 88, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.get_json_from_json_file", "line_number": 90, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 90, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "utils.string_utils.StringUtils.get_word_list", "line_number": 91, "usage_type": "attribute"}, {"api_name": "utils.string_utils.StringUtils", "line_number": 91, 
"usage_type": "name"}, {"api_name": "random.shuffle", "line_number": 92, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils.write_json_data_to_file", "line_number": 103, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 103, "usage_type": "name"}, {"api_name": "utils.io_utils.IOUtils.write_json_data_to_file", "line_number": 104, "usage_type": "call"}, {"api_name": "utils.io_utils.IOUtils", "line_number": 104, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 129, "usage_type": "call"}, {"api_name": "os.path", "line_number": 129, "usage_type": "attribute"}, {"api_name": "six.moves.cPickle.dump", "line_number": 130, "usage_type": "call"}, {"api_name": "six.moves.cPickle", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 143, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 144, "usage_type": "call"}, {"api_name": "json.load", "line_number": 157, "usage_type": "call"}, {"api_name": "statistics.median", "line_number": 160, "usage_type": "call"}]} +{"seq_id": "415644908", "text": "import operator\n\nimport numpy as np\nimport torch.nn as nn\n\nimport densetorch as dt\n\n# Random seed\nseed = 42\n\n# Data settings\ncrop_size = 450\nbatch_size = 4\nval_batch_size = 5\nnum_classes = 2#40\nn_epochs = 3\nval_every = 1\n\ndata_file = \"./lists/train.txt\"#\"./lists/train_list_depth.txt\" #\"./lists/train.txt\"\nval_file = \"./lists/test.txt\"#\"./lists/val_list_depth.txt\" #\"./lists/testing.txt\"\ndata_dir = \"./newEndoVis/train/\"#\"./datasets/nyudv2/\" #\"./Dataset/train/\"\ndata_val_dir = \"./newEndoVis/test/\"#\"./datasets/nyudv2/\" #\"./Dataset/train/\"\nmasks_names = (\"segm\",)\n\n\ndef line_to_paths_fn(x):\n #rgb, segm, depth = x.decode(\"utf-8\").strip(\"\\n\").split(\"\\t\")\n # print(x.decode(\"utf-8\").strip(\"\\n\").split(\" \"))\n rgb, segm = x.decode(\"utf-8\").strip(\"\\n\").split(\" \")\n return [rgb, segm]\n\n\ndepth_scale = 5000.0\nimg_scale = 1.0 / 255\nimg_mean = np.array([0.485, 0.456, 0.406])\nimg_std = np.array([0.229, 0.224, 0.225])\nnormalise_params = [\n img_scale, # SCALE\n img_mean.reshape((1, 1, 3)), # MEAN\n img_std.reshape((1, 1, 3)),\n depth_scale,\n] # STD\nignore_index = 255\n\n# optim options\ncrit_segm = nn.CrossEntropyLoss(ignore_index=ignore_index).cuda()\n\nlr_enc = 1e-2\noptim_enc = \"SGD\"\nmom_enc = 0.9\nwd_enc = 1e-4\nlr_dec = 5e-2\noptim_dec = \"SGD\"\nmom_dec = 0.9\nwd_dec = 1e-4\nloss_coeffs = (1.0,)\n\n# saving criterions\ninit_vals = (0.0,)\ncomp_fns = [operator.gt]\nckpt_dir = \"./\"\nckpt_path = \"./checkpoint.pth.tar\"\nsaver = dt.misc.Saver(\n args=locals(),\n ckpt_dir=ckpt_dir,\n best_val=init_vals,\n condition=comp_fns,\n save_several_mode=all,\n)\n", "sub_path": "DenseTorch/config_test.py", "file_name": "config_test.py", "file_ext": "py", "file_size_in_byte": 1566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 46, 
"usage_type": "call"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "operator.gt", "line_number": 60, "usage_type": "attribute"}, {"api_name": "densetorch.misc.Saver", "line_number": 63, "usage_type": "call"}, {"api_name": "densetorch.misc", "line_number": 63, "usage_type": "attribute"}]} +{"seq_id": "193338444", "text": "\n'''\n2013 March - \nby Yasuyuki Shima\n\nclass rsemresultparser\nrsemresultparser.genes :rsemresult object for genes\nrsemresultparser.isoforms:rsemresult object for isoforms\n\nclass rsemresult\nattributes\n-.array : numpy array (gene x data)\n-.gd : dictionary (genedic instance)\n-.genelist : list of genes in array\n-.datalist : list of data in array\nmethods\n-show_samples()\n-show_index()\n-subset_by_index(indexlist = 'all', genelist = 'all'): \n make subset of rsemresult instance by a list of index\n-subset_by_preifx(prefixstring = 'all',genelist = 'all'): \n make subset of rsemresult instance by a comma separated string of prefixes\n-Graph_Stat.xxxx(): run Graph_Stat methods. \n'''\n\nimport os, pickle\nfrom generate_genedics import *\nimport Graph_Stat\nimport numpy\nfrom EBSeqparser import *\nfrom collections import OrderedDict\n\nclass rsemresult(object):\n def __init__(self, directory, gd, valuetype):\n self.directory = directory\n self.FPKMdic = {}\n self.valuetype = valuetype\n self.dataindexdic = OrderedDict({})\n self.indexcounter = 0\n self.datatype = None\n self.gd = gd\n self.genelist = []\n self.datalist = []\n self.name_to_indexdic = {}\n self.index_to_namedic = {}\n\n def resultreader(self, filepath, prefix, repNo):\n try:\n self.dataindexdic[prefix][repNo] = self.indexcounter\n except:\n self.dataindexdic[prefix] = {}\n self.dataindexdic[prefix][repNo] = self.indexcounter\n self.datalist.append(prefix + '_' + str(repNo))\n self.indexcounter += 1\n #append data to gene/isoformFPKMdic /initiate maindic\n inf = open(filepath)\n if self.valuetype == 'TPM':\n FPKMindex = -1\n elif self.valuetype =='count':\n FPKMindex = -2\n else:\n FPKMindex = 0\n isoformindex = 0\n for line in inf:\n if line.startswith('transcript_id'):\n FPKMindex -= 2\n self.datatype = 'isoforms'\n self.genelist = self.gd.isoformlist\n continue\n elif line.startswith('gene_id'):\n FPKMindex -= 1\n self.datatype = 'genes'\n self.genelist = self.gd.symbollist\n continue\n fields = line.rstrip().split('\\t')\n if self.datatype == 'isoforms':\n index = isoformindex\n else:\n index = int(fields[0])-1\n FPKM = float(fields[FPKMindex])\n try:\n self.FPKMdic[index].append(FPKM)\n except: \n self.FPKMdic[index] = [FPKM]\n isoformindex += 1\n for i in range(len(self.genelist)):\n name = self.genelist[i]\n self.name_to_indexdic[name] = i\n self.index_to_namedic[i] = name\n\n def dic_to_array(self):\n FPKMlist = []\n for index in range(len(self.FPKMdic.keys())):\n FPKMlist.append(self.FPKMdic[index])\n self.array = numpy.array(FPKMlist)\n # del self.FPKMdic\n self.Graph_Stat = Graph_Stat.Graph_Stat(self)\n\n def show_samples(self):\n prefixlist = list(self.dataindexdic.keys())\n print('samples:')\n print(', '.join(prefixlist))\n print('gene list length:\\t{}'.format(len(self.genelist)))\n\n def show_index(self):\n indexstring = ''\n for index, data in enumerate(self.datalist):\n indexstring += '{}: {}\\t'.format(index, data)\n print('indices')\n print(indexstring)\n print('gene list length:\\t{}'.format(len(self.genelist)))\n\n def genelist_to_indexlist(self, genelist):\n indexlist = []\n for gene in genelist:\n indexlist.append(self.name_to_indexdic[gene])\n 
return indexlist\n\n def remake_datalist(self, original_datalist, indexlist):\n new_datalist = [original_datalist[index] for index in indexlist]\n self.datalist = new_datalist\n for i in range(len(new_datalist)):\n prefix, repNo = rsemresultparser.fileNameSplitter(self.datalist[i])\n try:\n self.dataindexdic[prefix][repNo] = i\n except:\n self.dataindexdic[prefix] = {}\n self.dataindexdic[prefix][repNo] = i\n\n def remake_genelist(self, genelist):\n new_name_to_indexdic = {}\n new_index_to_namedic = {}\n for i in range(len(genelist)):\n new_name_to_indexdic[genelist[i]] = i\n new_index_to_namedic[i] = genelist[i]\n self.genelist = genelist\n self.name_to_indexdic = new_name_to_indexdic\n self.index_to_namedic = new_index_to_namedic\n\n def subset_by_index(self, indexlist='all', genelist='all'):\n subset = rsemresult(self.directory, self.gd, self.valuetype)\n subset.datatype = self.datatype\n if indexlist == 'all':\n subset.array = self.array\n subset.datalist = self.datalist\n indexlist = list(range(len(self.datalist)))\n else:\n subset.array = self.array.take(indexlist, 1)\n subset.remake_datalist(self.datalist, indexlist)\n\n if genelist == 'all':\n subset.name_to_indexdic = self.name_to_indexdic\n subset.index_to_namedic = self.index_to_namedic\n subset.genelist = self.genelist\n else:\n genelist = self.Graph_Stat.validate_genelist(genelist)\n geneindexlist = self.genelist_to_indexlist(genelist)\n subset.array = subset.array.take(geneindexlist, 0)\n subset.remake_genelist(genelist)\n subset.Graph_Stat = Graph_Stat.Graph_Stat(subset)\n return subset\n\n def subset_by_prefix(self, prefixstring='all', genelist='all'):\n subset = rsemresult(self.directory, self.gd, self.valuetype)\n subset.datatype = self.datatype\n if prefixstring == 'all':\n subset.array = self.array\n subset.datalist = self.datalist\n dataindexlist = list(range(len(self.datalist)))\n else:\n dataindexlist = []\n prefixlist = prefixstring.split(',')\n prefixlist = sorted([prefix.strip() for prefix in prefixlist])\n for prefix in prefixlist:\n for repNo in sorted(self.dataindexdic[prefix].keys()):\n dataindexlist.append(self.dataindexdic[prefix][repNo])\n subset.array = self.array.take(dataindexlist,1)\n subset.remake_datalist(self.datalist, dataindexlist)\n if genelist == 'all':\n subset.name_to_indexdic = self.name_to_indexdic\n subset.index_to_namedic = self.index_to_namedic\n subset.genelist = self.genelist\n else:\n genelist = self.Graph_Stat.validate_genelist(genelist)\n geneindexlist = self.genelist_to_indexlist(genelist)\n subset.array = subset.array.take(geneindexlist,0)\n subset.remake_genelist(genelist)\n subset.Graph_Stat = Graph_Stat.Graph_Stat(subset)\n return subset\n\n def gene_intrsct(self, filename, genelist, **kwargs):\n EBS = EBSeqResult()\n fname = self.directory + filename\n print(fname)\n EBS.readfile(fname)\n print(\"EBseq reading hard_threshold\")\n if 'top' in list(kwargs.keys()):\n topx = EBS.top_x(2500)\n genenamelist = []\n for gene in topx:\n genenamelist.append(self.gd.geneID_to_geneName[gene])\n self.intrsct_list = list(set(genelist)&set(genenamelist))\n elif 'bottom' in list(kwargs.keys()):\n bottomx = EBS.bottom_x(2500)\n genenamelist = []\n for gene in bottomx:\n genenamelist.append(self.gd.geneID_to_geneName[gene])\n self.intrsct_list = list(set(genelist)&set(genenamelist))\n elif 'topbottom' in list(kwargs.keys()):\n genenamelist = []\n topbottomx = EBS.top_bottom_x(2500)\n for gene in topbottomx:\n genenamelist.append(self.gd.geneID_to_geneName[gene])\n self.intrsct_list = 
list(set(genelist)&set(genenamelist))\n        else:\n            topx = EBS.top_x(2500)\n            genenamelist = []\n            for gene in topx:\n                genenamelist.append(self.gd.geneID_to_geneName[gene])\n            self.intrsct_list = list(set(genelist)&set(genenamelist))\n        return self.intrsct_list\n\nclass rsemresultparser(object):\n    '''generate rsemresult objects for gene and isoform\n    ex. result = rsemresultparser('--path to the data ---', 'basename for result')\n    result.genes:rsemresult object for genes\n    result.isoforms:rsemresult object for isoforms'''\n\n    def __init__(self, directory='/Volumes/DataStorage/test/mm10/', basename='mm10', valuetype='TPM', init=True):\n        self.directory = directory\n        self.basename = basename\n        self.valuetype = valuetype\n        gd = genedicts(self.directory)\n        gd.getdic()\n        self.gd = gd\n        if init:\n            self.genes = rsemresult(self.directory, gd, valuetype)\n            self.isoforms = rsemresult(self.directory, gd, valuetype)\n            print('loading and parsing data')\n            self.resultparser()\n        # pklpath = os.path.join(self.directory, self.basename+'.resemresult.pkl')\n        # if os.path.exists(pklpath):\n        #     print('loading a cache file for rsemresult')\n        #     cache = pickle.load(open(pklpath,'rb'))\n        #     self.__dict__.update(cache)\n        # else:\n        #     print('loading and parsing data')\n        #     self.resultparser(gd, valuetype)\n        #     print('saving cache file')\n        #     pickle.dump(self.__dict__, open(pklpath, 'wb'), 2)\n\n    @staticmethod\n    def fileNameSplitter(filename):\n        names = filename.split('_')\n        if len(names) == 2:\n            return names[0], names[1].split('.')[0]\n        elif len(names) == 4:\n            return '_'.join(names[0:2]), '_'.join(\n                [names[2], names[3].split('.')[0]])\n        else:\n            raise Exception('need to make special filename splitter')\n\n    def resultparser(self):\n        filelist = os.listdir(self.directory)\n        filelist.sort()\n        genefiles = '.genes.results'\n        isoformfiles = '.isoforms.results'\n        for filename in filelist:\n            if not filename.endswith((genefiles, isoformfiles)):\n                continue\n            filepath = os.path.join(self.directory, filename)\n            prefix, repNo = self.fileNameSplitter(filename)\n            if filename.endswith(genefiles):\n                self.genes.resultreader(filepath, prefix, repNo)\n            elif filename.endswith(isoformfiles):\n                self.isoforms.resultreader(filepath, prefix, repNo)\n        self.genes.dic_to_array()\n        self.isoforms.dic_to_array()\n\n    def countgenerator(self, samplelist, datatype):\n        dirlist = os.listdir(self.directory)\n        self.count = rsemresult(self.directory, self.gd, 'count')\n        for samplename in samplelist:\n            if datatype == 'genes':\n                postfix = '.genes.results'\n            elif datatype == 'isoforms':\n                postfix = '.isoforms.results'\n            else:\n                raise Exception('datatype should be \"genes\" or \"isoforms\"')\n            sampleprefix, repNo = self.fileNameSplitter(samplename)\n            if samplename + postfix in dirlist:\n                samplepath = os.path.join(self.directory, samplename) + postfix\n                print('reading {}'.format(samplepath))\n                self.count.resultreader(samplepath, sampleprefix, repNo)\n        self.count.dic_to_array()\n        return self.count.array\n\n", "sub_path": "rsemdemo/rsem-EBseq/rsemresultparser.py", "file_name": "rsemresultparser.py", "file_ext": "py", "file_size_in_byte": 11667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "collections.OrderedDict", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "Graph_Stat.Graph_Stat", "line_number": 97, "usage_type": "call"}, {"api_name": "Graph_Stat.Graph_Stat", "line_number": 160, "usage_type": "call"}, {"api_name": 
"Graph_Stat.Graph_Stat", "line_number": 188, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 264, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 271, "usage_type": "call"}, {"api_name": "os.path", "line_number": 271, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 292, "usage_type": "call"}, {"api_name": "os.path", "line_number": 292, "usage_type": "attribute"}]} +{"seq_id": "633167761", "text": "from django.contrib.auth.models import AnonymousUser\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.db.models import Count\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\n\nfrom product.forms import TestimonialForm\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404, render\n\nfrom product.models import Product, Testimonial\n\n\ndef pagination(request, queryset):\n paginator = Paginator(queryset, 3)\n page = request.GET.get('page')\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n queryset = paginator.page(1)\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n return queryset\n\n\ndef index_view(request):\n sort_by = request.GET.get('sort_by')\n queryset = Product.objects.annotate(count_likes=Count('likes'))\n if sort_by:\n products = pagination(request, queryset.order_by(sort_by))\n else:\n products = pagination(request, queryset)\n return render(request, 'index.html',\n {'products': products,\n 'index_page': True,\n 'sort_by': sort_by, })\n\n\ndef detail_view(request, slug=None):\n page = get_object_or_404(Product, slug=slug)\n count_likes = page.likes.count()\n if request.user in page.likes.all():\n user_like = True\n else:\n user_like = False\n initial_form = TestimonialForm()\n\n if request.POST:\n form = TestimonialForm(request.POST)\n if form.is_valid():\n if not request.user.username:\n testimonial = Testimonial.objects.create(product=Product.objects.get(slug=slug),\n message=request.POST.get('message'))\n else:\n testimonial = Testimonial.objects.create(product=Product.objects.get(slug=slug),\n author=request.user,\n message=request.POST.get('message'))\n testimonial.save()\n return HttpResponseRedirect('/products/%s/' % slug)\n\n return render(request, 'detail.html',\n {'page': page,\n 'form': initial_form,\n 'count_likes': count_likes,\n 'user_like': user_like,\n 'testimonials': Testimonial.objects.filter(product__slug=slug)})\n\n\ndef like(request):\n if request.is_ajax():\n user = request.user\n slug = request.GET.get('product_slug')\n is_like = None\n product = None\n status = ''\n if slug:\n product = get_object_or_404(Product, slug=slug)\n if user.is_authenticated:\n if user not in product.likes.all():\n product.likes.add(user)\n product.save()\n is_like = False\n status = 'Liked!'\n else:\n product.likes.remove(user)\n product.save()\n is_like = True\n status = 'Unliked!'\n\n return JsonResponse({'is_like': is_like, 'count_likes': product.likes.count(), 'status': status})\n else:\n raise Http404\n", "sub_path": "product/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3270, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.core.paginator.Paginator", "line_number": 15, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 19, "usage_type": "name"}, {"api_name": 
"django.core.paginator.EmptyPage", "line_number": 21, "usage_type": "name"}, {"api_name": "product.models.Product.objects.annotate", "line_number": 29, "usage_type": "call"}, {"api_name": "product.models.Product.objects", "line_number": 29, "usage_type": "attribute"}, {"api_name": "product.models.Product", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 29, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 34, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 41, "usage_type": "call"}, {"api_name": "product.models.Product", "line_number": 41, "usage_type": "argument"}, {"api_name": "product.forms.TestimonialForm", "line_number": 47, "usage_type": "call"}, {"api_name": "product.forms.TestimonialForm", "line_number": 50, "usage_type": "call"}, {"api_name": "product.models.Testimonial.objects.create", "line_number": 53, "usage_type": "call"}, {"api_name": "product.models.Testimonial.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "product.models.Testimonial", "line_number": 53, "usage_type": "name"}, {"api_name": "product.models.Product.objects.get", "line_number": 53, "usage_type": "call"}, {"api_name": "product.models.Product.objects", "line_number": 53, "usage_type": "attribute"}, {"api_name": "product.models.Product", "line_number": 53, "usage_type": "name"}, {"api_name": "product.models.Testimonial.objects.create", "line_number": 56, "usage_type": "call"}, {"api_name": "product.models.Testimonial.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "product.models.Testimonial", "line_number": 56, "usage_type": "name"}, {"api_name": "product.models.Product.objects.get", "line_number": 56, "usage_type": "call"}, {"api_name": "product.models.Product.objects", "line_number": 56, "usage_type": "attribute"}, {"api_name": "product.models.Product", "line_number": 56, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 60, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "product.models.Testimonial.objects.filter", "line_number": 67, "usage_type": "call"}, {"api_name": "product.models.Testimonial.objects", "line_number": 67, "usage_type": "attribute"}, {"api_name": "product.models.Testimonial", "line_number": 67, "usage_type": "name"}, {"api_name": "product.forms", "line_number": 75, "usage_type": "name"}, {"api_name": "product.forms", "line_number": 78, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 78, "usage_type": "call"}, {"api_name": "product.models.Product", "line_number": 78, "usage_type": "argument"}, {"api_name": "product.forms.likes.all", "line_number": 80, "usage_type": "call"}, {"api_name": "product.forms.likes", "line_number": 80, "usage_type": "attribute"}, {"api_name": "product.forms", "line_number": 80, "usage_type": "name"}, {"api_name": "product.forms.likes.add", "line_number": 81, "usage_type": "call"}, {"api_name": "product.forms.likes", "line_number": 81, "usage_type": "attribute"}, {"api_name": "product.forms", "line_number": 81, "usage_type": "name"}, {"api_name": "product.forms.save", "line_number": 82, "usage_type": "call"}, {"api_name": "product.forms", "line_number": 82, "usage_type": "name"}, {"api_name": "product.forms.likes.remove", "line_number": 86, "usage_type": "call"}, {"api_name": "product.forms.likes", "line_number": 86, "usage_type": "attribute"}, {"api_name": 
"product.forms", "line_number": 86, "usage_type": "name"}, {"api_name": "product.forms.save", "line_number": 87, "usage_type": "call"}, {"api_name": "product.forms", "line_number": 87, "usage_type": "name"}, {"api_name": "django.http.JsonResponse", "line_number": 91, "usage_type": "call"}, {"api_name": "product.forms.likes.count", "line_number": 91, "usage_type": "call"}, {"api_name": "product.forms.likes", "line_number": 91, "usage_type": "attribute"}, {"api_name": "product.forms", "line_number": 91, "usage_type": "name"}, {"api_name": "django.http.Http404", "line_number": 93, "usage_type": "name"}]} +{"seq_id": "209780731", "text": "from typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import Distribution\n\nfrom gluonts.core.component import validated\nfrom gluonts.torch.distributions.distribution_output import DistributionOutput\nfrom pts.model import weighted_average\nfrom pts.modules import MeanScaler, NOPScaler, FeatureEmbedder\n\n\ndef prod(xs):\n p = 1\n for x in xs:\n p *= x\n return p\n\n\nclass DeepARNetwork(nn.Module):\n @validated()\n def __init__(\n self,\n input_size: int,\n num_layers: int,\n num_cells: int,\n cell_type: str,\n history_length: int,\n context_length: int,\n prediction_length: int,\n distr_output: DistributionOutput,\n dropout_rate: float,\n cardinality: List[int],\n embedding_dimension: List[int],\n lags_seq: List[int],\n scaling: bool = True,\n dtype: np.dtype = np.float32,\n ) -> None:\n super().__init__()\n self.num_layers = num_layers\n self.num_cells = num_cells\n self.cell_type = cell_type\n self.history_length = history_length\n self.context_length = context_length\n self.prediction_length = prediction_length\n self.dropout_rate = dropout_rate\n self.cardinality = cardinality\n self.embedding_dimension = embedding_dimension\n self.num_cat = len(cardinality)\n self.scaling = scaling\n self.dtype = dtype\n\n self.lags_seq = lags_seq\n\n self.distr_output = distr_output\n rnn = {\"LSTM\": nn.LSTM, \"GRU\": nn.GRU}[self.cell_type]\n self.rnn = rnn(\n input_size=input_size,\n hidden_size=num_cells,\n num_layers=num_layers,\n dropout=dropout_rate,\n batch_first=True,\n )\n\n self.target_shape = distr_output.event_shape\n\n self.proj_distr_args = distr_output.get_args_proj(num_cells)\n\n self.embedder = FeatureEmbedder(\n cardinalities=cardinality, embedding_dims=embedding_dimension\n )\n\n if scaling:\n self.scaler = MeanScaler(keepdim=True)\n else:\n self.scaler = NOPScaler(keepdim=True)\n\n @staticmethod\n def get_lagged_subsequences(\n sequence: torch.Tensor,\n sequence_length: int,\n indices: List[int],\n subsequences_length: int = 1,\n ) -> torch.Tensor:\n \"\"\"\n Returns lagged subsequences of a given sequence.\n Parameters\n ----------\n sequence : Tensor\n the sequence from which lagged subsequences should be extracted.\n Shape: (N, T, C).\n sequence_length : int\n length of sequence in the T (time) dimension (axis = 1).\n indices : List[int]\n list of lag indices to be used.\n subsequences_length : int\n length of the subsequences to be extracted.\n Returns\n --------\n lagged : Tensor\n a tensor of shape (N, S, C, I), where S = subsequences_length and\n I = len(indices), containing lagged subsequences. 
Specifically,\n lagged[i, j, :, k] = sequence[i, -indices[k]-S+j, :].\n \"\"\"\n assert max(indices) + subsequences_length <= sequence_length, (\n f\"lags cannot go further than history length, found lag {max(indices)} \"\n f\"while history length is only {sequence_length}\"\n )\n assert all(lag_index >= 0 for lag_index in indices)\n\n lagged_values = []\n for lag_index in indices:\n begin_index = -lag_index - subsequences_length\n end_index = -lag_index if lag_index > 0 else None\n lagged_values.append(sequence[:, begin_index:end_index, ...])\n return torch.stack(lagged_values, dim=-1)\n\n def unroll_encoder(\n self,\n feat_static_cat: torch.Tensor, # (batch_size, num_features)\n feat_static_real: torch.Tensor, # (batch_size, num_features)\n past_time_feat: torch.Tensor, # (batch_size, history_length, num_features)\n past_target: torch.Tensor, # (batch_size, history_length, *target_shape)\n past_observed_values: torch.Tensor, # (batch_size, history_length, *target_shape)\n future_time_feat: Optional[\n torch.Tensor\n ] = None, # (batch_size, prediction_length, num_features)\n future_target: Optional[\n torch.Tensor\n ] = None, # (batch_size, prediction_length, *target_shape)\n ) -> Tuple[torch.Tensor, Union[torch.Tensor, List], torch.Tensor, torch.Tensor]:\n\n if future_time_feat is None or future_target is None:\n time_feat = past_time_feat[\n :, self.history_length - self.context_length :, ...\n ]\n sequence = past_target\n sequence_length = self.history_length\n subsequences_length = self.context_length\n else:\n time_feat = torch.cat(\n (\n past_time_feat[:, self.history_length - self.context_length :, ...],\n future_time_feat,\n ),\n dim=1,\n )\n sequence = torch.cat((past_target, future_target), dim=1)\n sequence_length = self.history_length + self.prediction_length\n subsequences_length = self.context_length + self.prediction_length\n\n lags = self.get_lagged_subsequences(\n sequence=sequence,\n sequence_length=sequence_length,\n indices=self.lags_seq,\n subsequences_length=subsequences_length,\n )\n\n # scale is computed on the context length last units of the past target\n # scale shape is (batch_size, 1, *target_shape)\n _, scale = self.scaler(\n past_target[:, -self.context_length :, ...],\n past_observed_values[:, -self.context_length :, ...],\n )\n\n # (batch_size, num_features)\n embedded_cat = self.embedder(feat_static_cat)\n\n # in addition to embedding features, use the log scale as it can help\n # prediction too\n # (batch_size, num_features + prod(target_shape))\n static_feat = torch.cat(\n (\n embedded_cat,\n feat_static_real,\n scale.log() if len(self.target_shape) == 0 else scale.squeeze(1).log(),\n ),\n dim=1,\n )\n\n # (batch_size, subsequences_length, num_features + 1)\n repeated_static_feat = static_feat.unsqueeze(1).expand(\n -1, subsequences_length, -1\n )\n\n # (batch_size, sub_seq_len, *target_shape, num_lags)\n lags_scaled = lags / scale.unsqueeze(-1)\n\n # from (batch_size, sub_seq_len, *target_shape, num_lags)\n # to (batch_size, sub_seq_len, prod(target_shape) * num_lags)\n input_lags = lags_scaled.reshape(\n (-1, subsequences_length, len(self.lags_seq) * prod(self.target_shape))\n )\n\n # (batch_size, sub_seq_len, input_dim)\n inputs = torch.cat((input_lags, time_feat, repeated_static_feat), dim=-1)\n\n # unroll encoder\n outputs, state = self.rnn(inputs)\n\n # outputs: (batch_size, seq_len, num_cells)\n # state: list of (num_layers, batch_size, num_cells) tensors\n # scale: (batch_size, 1, *target_shape)\n # static_feat: (batch_size, num_features + 
prod(target_shape))\n return outputs, state, scale, static_feat\n\n\nclass DeepARTrainingNetwork(DeepARNetwork):\n def distribution(\n self,\n feat_static_cat: torch.Tensor,\n feat_static_real: torch.Tensor,\n past_time_feat: torch.Tensor,\n past_target: torch.Tensor,\n past_observed_values: torch.Tensor,\n future_time_feat: torch.Tensor,\n future_target: torch.Tensor,\n future_observed_values: torch.Tensor,\n ) -> Distribution:\n rnn_outputs, _, scale, _ = self.unroll_encoder(\n feat_static_cat=feat_static_cat,\n feat_static_real=feat_static_real,\n past_time_feat=past_time_feat,\n past_target=past_target,\n past_observed_values=past_observed_values,\n future_time_feat=future_time_feat,\n future_target=future_target,\n )\n\n distr_args = self.proj_distr_args(rnn_outputs)\n\n return self.distr_output.distribution(distr_args, scale=scale)\n\n def forward(\n self,\n feat_static_cat: torch.Tensor,\n feat_static_real: torch.Tensor,\n past_time_feat: torch.Tensor,\n past_target: torch.Tensor,\n past_observed_values: torch.Tensor,\n future_time_feat: torch.Tensor,\n future_target: torch.Tensor,\n future_observed_values: torch.Tensor,\n ) -> torch.Tensor:\n distr = self.distribution(\n feat_static_cat=feat_static_cat,\n feat_static_real=feat_static_real,\n past_time_feat=past_time_feat,\n past_target=past_target,\n past_observed_values=past_observed_values,\n future_time_feat=future_time_feat,\n future_target=future_target,\n future_observed_values=future_observed_values,\n )\n\n # put together target sequence\n # (batch_size, seq_len, *target_shape)\n target = torch.cat(\n (\n past_target[:, self.history_length - self.context_length :, ...],\n future_target,\n ),\n dim=1,\n )\n\n # (batch_size, seq_len)\n loss = -distr.log_prob(target)\n\n # (batch_size, seq_len, *target_shape)\n observed_values = torch.cat(\n (\n past_observed_values[\n :, self.history_length - self.context_length :, ...\n ],\n future_observed_values,\n ),\n dim=1,\n )\n\n # mask the loss at one time step iff one or more observations is missing in the target dimensions\n # (batch_size, seq_len)\n loss_weights = (\n observed_values\n if (len(self.target_shape) == 0)\n else observed_values.min(dim=-1, keepdim=False)\n )\n\n weighted_loss = weighted_average(loss, weights=loss_weights)\n\n return weighted_loss, loss\n\n\nclass DeepARPredictionNetwork(DeepARNetwork):\n def __init__(self, num_parallel_samples: int = 100, **kwargs) -> None:\n super().__init__(**kwargs)\n self.num_parallel_samples = num_parallel_samples\n\n # for decoding the lags are shifted by one, at the first time-step\n # of the decoder a lag of one corresponds to the last target value\n self.shifted_lags = [l - 1 for l in self.lags_seq]\n\n def sampling_decoder(\n self,\n static_feat: torch.Tensor,\n past_target: torch.Tensor,\n time_feat: torch.Tensor,\n scale: torch.Tensor,\n begin_states: Union[torch.Tensor, List[torch.Tensor]],\n ) -> torch.Tensor:\n \"\"\"\n Computes sample paths by unrolling the RNN starting with a initial\n input and state.\n\n Parameters\n ----------\n static_feat : Tensor\n static features. Shape: (batch_size, num_static_features).\n past_target : Tensor\n target history. Shape: (batch_size, history_length).\n time_feat : Tensor\n time features. Shape: (batch_size, prediction_length, num_time_features).\n scale : Tensor\n tensor containing the scale of each element in the batch. 
Shape: (batch_size, 1, 1).\n begin_states : List or Tensor\n list of initial states for the LSTM layers or tensor for GRU.\n the shape of each tensor of the list should be (num_layers, batch_size, num_cells)\n Returns\n --------\n Tensor\n A tensor containing sampled paths.\n Shape: (batch_size, num_sample_paths, prediction_length).\n \"\"\"\n\n # blows-up the dimension of each tensor to batch_size * self.num_parallel_samples for increasing parallelism\n repeated_past_target = past_target.repeat_interleave(\n repeats=self.num_parallel_samples, dim=0\n )\n repeated_time_feat = time_feat.repeat_interleave(\n repeats=self.num_parallel_samples, dim=0\n )\n repeated_static_feat = static_feat.repeat_interleave(\n repeats=self.num_parallel_samples, dim=0\n ).unsqueeze(1)\n repeated_scale = scale.repeat_interleave(\n repeats=self.num_parallel_samples, dim=0\n )\n if self.cell_type == \"LSTM\":\n repeated_states = [\n s.repeat_interleave(repeats=self.num_parallel_samples, dim=1)\n for s in begin_states\n ]\n else:\n repeated_states = begin_states.repeat_interleave(\n repeats=self.num_parallel_samples, dim=1\n )\n\n future_samples = []\n\n # for each future time-units we draw new samples for this time-unit and update the state\n for k in range(self.prediction_length):\n # (batch_size * num_samples, 1, *target_shape, num_lags)\n lags = self.get_lagged_subsequences(\n sequence=repeated_past_target,\n sequence_length=self.history_length + k,\n indices=self.shifted_lags,\n subsequences_length=1,\n )\n\n # (batch_size * num_samples, 1, *target_shape, num_lags)\n lags_scaled = lags / repeated_scale.unsqueeze(-1)\n\n # from (batch_size * num_samples, 1, *target_shape, num_lags)\n # to (batch_size * num_samples, 1, prod(target_shape) * num_lags)\n input_lags = lags_scaled.reshape(\n (-1, 1, prod(self.target_shape) * len(self.lags_seq))\n )\n\n # (batch_size * num_samples, 1, prod(target_shape) * num_lags + num_time_features + num_static_features)\n decoder_input = torch.cat(\n (input_lags, repeated_time_feat[:, k : k + 1, :], repeated_static_feat),\n dim=-1,\n )\n\n # output shape: (batch_size * num_samples, 1, num_cells)\n # state shape: (batch_size * num_samples, num_cells)\n rnn_outputs, repeated_states = self.rnn(decoder_input, repeated_states)\n\n distr_args = self.proj_distr_args(rnn_outputs)\n\n # compute likelihood of target given the predicted parameters\n distr = self.distr_output.distribution(distr_args, scale=repeated_scale)\n\n # (batch_size * num_samples, 1, *target_shape)\n new_samples = distr.sample()\n\n # (batch_size * num_samples, seq_len, *target_shape)\n repeated_past_target = torch.cat((repeated_past_target, new_samples), dim=1)\n future_samples.append(new_samples)\n\n # (batch_size * num_samples, prediction_length, *target_shape)\n samples = torch.cat(future_samples, dim=1)\n\n # (batch_size, num_samples, prediction_length, *target_shape)\n return samples.reshape(\n (\n (-1, self.num_parallel_samples)\n + (self.prediction_length,)\n + self.target_shape\n )\n )\n\n # noinspection PyMethodOverriding,PyPep8Naming\n def forward(\n self,\n feat_static_cat: torch.Tensor, # (batch_size, num_features)\n feat_static_real: torch.Tensor, # (batch_size, num_features)\n past_time_feat: torch.Tensor, # (batch_size, history_length, num_features)\n past_target: torch.Tensor, # (batch_size, history_length, *target_shape)\n past_observed_values: torch.Tensor, # (batch_size, history_length, *target_shape)\n future_time_feat: torch.Tensor, # (batch_size, prediction_length, num_features)\n ) -> 
torch.Tensor:\n \"\"\"\n Predicts samples, all tensors should have NTC layout.\n Parameters\n ----------\n feat_static_cat : (batch_size, num_features)\n feat_static_real : (batch_size, num_features)\n past_time_feat : (batch_size, history_length, num_features)\n past_target : (batch_size, history_length, *target_shape)\n past_observed_values : (batch_size, history_length, *target_shape)\n future_time_feat : (batch_size, prediction_length, num_features)\n\n Returns\n -------\n Tensor\n Predicted samples\n \"\"\"\n\n # unroll the decoder in \"prediction mode\", i.e. with past data only\n _, state, scale, static_feat = self.unroll_encoder(\n feat_static_cat=feat_static_cat,\n feat_static_real=feat_static_real,\n past_time_feat=past_time_feat,\n past_target=past_target,\n past_observed_values=past_observed_values,\n future_time_feat=None,\n future_target=None,\n )\n\n return self.sampling_decoder(\n past_target=past_target,\n time_feat=future_time_feat,\n static_feat=static_feat,\n scale=scale,\n begin_states=state,\n )\n", "sub_path": "pts/model/deepar/deepar_network.py", "file_name": "deepar_network.py", "file_ext": "py", "file_size_in_byte": 17023, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.nn.Module", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "gluonts.torch.distributions.distribution_output.DistributionOutput", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 34, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 35, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 36, "usage_type": "name"}, {"api_name": "numpy.dtype", "line_number": 38, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 38, "usage_type": "attribute"}, {"api_name": "torch.nn.LSTM", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 57, "usage_type": "name"}, {"api_name": "torch.nn.GRU", "line_number": 57, "usage_type": "attribute"}, {"api_name": "pts.modules.FeatureEmbedder", "line_number": 70, "usage_type": "call"}, {"api_name": "pts.modules.MeanScaler", "line_number": 75, "usage_type": "call"}, {"api_name": "pts.modules.NOPScaler", "line_number": 77, "usage_type": "call"}, {"api_name": "gluonts.core.component.validated", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 81, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 83, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 121, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 122, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 123, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 124, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 125, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 127, "usage_type": "attribute"}, {"api_name": "typing.Optional", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 130, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.cat", 
"line_number": 149, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 197, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 132, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 132, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 132, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 212, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 213, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 214, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 215, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 216, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 217, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 218, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 219, "usage_type": "attribute"}, {"api_name": "torch.distributions.Distribution", "line_number": 220, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 237, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 238, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 239, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 240, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 241, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 242, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 243, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 244, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 259, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 271, "usage_type": "call"}, {"api_name": "pts.model.weighted_average", "line_number": 289, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 245, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 305, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 306, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 307, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 308, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 309, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 309, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 309, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 380, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 398, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 402, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 310, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 416, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 417, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 418, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 419, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 420, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 421, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 422, "usage_type": "attribute"}]} +{"seq_id": "180432634", "text": "from collections import defaultdict\n\nn, k = map(int,input().split(\" 
\"))\nlink = defaultdict(list)\n \nqueue = [n]\nnewqueue = set()\nfor i in range(k):\n for j in range(len(queue)):\n data = queue[j]\n dataList = []\n if link[data]:\n newqueue.update(set(link[data]))\n continue\n while data>0:\n tmp = data%10\n dataList = [tmp]+dataList\n data//=10\n for x in range(len(dataList)):\n for y in range(x+1,len(dataList)):\n if x == 0 and dataList[y] == 0:\n continue\n newData = dataList[:]\n newData[x],newData[y] = newData[y],newData[x]\n newData = int(\"\".join(map(str,newData)))\n link[data].append(newData)\n newqueue.add(newData)\n queue = list(newqueue)\n newqueue = set()\nif list(queue):\n print(max(list(queue)))\nelse:\n print(-1)", "sub_path": "BOJ/boj_01039. change/shlee.py", "file_name": "shlee.py", "file_ext": "py", "file_size_in_byte": 937, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "collections.defaultdict", "line_number": 4, "usage_type": "call"}]} +{"seq_id": "385474789", "text": "from django.shortcuts import render\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, parser_classes\nfrom rest_framework.response import Response\nfrom rest_framework.parsers import FileUploadParser, MultiPartParser\nfrom rest_framework.viewsets import ModelViewSet\nfrom django.apps import apps\nfrom django.db import models, connection, migrations\n\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nimport os\nfrom rest_framework.parsers import JSONParser\n\nfrom .models import Total_files\nfrom .serializers import GeneralSerializer\n\nimport json\nimport glob\n\n\ndef create_model(name, fields=None, app_label='', module='', options=None, admin_opts=None):\n \"\"\"\n Create specified model\n \"\"\"\n class Meta:\n # Using type('Meta', ...) 
gives a dictproxy error during model creation\n        pass\n\n    if app_label:\n        # app_label must be set using the Meta inner class\n        setattr(Meta, 'app_label', app_label)\n\n    # Update Meta with any options that were provided\n    if options is not None:\n        for key, value in options.items():\n            setattr(Meta, key, value)\n\n    # Set up a dictionary to simulate declarations within a class\n    attrs = {'__module__': module, 'Meta': Meta}\n\n    # Add in any fields that were provided\n    if fields:\n        attrs.update(fields)\n\n    # Create the class, which automatically triggers ModelBase processing\n    model = type(name, (models.Model,), attrs)\n\n    # Create an Admin class if admin options were provided\n    if admin_opts is not None:\n        class Admin(admin.ModelAdmin):\n            pass\n        for key, value in admin_opts:\n            setattr(Admin, key, value)\n        admin.site.register(model, Admin)\n\n    return model\n\ndef one_time_setup():\n    directory = settings.FILE_UPLOAD\n\n    for filename in glob.glob(directory + \"/*.json\"):\n        f = open(filename)\n        data = json.load(f)\n        fields = {}\n        for key in data[0].keys():\n            if key == \"id\":\n                continue\n            datatype = type(data[0][key])\n            if datatype is int:\n                fields[key] = models.IntegerField()\n            else: \n                fields[key] = models.CharField(max_length=255)\n\n        model_name = os.path.basename(os.path.splitext(filename)[0])\n        created_model = create_model(model_name, fields, 'webapp')\n\n# Create your views here.\nclass CreateListModelMixin(object):\n\n    def get_serializer(self, *args, **kwargs):\n        \"\"\" if an array is passed, set serializer to many \"\"\"\n        if isinstance(kwargs.get('data', {}), list):\n            kwargs['many'] = True\n        return super(CreateListModelMixin, self).get_serializer(*args, **kwargs)\n\n\nclass GeneralViewSet(CreateListModelMixin, ModelViewSet):\n\n    @property\n    def model(self):\n        return apps.get_model(app_label=str(self.kwargs['app_label']), model_name=str(self.kwargs['model_name']))\n\n    def get_queryset(self):\n        model = self.model\n        return model.objects.all() \n\n    def get_serializer_class(self):\n        GeneralSerializer.Meta.model = self.model\n        return GeneralSerializer\n\n\nclass GeneralViewSet2(ModelViewSet):\n    @property\n    def model(self):\n        return apps.get_model(app_label=str(self.kwargs['app_label']), model_name=str(self.kwargs['model_name']))\n\n    def get_queryset(self):\n        pk = self.kwargs['pk']\n        model = self.model\n        return model.objects.filter(pk=pk)\n\n\n    def get_serializer_class(self):\n        GeneralSerializer.Meta.model = self.model\n        return GeneralSerializer\n\n# from .tasks import model_upload_task\n@api_view(['POST'])\n@parser_classes((MultiPartParser,))\ndef model_upload(request, format=None):\n\n    uploaded_file = request.FILES['file']\n\n    data = JSONParser().parse(uploaded_file.open())\n    # model_upload_task.delay(data, uploaded_file.name)\n\n    fields = {}\n    for key in data[0].keys():\n        if key == \"id\":\n            continue\n\n        datatype = type(data[0][key])\n        if datatype is int:\n            fields[key] = models.IntegerField()\n        else: \n            fields[key] = models.CharField(max_length=255)\n\n    model_name = os.path.splitext(uploaded_file.name)[0]\n    created_model = create_model(model_name, fields, 'webapp')\n\n    with connection.schema_editor() as schema_editor:\n        schema_editor.create_model(created_model)\n\n    for record in data:\n        created_obj = created_model(**record)\n        created_obj.save()\n\n    # save file\n    fs = FileSystemStorage()\n    filename = fs.save( os.path.join(settings.FILE_UPLOAD, uploaded_file.name), uploaded_file)\n    \n    return Response({'Message': 'Successful'})", "sub_path": "webapp/views.py", "file_name": "views.py", 
"file_ext": "py", "file_size_in_byte": 4693, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.db.models.Model", "line_number": 48, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 48, "usage_type": "name"}, {"api_name": "django.conf.settings.FILE_UPLOAD", "line_number": 61, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 61, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 63, "usage_type": "call"}, {"api_name": "json.load", "line_number": 65, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 74, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 76, "usage_type": "call"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 89, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 93, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 93, "usage_type": "name"}, {"api_name": "serializers.GeneralSerializer.Meta", "line_number": 100, "usage_type": "attribute"}, {"api_name": "serializers.GeneralSerializer", "line_number": 100, "usage_type": "name"}, {"api_name": "serializers.GeneralSerializer", "line_number": 101, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 104, "usage_type": "name"}, {"api_name": "django.apps.apps.get_model", "line_number": 107, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 107, "usage_type": "name"}, {"api_name": "serializers.GeneralSerializer.Meta", "line_number": 116, "usage_type": "attribute"}, {"api_name": "serializers.GeneralSerializer", "line_number": 116, "usage_type": "name"}, {"api_name": "serializers.GeneralSerializer", "line_number": 117, "usage_type": "name"}, {"api_name": "rest_framework.parsers.JSONParser", "line_number": 126, "usage_type": "call"}, {"api_name": "django.db.models.IntegerField", "line_number": 136, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 136, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 138, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 138, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 140, "usage_type": "call"}, {"api_name": "os.path", "line_number": 140, "usage_type": "attribute"}, {"api_name": "django.db.connection.schema_editor", "line_number": 143, "usage_type": "call"}, {"api_name": "django.db.connection", "line_number": 143, "usage_type": "name"}, {"api_name": "django.core.files.storage.FileSystemStorage", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "django.conf.settings.FILE_UPLOAD", "line_number": 152, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 152, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 154, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", 
"line_number": 120, "usage_type": "call"}, {"api_name": "rest_framework.decorators.parser_classes", "line_number": 121, "usage_type": "call"}, {"api_name": "rest_framework.parsers.MultiPartParser", "line_number": 121, "usage_type": "name"}]} +{"seq_id": "251857043", "text": "from rest_framework import serializers\nfrom .models import Purchase, Buyer, Stone\n\n\n\nclass GetSerializersList(serializers.ModelSerializer):\n gems = serializers.SerializerMethodField('gems_dict')\n\n\n def gems_dict(self, obj):\n buyer = Buyer.objects.all().order_by('-spent_money')[:5]\n gemss = dict()\n for i in obj.gems.all():\n for y in buyer:\n if obj.id != y.id:\n for gems in y.gems.all():\n if i == gems:\n gemss[i.id] = i.name\n\n return gemss\n\n # def gems_dict_test(self, obj):\n # gemss = dict()\n # for i in obj.gems.all():\n # gemss[i.id] = i.name\n # return gemss\n\n class Meta:\n model = Buyer\n fields = ('username', 'spent_money', 'gems',)\n\n\nclass SerializersList(serializers.ModelSerializer):\n class Meta:\n model = Purchase\n fields = ('customer', 'item', \"total\", 'quantity', 'date')\n\n\nclass SerializersCreate(serializers.ModelSerializer):\n class Meta:\n model = Purchase\n # list_serializer_class = SerializersCreateList\n fields = ('customer', 'item', \"total\", 'quantity', 'date')\n", "sub_path": "main/serializer.py", "file_name": "serializer.py", "file_ext": "py", "file_size_in_byte": 1200, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 6, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 6, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 7, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 7, "usage_type": "name"}, {"api_name": "models.Buyer.objects.all", "line_number": 11, "usage_type": "call"}, {"api_name": "models.Buyer.objects", "line_number": 11, "usage_type": "attribute"}, {"api_name": "models.Buyer", "line_number": 11, "usage_type": "name"}, {"api_name": "models.Buyer", "line_number": 29, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 33, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 33, "usage_type": "name"}, {"api_name": "models.Purchase", "line_number": 35, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 39, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 39, "usage_type": "name"}, {"api_name": "models.Purchase", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "562726806", "text": "import requests\nimport random\nimport time\nimport serial\n\n'''\nglobal variables\n'''\n\nENDPOINT = \"industrial.api.ubidots.com\" #replace this with the ubidots stem \nDEVICE_LABEL = \"sensornode\"\nVARIABLE_LABEL_1 = \"humidity\"\nVARIABLE_LABEL_2 = \"temperature\"\nTOKEN = \"insert Token here\"\nDELAY = 1 # Delay in seconds\nser = serial.Serial('COM4',9600)\n\n\ndef post_var(payload, url=ENDPOINT, device=DEVICE_LABEL, token=TOKEN):\n try:\n url = \"http://{}/api/v1.6/devices/{}\".format(url, device)\n headers = {\"X-Auth-Token\": token, \"Content-Type\": \"application/json\"}\n\n attempts = 0\n status_code = 400\n\n while status_code >= 400 and attempts < 5:\n print(\"[INFO] Sending data, attempt number: {}\".format(attempts))\n req = requests.post(url=url, 
headers=headers,\n json=payload)\n status_code = req.status_code\n attempts += 1\n time.sleep(1)\n\n print(\"[INFO] Results:\")\n print(req.text)\n except Exception as e:\n print(\"[ERROR] Error posting, details: {}\".format(e))\n\n\ndef main():\n # Simulates sensor values\n ser_bytes = ser.readline()\n decoded_bytes = ser_bytes[0:len(ser_bytes)-2].decode(\"utf-8\")\n print(decoded_bytes)\n data = decoded_bytes\n print(data)\n print(type(data))\n\n data_1 = data.split(\",\")\n \n sensor_value_1 = float( data_1[0])\n sensor_value_2 = float( data_1[1])\n print(sensor_value_2)\n print(sensor_value_1)\n\n payload = {VARIABLE_LABEL_1: sensor_value_1,\n VARIABLE_LABEL_2: sensor_value_2\n }\n\n # Sends data\n post_var(payload)\n\n\nif __name__ == \"__main__\":\n while True:\n main()\n time.sleep(DELAY)\n", "sub_path": "python3code.py", "file_name": "python3code.py", "file_ext": "py", "file_size_in_byte": 1722, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "serial.Serial", "line_number": 16, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "284763097", "text": "# -*- coding: utf-8 -*-\nimport logging\nfrom .f139_price import F139Price\nfrom ..config import f139_config, f139_logger\n\n__author__ = \"Feng_Hui\"\n__time__ = \"2018.02.08 08:30\"\n__remarks__ = \"富宝报价抓取-模拟登录获取报价\"\nlogger = logging.basicConfig()\n\n\nclass F139Fdp(F139Price):\n\n job_name = '富宝报价抓取——全国各地废电瓶价格行情'\n data_url = \"http://data.f139.com/list.do?vid=137\"\n title = '{}{}'.format(f139_config.prefix_of_title, \"全国各地废电瓶价格行情\")\n\n def run(self):\n f139_logger.logger.info('正在抓取: {}'.format(self.job_name))\n if not self.is_login():\n f139_logger.info('未登录,开始登录……')\n self.login()\n selector = self.get_selector(self.data_url)\n area = selector.xpath('//div[@id=\"#\"]/div/table/tr/td[position()=2]')\n price = selector.xpath('//div[@id=\"#\"]/div/table/tr/td[position()=4]')\n up_or_down = selector.xpath('//div[@id=\"#\"]/div/table/tr/td[position()=5]')\n # content2 = selector.xpath('//div[@id=\"#\"]/div/table/tr/td[position()>3]')\n # 第一列:地区\n first_column = [each_row.xpath('text()')[0].strip().replace('地区/来源', '地区') for each_row in area[0:18]]\n # 第二列:价格区间\n second_column = [each_row.xpath('text()')[0].strip().replace('价格', '价格区间') for each_row in price[0:18]]\n # 第三列:单位\n third_column = ['单位']\n third_column.extend(['元/吨' for _ in range(17)])\n # 第四列:涨跌\n fourth_column = []\n for each_row in up_or_down[0:18]:\n text_flat = each_row.xpath('text()')\n # print(text_flat)\n if text_flat and text_flat != ['\\r\\n\\t\\t\\t\\t\\t\\t\\t', '\\r\\n\\t\\t\\t\\t\\t']:\n fourth_column.append(text_flat[0].strip())\n else:\n # print(each_row.xpath('string(.)'))\n text_rise = each_row.xpath('font[@class=\"up\"]/text()')\n # print(text_rise)\n if text_rise:\n fourth_column.append('↑' + text_rise[0].strip())\n else:\n text_fall = each_row.xpath('font[@class=\"down\"]/text()')\n # print(text_fall)\n if text_fall:\n # print(text_fall)\n fourth_column.append('↓' + text_fall[0].strip())\n else:\n fourth_column.append('')\n # 整合表格\n table = zip(first_column, second_column, third_column, fourth_column)\n single_tr = []\n\n # 构造表格\n for each_row in table:\n # print(each_row, type(each_row))\n single_tr.append('' + ''.join(['' + str(each) + '' for each in each_row]) + '')\n table_content = 
'' + ''.join(single_tr) + '
    '\n print(table_content)\n return table_content\n\n\n# if __name__ == \"__main__\":\n# start_time = time.time()\n# f139_price = F139Price()\n# # f139_price.run()\n# print(f139_price.is_login2())\n# # import os\n# # print(os.pardir)\n# print('总共用时:', time.time() - start_time)\n", "sub_path": "f139/cralwers/jobs/f139_feidianping.py", "file_name": "f139_feidianping.py", "file_ext": "py", "file_size_in_byte": 3223, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.basicConfig", "line_number": 9, "usage_type": "call"}, {"api_name": "f139_price.F139Price", "line_number": 12, "usage_type": "name"}, {"api_name": "config.f139_config.prefix_of_title", "line_number": 16, "usage_type": "attribute"}, {"api_name": "config.f139_config", "line_number": 16, "usage_type": "name"}, {"api_name": "config.f139_logger.logger.info", "line_number": 19, "usage_type": "call"}, {"api_name": "config.f139_logger.logger", "line_number": 19, "usage_type": "attribute"}, {"api_name": "config.f139_logger", "line_number": 19, "usage_type": "name"}, {"api_name": "config.f139_logger.info", "line_number": 21, "usage_type": "call"}, {"api_name": "config.f139_logger", "line_number": 21, "usage_type": "name"}]} +{"seq_id": "126441919", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : dictionary1.py\n# @Author: Wendell\n# @Date : 11/1/18\n# @Desc :\nimport gi\n\ngi.require_version('Gtk', \"3.0\")\n\nfrom gi.repository import Gtk, Gdk, GObject\nfrom dict_meaning import get_meaning\n\n\nclass dictionary(GObject.GObject):\n\t__gsignals__ = {'my_hide_signal': (GObject.SIGNAL_RUN_FIRST, None, ())}\n\n\twindow = Gtk.Window()\n\tsearch_entry = Gtk.SearchEntry()\n\tmeaning_view = Gtk.TextView()\n\tclip = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)\n\twidth = 0\n\theight = 0\n\n\tdef __init__(self):\n\t\tGObject.GObject.__init__(self)\n\t\tself.clip.connect('owner-change', self.clipboard_change)\n\t\tbuilder = Gtk.Builder()\n\t\tbuilder.add_from_file('dictionary_ui.glade')\n\t\tbuilder.connect_signals(self)\n\t\tself.window = builder.get_object('dictionary')\n\t\tself.search_entry = builder.get_object('search_entry')\n\t\tself.search_entry.set_text('Input Word')\n\t\tself.meaning_view = builder.get_object('meaning_view')\n\t\tself.meaning_view.get_buffer().set_text('hello world!')\n\t\t# buffer.set_text('hhhhhhhhh',-1)\n\t\t# self.meaning_view.set_text('Hello World')\n\t\tself.window.connect(\"destroy\", Gtk.main_quit)\n\t\tself.window.set_opacity(0.9)\n\t\taccel = Gtk.AccelGroup()\n\t\tkey, mods = Gtk.accelerator_parse('Escape')\n\t\taccel.connect(\n\t\t\tkey, mods, Gtk.AccelFlags.VISIBLE,\n\t\t\tlambda accel_group, acceleratable, keyval, modifier: self.emit('my_hide_signal')\n\t\t)\n\t\tself.window.add_accel_group(accel)\n\t\tself.search_entry.connect(\"activate\", self.search)\n\t\tself.show()\n\n\n\tdef search(self, *args):\n\t\tsearch_word = self.search_entry.get_text()\n\t\tmeaning = get_meaning(search_word)\n\t\tif meaning is not None:\n\t\t\ttext = ''\n\t\t\tfor i in meaning:\n\t\t\t\ttext += i\n\t\t\t\ttext += '\\n'\n\t\t\t# self.meaning_view.set_text(text)\n\t\t\tself.meaning_view.get_buffer().set_text(text)\n\t\tself.search_entry.set_text(search_word)\n\n\tdef clipboard_change(self, *args):\n\t\ttext = self.clip.wait_for_text()\n\t\ttext.strip()\n\t\tself.search_word = text\n\t\tself.search_entry.set_text(text)\n\t\tself.search()\n\t\tself.show()\n\n\tdef do_my_hide_signal(self):\n\t\tself.window.hide()\n\n\tdef 
show(self):\n\t\tself.window.set_keep_above(True)\n\t\tself.window.move(self.window.get_screen().get_width() - self.width,\n\t\t\t\t\t\t self.window.get_screen().get_height() - self.height)\n\t\tself.window.show_all()\n\n\nif __name__ == '__main__':\n\tm_dictionary = dictionary()\n\tGtk.main()\n", "sub_path": "gtk/dictionary.py", "file_name": "dictionary.py", "file_ext": "py", "file_size_in_byte": 2322, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "gi.require_version", "line_number": 9, "usage_type": "call"}, {"api_name": "gi.repository.GObject.GObject", "line_number": 15, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 15, "usage_type": "name"}, {"api_name": "gi.repository.GObject.SIGNAL_RUN_FIRST", "line_number": 16, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 16, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Window", "line_number": 18, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 18, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.SearchEntry", "line_number": 19, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 19, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.TextView", "line_number": 20, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 20, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Clipboard.get", "line_number": 21, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Clipboard", "line_number": 21, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 21, "usage_type": "name"}, {"api_name": "gi.repository.Gdk.SELECTION_CLIPBOARD", "line_number": 21, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 21, "usage_type": "name"}, {"api_name": "gi.repository.GObject.GObject.__init__", "line_number": 26, "usage_type": "call"}, {"api_name": "gi.repository.GObject.GObject", "line_number": 26, "usage_type": "attribute"}, {"api_name": "gi.repository.GObject", "line_number": 26, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Builder", "line_number": 28, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 28, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.main_quit", "line_number": 38, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 38, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.AccelGroup", "line_number": 40, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 40, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.accelerator_parse", "line_number": 41, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 41, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.AccelFlags", "line_number": 43, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 43, "usage_type": "name"}, {"api_name": "dict_meaning.get_meaning", "line_number": 53, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.main", "line_number": 83, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 83, "usage_type": "name"}]} +{"seq_id": "396746524", "text": "import linecache\nimport os\nimport pickle\nimport xlwt\n\npicklefile = open('/home/songjz671/covariancesurfaceout/surface.pickle','rb')\nsurface = pickle.load(picklefile)\n#print(surface)\n#print(surface['1ACB_E'])\nmisurface = {}\nfilelist=[]\nfor root,dirs,files in os.walk('/home/songjz671/covarianceout/mi'):\n 
for file in files:\n filelist.append(os.path.join(root,file))\nfor each in filelist:\n #w=xlwt.Workbook()\n #ws = w.add_sheet('covariance') \n filename=each.split('/')[-1]\n proteinid = filename.split('.')[0]\n a=linecache.getlines(each)\n pointresult={}\n surfaceresult={}\n for i in range(1,len(a)):\n part1 = a[i].split()[0]\n part2 = a[i].split()[1]\n if part1 not in pointresult.keys():\n pointresult[part1] = 0\n if part2 not in pointresult.keys():\n pointresult[part2] = 0 \n num = len(pointresult.keys()) - 1\n for each_item in pointresult.keys():\n for each_one in range(1,len(a)):\n partone = a[each_one].split()[0]\n parttwo = a[each_one].split()[1]\n if each_item == partone or each_item == parttwo:\n pointresult[each_item] = pointresult[each_item] + float(a[each_one].split()[2])\n pointresult[each_item] = pointresult[each_item] / num\n #if each_item not in surface[proteinid]:\n #del pointresult[each_item]\n try:\n for eachsurface in surface[proteinid]:\n try:\n surfaceresult[eachsurface] = pointresult[eachsurface]\n except:\n print('no this site:'+proteinid+' '+eachsurface)\n except:\n print('no this file:'+proteinid)\n misurface[proteinid] = surfaceresult\n '''\n result=sorted(surfaceresult.items(), key=lambda d:d[1], reverse = True)\n row = 0\n for key,value in result:\n ws.write(row,0,key)\n ws.write(row,1,value)\n row = row + 1\n w.save('/home/songjz671/covariancesurfaceout/mi/'+proteinid+'.xls') \n #print(pointresult)\n '''\noutfile=open('/home/songjz671/covariancesurfaceout/misurface.pickle','wb')\npickle.dump(misurface,outfile)\n", "sub_path": "OMPContact/covariance/misurfaceparser.py", "file_name": "misurfaceparser.py", "file_ext": "py", "file_size_in_byte": 2118, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pickle.load", "line_number": 7, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "linecache.getlines", "line_number": 20, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "429571824", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 20 11:54:16 2020\n\n@author: Freedom\n\"\"\"\n\nimport torch\nimport pandas as pd\nimport numpy as np\nimport torch.nn as nn\n\n\nclass LSTMNet(nn.Module):\n def __init__(self, input_dim, hidden_dim, output_dim, n_layers, drop_prob=0.2):\n super(LSTMNet, self).__init__()\n self.hidden_dim = hidden_dim\n self.n_layers = n_layers\n\n self.lstm = nn.LSTM(input_dim, hidden_dim, n_layers, batch_first=True, dropout=drop_prob)\n self.fc = nn.Linear(hidden_dim, output_dim)\n self.relu = nn.ReLU()\n\n def forward(self, x, h):\n out, h = self.lstm(x, h)\n out = self.fc(self.relu(out[:, -1]))\n return out, h\n\n def init_hidden(self, batch_size):\n weight = next(self.parameters()).data\n hidden = (weight.new(self.n_layers, 1, self.hidden_dim).zero_(),\n weight.new(self.n_layers, 1, self.hidden_dim).zero_())\n return hidden\n\nis_cuda = torch.cuda.is_available()\n\n# If we have a GPU available, we'll set our device to GPU. 
We'll use this device variable later in our code.\nif is_cuda:\n device = torch.device(\"cuda\")\nelse:\n device = torch.device(\"cpu\")\n \nPATH = \"modelLSTM/entire_model2.pt\"\nmodel = torch.load(PATH)\nmodel.eval()\nt = np.load('test_X.npy' )\n\ndef pred(model, test_x):\n model.eval()\n inp = torch.from_numpy(np.expand_dims(test_x, axis=0))\n h = model.init_hidden(inp.shape[0])\n out, h = model(inp.to(device).float(), h)\n return np.squeeze(out.detach().numpy())\n\nprediction = pred(model, t)\n", "sub_path": "DataSet and Data_prepariton/__copy_of_mreza__/probica_izbrisi me kada zavrsis.py", "file_name": "probica_izbrisi me kada zavrsis.py", "file_ext": "py", "file_size_in_byte": 1547, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.nn.Module", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "16918111", "text": "import os, csv, numpy, openpyxl;\r\ndef GC(foldernamebyte, MolFractionN2, recordfilenameByte):\r\n \r\n ResponseFactors= [0.845, 1.031, 1.358] #response factors for Co2 O2 and Ch4\r\n \r\n foldername=foldernamebyte.decode('ASCII')\r\n recordfilename=recordfilenameByte.decode('ASCII')\r\n \r\n #this line of code changes the directory, or folder that the code is working in, you need to change the directory to the folder where the GC file is\r\n directory='C:/CDSProjects/Methane Oxidation/Results/'\r\n end='.rslt'\r\n fulldirectoryname=directory+foldername+end\r\n \r\n endoffile='_2_Short_Area_1'\r\n filename=foldername+endoffile\r\n \r\n os.chdir(fulldirectoryname)\r\n \r\n #this line of code lists all the files in the directory\r\n CWDfiles=os.listdir(\"./\")#returns list of files in CWD\r\n #this line of code prints to the screen the names of all the files in the directory, this line should be commented so it is not being used \r\n #print (\"All items in the CWD incldue \"+str(CWDfiles))#this is how you combine text with variable strings in Python\r\n \r\n #this for loop searches for the file name you are looking for in the current directory. then it reads all the entries in that full into the variable full_file\r\n for entry in CWDfiles:\r\n root, ext = os.path.splitext(entry)\r\n #print(root) #shows just the root (name) of each file in CWD\r\n #print(ext) #shows the extension, the part after the . 
of each file for example .csv or .xlsx or .pdf\r\n if ext == \".CSV\" and filename in entry:#here sample is the target file name, it needs to be changed for new files. It only reports CSV files which have the right name\r\n #print(\"file is found\")\r\n with open(entry, 'r') as inputfile:\r\n reader = csv.reader(inputfile)\r\n full_file = list(reader)\r\n #else:\r\n #print(\"no file of that name found\")\r\n \r\n #We need to create empty vectors that we will later fill with the useful infomration from the GC file \r\n RT=[] #residence time\r\n W =[] #width\r\n A =[] #area\r\n H =[] #height\r\n Apercent = [] #area percent\r\n N =[] #name\r\n \r\n #we start at 10 (which is actually 11 as python starts at 0 not 1), as the first 10 rows only have text\r\n #We end at -2 because the last two rows are empty\r\n for i in range (10, len(full_file)-2):\r\n RT.append(full_file[i][0])\r\n W.append(full_file[i][2])\r\n A.append(full_file[i][3])\r\n H.append(full_file[i][4])\r\n Apercent.append(full_file[i][5])\r\n N.append(full_file[i][6])\r\n inputfile.close()\r\n \r\n ResTime=numpy.array(RT,dtype=float)\r\n Width =numpy.array(W,dtype=float)\r\n Area =numpy.array(A,dtype=float)\r\n Heigth =numpy.array(H,dtype=float)\r\n Areapercent=numpy.array(Apercent,dtype=float)\r\n \r\n #This code is to identify the correct species\r\n MolFracCO2=0.0\r\n MolFracO2=0.0\r\n MolFracCH4=0.0\r\n \r\n #you need 2 loops because you must do nitrogen first\r\n for i in range(0,len(N)):\r\n if N[i]=='N2':\r\n indexN2=i\r\n AreaN2=Area[indexN2]\r\n \r\n \r\n #now you can do other species\r\n for i in range(0,len(N)): \r\n if N[i]=='CO2':\r\n indexCO2=i\r\n AreaCO2=Area[indexCO2]\r\n AreaRatioCO2=AreaCO2/AreaN2\r\n MolFracCO2=AreaRatioCO2*MolFractionN2*ResponseFactors[0]\r\n \r\n \r\n if N[i]=='O2':\r\n indexO2=i\r\n AreaO2=Area[indexO2]\r\n AreaRatioO2=AreaO2/AreaN2\r\n MolFracO2=AreaRatioO2*MolFractionN2*ResponseFactors[1]\r\n \r\n \r\n if N[i]=='CH4':\r\n indexCH4=i\r\n AreaCH4=Area[indexCH4]\r\n AreaRatioCH4=AreaCH4/AreaN2\r\n MolFracCH4=AreaRatioCH4*MolFractionN2*ResponseFactors[2]\r\n \r\n \r\n #This code puts the correct area to the correct species\r\n \r\n \r\n \r\n #append results to a different file\r\n \r\n #change directory to get to different folder\r\n os.chdir('C:/Users/Solomon/Documents/Experimental conditions')\r\n \r\n #loading files\r\n wbwrite=openpyxl.load_workbook(recordfilename)\r\n #print (wb.sheetnames)\r\n wswrite=wbwrite.active #opens the first sheet in the wb\r\n \r\n #Mainipulating Files\r\n row_count = wswrite.max_row\r\n wswrite['H'+str(row_count-1)]=MolFracCO2 #using the excel numbering system \r\n wswrite['I'+str(row_count-1)]=MolFracO2 #using the excel numbering system \r\n wswrite['J'+str(row_count-1)]=MolFracCH4 #using the excel numbering system \r\n \r\n #Saving File\r\n wbwrite.save(recordfilename)# overwrites without warning. 
So be careful\r\n    \r\n    return MolFracCO2, MolFracO2, MolFracCH4\r\n    ", "sub_path": "Methane/Methane_GC_forCSV.py", "file_name": "Methane_GC_forCSV.py", "file_ext": "py", "file_size_in_byte": 4782, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.chdir", "line_number": 17, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 20, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "csv.reader", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 104, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "262055111", "text": "# Global imports\nimport colander\nfrom translationstring import TranslationStringFactory\nimport pytz\n_ = TranslationStringFactory('deform')\n\n# Local import\nfrom facile.forms.Deform import Form\nutc = pytz.UTC\n\n\nclass DocumentForm(Form):\n\n    def __init__(self, request, static_path, **kwargs):\n\n        self.mapping_name = {'index': None, 'document': None}\n\n        Form.__init__(self, request, static_path, self.get_schema(kwargs['nodes']), buttons=('Editer le document',))\n\n    @staticmethod\n    def get_schema(l_nodes):\n        schema_ = colander.Schema()\n        for node in l_nodes:\n            schema_.add(node)\n\n        return schema_\n", "sub_path": "facile/forms/document.py", "file_name": "document.py", "file_ext": "py", "file_size_in_byte": 641, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "translationstring.TranslationStringFactory", "line_number": 5, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 9, "usage_type": "attribute"}, {"api_name": "facile.forms.Deform.Form", "line_number": 12, "usage_type": "name"}, {"api_name": "facile.forms.Deform.Form.__init__", "line_number": 18, "usage_type": "call"}, {"api_name": "facile.forms.Deform.Form", "line_number": 18, "usage_type": "name"}, {"api_name": "colander.Schema", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "609942019", "text": "import argparse, pickle\nfrom spt3g import core, todfilter, calibration, coordinateutils, mapmaker, frbutils\nimport random, time\nimport numpy as np\n#fixed_rates, n_iterations, confidence_percent\n\n#first one was 0.45\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--exposure_ldf_pkl\", required=True)\nparser.add_argument(\"--measure_ldf_pkl\", required=True)\nparser.add_argument(\"--output_file\", required=True)\n\nparser.add_argument(\"--confidence_percent\", required=True, type=float)\nparser.add_argument(\"--n_iterations\", required=True, type=int)\n\nparser.add_argument(\"--max_fixed_rates\", required=True, type=float)\nparser.add_argument(\"--fixed_step\", required=True, type=float)\nparser.add_argument(\"--seed\", required=True, type=int)\n\nparser.add_argument(\"--use_low_sig_ldf_func\", type=int, default = 0)\nparser.add_argument(\"--low_sig_ldf_roll_off\", type=float, default = 0.55)\n\n\nargs = parser.parse_args()\n\n
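# both RNGs are seeded from the job seed times the wall clock; the modulus\n# 4294967295 (2**32 - 1) keeps the result inside numpy's allowed seed range\n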
random.seed((args.seed * int(time.time())) % 4294967295 )\nnp.random.seed((args.seed * int(time.time())) % 4294967295 )\n\n#unlabeled was 0.45\ndef fitting_func_low_sig(x, ps, exp_val = args.low_sig_ldf_roll_off):\n    tval = np.abs(ps[0]) * np.exp(-1 * abs(exp_val * x) )\n    return tval\n\ndef fitting_func_poly(x, ps):\n    tval = ps[0] + 0*x\n    return tval\n\n\nfixed_rates = np.arange(0,args.max_fixed_rates, args.fixed_step)\n\nprint('EXPOSURE', args.exposure_ldf_pkl)\nprint('MEASURE', args.measure_ldf_pkl)\ndummy, ldf_to_get_exposure, __ = pickle.load(open(args.exposure_ldf_pkl, 'rb'))\ncounts, ldf, __ = pickle.load(open(args.measure_ldf_pkl, 'rb'))\n\n\nrates, ordering, bg_rates, counts = frbutils.frbbackgrounds.get_fc_rates_and_orders(\n    ldf, ldf_to_get_exposure, \n    counts, fixed_rates, args.n_iterations, \n    args.confidence_percent, degradation_factor = 100,\n    fitting_func = fitting_func_poly, max_x = 8\n    )\n\npickle.dump( (rates, ordering, bg_rates, counts), open(args.output_file, 'wb'))\n", "sub_path": "scratch/nlharr/frbhunt/bgestimation/frbbgmonte.py", "file_name": "frbbgmonte.py", "file_ext": "py", "file_size_in_byte": 1957, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 8, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 26, "usage_type": "call"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 27, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 41, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 45, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 46, "usage_type": "call"}, {"api_name": "spt3g.frbutils.frbbackgrounds.get_fc_rates_and_orders", "line_number": 49, "usage_type": "call"}, {"api_name": "spt3g.frbutils.frbbackgrounds", "line_number": 49, "usage_type": "attribute"}, {"api_name": "spt3g.frbutils", "line_number": 49, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "608787332", "text": "# pylint: disable=C0325\r\nr\"\"\"\r\nScript for SQLite\r\n\"\"\"\r\nimport sqlite3\r\n\r\ndef create_table():\r\n    \"\"\"run create_table\"\"\"\r\n    conn = sqlite3.connect(\"files/lite.db\")\r\n    cur = conn.cursor()\r\n    cur.execute(\"CREATE TABLE IF NOT EXISTS store(item TEXT, quantity INTEGER, price REAL)\")\r\n    conn.commit()\r\n    conn.close()\r\n\r\ndef insert(item, quantity, price):\r\n    \"\"\"insert\"\"\"\r\n    conn = sqlite3.connect(\"files/lite.db\")\r\n    cur = conn.cursor()\r\n    cur.execute(\"INSERT INTO store VALUES(?,?,?)\", (item, quantity, price))\r\n    conn.commit()\r\n    conn.close()\r\n\r\ndef view():\r\n    \"\"\"view\"\"\"\r\n    conn = sqlite3.connect(\"files/lite.db\")\r\n    cur = conn.cursor()\r\n    cur.execute(\"SELECT * FROM store\")\r\n    rows = cur.fetchall()\r\n    conn.close()\r\n    return rows\r\n\r\ndef delete(item):\r\n    \"\"\"delete\"\"\"\r\n    conn = sqlite3.connect(\"files/lite.db\")\r\n    cur = conn.cursor()\r\n    cur.execute(\"DELETE FROM store WHERE item = ?\", (item,))\r\n    conn.commit()\r\n    conn.close()\r\n\r\n
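def find(item):\r\n    \"\"\"find - a hypothetical extra example (same pattern as view); note that\r\n    sqlite3 parameters must be a sequence, hence the one-element tuple (item,)\"\"\"\r\n    conn = sqlite3.connect(\"files/lite.db\")\r\n    cur = conn.cursor()\r\n    cur.execute(\"SELECT * FROM store WHERE item = ?\", (item,))\r\n    rows = cur.fetchall()\r\n    conn.close()\r\n    return rows\r\n\r\n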
def update(item, quantity, price):\r\n    \"\"\"update\"\"\"\r\n    conn = sqlite3.connect(\"files/lite.db\")\r\n    cur = conn.cursor()\r\n    cur.execute(\"UPDATE store SET quantity = ?, price = ? WHERE item = ?\", (quantity, price, item))\r\n    conn.commit()\r\n    conn.close()\r\n\r\n# update(\"Coffe Cup\", 11, 24)\r\n# delete(\"Water Glass\")\r\n# insert(\"Coffe Cup\", 10, 5)\r\nprint(view())\r\n\r\n", "sub_path": "section14/start1.py", "file_name": "start1.py", "file_ext": "py", "file_size_in_byte": 1290, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sqlite3.connect", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 17, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "380862587", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport uncertainties.unumpy as unp\nfrom uncertainties import ufloat\nfrom scipy.stats import stats\n\n#background measurement, 240 s, N=216\nN_U_Fehler = 216**(1/2)/240*20\nN_U = 216/240*20\n\nt, N_mitU = np.genfromtxt('Vanadium.txt', unpack=True)\n\n#subtract the background radiation + error propagation\nN = N_mitU - N_U\nN_mitU_Fehler = N_mitU**(1/2)\nN_Fehler = (N_mitU_Fehler**2 + N_U_Fehler**2)**(1/2)\nlnN = np.log(N)\nlnN_Fehler = N_Fehler/N\n\n#linear fit\n\n#plt.yscale('log')\nL1plot = np.linspace(0, 1050)\ndef f1(t, l, b):\n    return l*t+b\nparamsI, covarianceI = curve_fit(f1, t, lnN)\nerrorsI = np.sqrt(np.diag(covarianceI))\nb = ufloat(paramsI[1], errorsI[1])\nl = ufloat(paramsI[0], errorsI[0])\nprint('l= ', l)\nprint('b= ', b)\n#conversions and prints\nN_0 = unp.exp(b)\nN_U_mitFehler=ufloat(N_U, N_U_Fehler)\nprint('Background radiation: ', N_U_mitFehler)\nprint('N_0: ', N_0)\nprint('lambda: ', l)\nT = np.log(0.5)/l\nprint('T1/2=', T)\n#table\n\nx1 = np.round_(N_mitU, decimals=3)\nx2 = np.round_(N_mitU_Fehler, decimals=3)\ny1 = np.round_(N, decimals=3)\ny2 = np.round_(N_Fehler, decimals=3)\nz1 = np.round_(lnN, decimals=3)\nz2 = np.round_(lnN_Fehler, decimals=3)\n\nnp.savetxt('Vanadiumtabelle.txt', np.column_stack([t, x1, x2, y1, y2, z1, z2]),\n        delimiter=' & ', newline= r' \\\\'+'\\n', fmt='%.2f')\n\n#plot\n\nplt.errorbar(t, lnN, yerr=lnN_Fehler, fmt='.b', label = \"Measured values with error bars\")\nplt.plot(L1plot, f1(L1plot, *paramsI) , 'g-', label = \"Linear regression\")\nplt.xlabel(r\"$ t / \\mathrm{s}$\")\nplt.ylabel(r\"$ \\mathrm{ln}(N_{\\mathrm{\\Delta t \\, Vanadium}}) $\")\nplt.tight_layout()\nplt.legend(loc=\"best\")\nplt.savefig('Vanadium.pdf')\n", "sub_path": "V702/Vanadium.py", "file_name": "Vanadium.py", "file_ext": "py", "file_size_in_byte": 1706, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.genfromtxt", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 28, "usage_type": "call"}, {"api_name": "uncertainties.ufloat", "line_number": 29, "usage_type": "call"}, {"api_name": "uncertainties.ufloat", "line_number": 30, "usage_type": "call"}, {"api_name": "uncertainties.unumpy.exp", "line_number": 34, "usage_type": "call"}, {"api_name": "uncertainties.unumpy", 
"line_number": 34, "usage_type": "name"}, {"api_name": "uncertainties.ufloat", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.round_", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.round_", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.round_", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.round_", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.round_", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.round_", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 61, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "286678572", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nscikit-learnの機械学習でよく使われるものを関数化\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import SVC\nfrom sklearn.cross_validation import train_test_split, StratifiedKFold\nfrom sklearn.metrics import accuracy_score, classification_report, confusion_matrix\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\nimport seaborn as sns\n\n\n# from malss import MALSS\n\n\ndef smart(lists: list):\n \"\"\"A1 A10 A2みたいなものをスマートに並び替える\"\"\"\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n lists.sort(key=alphanum_key)\n return lists\n\n\ndef sort_smart(lists: list):\n try:\n return smart(lists)\n except:\n return sorted(lists)\n\n\ndef plot_confusion_matrix(cm: np.ndarray, genre_list: list):\n plt.clf()\n plt.matshow(cm, fignum=False, cmap=\"Blues\", vmin=0, vmax=1.0)\n plt.xticks(list(range(len(genre_list))), genre_list, rotation=90, verticalalignment='bottom')\n plt.yticks(list(range(len(genre_list))), genre_list)\n plt.title(\"confusion_matrix\")\n plt.colorbar()\n plt.grid(False)\n plt.xlabel(\"Predicted class\")\n plt.ylabel(\"True class\")\n plt.grid(False)\n plt.show()\n\n\nclass Classification():\n \"\"\"使用関数のクラス化\"\"\"\n\n def __init__(self, train: np.ndarray, train_label: np.ndarray):\n \"\"\"\n :param train: 学習データ\n :param train_label: 正解データ\n \"\"\"\n self.train = train\n self.train_label = train_label\n\n def set_train(self, train: np.ndarray, train_label: np.ndarray):\n \"\"\"\n :param 
train: training data\n        :param train_label: ground-truth labels\n        \"\"\"\n        self.train = train\n        self.train_label = train_label\n\n    def set_test(self, test: np.ndarray, test_label: np.ndarray):\n        \"\"\"\n        Set the test data\n        :param test: test data\n        :param test_label: ground-truth labels\n        \"\"\"\n        self.test = test\n        self.test_label = test_label\n\n    def set_split(self, train: np.ndarray, train_label: np.ndarray, test_size: float = 0.25):\n        \"\"\"\n        Split the data into a training and a test set\n        :param train: training data\n        :param train_label: ground-truth labels\n        :param test_size: fraction of the data used for testing\n        \"\"\"\n        self.train, self.test, self.train_label, self.test_label = \\\n            train_test_split(train, train_label, test_size=test_size, random_state=0)\n\n    def set_classifier(self, clf):\n        self.clf = clf\n\n    def svm_gridsearch(self, n: int = 5):\n        \"\"\"\n        :param n: number of cross-validation folds\n        \"\"\"\n        tuned_parameters = [{'kernel': ['rbf'], 'gamma': np.logspace(-4, -2, 10),\n                             'C': np.r_[np.logspace(0, 2, 10), np.logspace(2, 3, 10)]},\n                            {'kernel': ['linear'], 'gamma': np.logspace(-4, -2, 10),\n                             'C': np.r_[np.logspace(0, 2, 10), np.logspace(2, 3, 10)]}]\n\n        cv = StratifiedKFold(self.train_label, n_folds=n, shuffle=True)\n        clf = GridSearchCV(SVC(probability=True, class_weight='balanced', decision_function_shape='ovr'),\n                           tuned_parameters, cv=cv, n_jobs=-1)\n\n        print(\"grid search...\")\n        clf.fit(self.train, self.train_label)\n        print(clf.best_estimator_)\n        self.clf = clf.best_estimator_\n        self.bestclf = clf.best_estimator_\n        print(\"set classifier\")\n\n    def lr_gridsearch(self, n: int = 5):\n        \"\"\"\n        :param n: number of cross-validation folds\n        \"\"\"\n        parameters = {'penalty': [\"l1\", \"l2\"],\n                      'C': np.r_[np.logspace(0, 2, 50), np.logspace(2, 3, 50)],\n                      'class_weight': [None, \"auto\"]}\n\n        cv = StratifiedKFold(self.train_label, n_folds=n, shuffle=True)\n        clf = GridSearchCV(LogisticRegression(multi_class='multinomial', solver='lbfgs'), parameters, cv=cv, n_jobs=-1)\n\n        print(\"grid search...\")\n        clf.fit(self.train, self.train_label)\n        print(clf.best_estimator_)\n        self.clf = clf.best_estimator_\n        self.bestclf = clf.best_estimator_\n        print(\"set classifier\")\n\n    def cv(self, k: int = 5):\n        \"\"\"\n        Run k-fold cross-validation\n        :param k: number of folds\n        \"\"\"\n        self.key = sort_smart(sorted(list(set(self.train_label))))\n        self.conf_mat = np.zeros((len(self.key), len(self.key)))\n        self.miss = []\n\n        self.merge_true = np.array([])\n        self.merge_pred = np.array([])\n\n        cv = StratifiedKFold(self.train_label, n_folds=k, shuffle=True)\n\n        for train_index, test_index in cv:\n            cv_train = self.train[train_index]\n            cv_trainlabel = self.train_label[train_index]\n            cv_test = self.train[test_index]\n            cv_testlabel = self.train_label[test_index]\n\n            self.clf.fit(cv_train, cv_trainlabel)\n            cv_pred = self.clf.predict(cv_test)\n\n            for i in range(0, len(cv_testlabel)):\n                if cv_testlabel[i] != cv_pred[i]:\n                    self.miss.append([test_index[i], cv_testlabel[i], cv_pred[i]])\n\n            self.merge_true = np.hstack([self.merge_true, cv_testlabel])\n            self.merge_pred = np.hstack([self.merge_pred, cv_pred])\n            # print classification_report(cv_testlabel,cv_pred)\n            cm = confusion_matrix(cv_testlabel, cv_pred, self.key)\n            self.conf_mat = self.conf_mat + cm\n        # scores = cross_validation.cross_val_score(self.clf,self.train,self.train_label,cv=cv)\n        # print \"\\nAccuracy: %0.2f (+/- %0.2f)\" % (scores.mean(),scores.std() * 2)\n        print('\\nfinal classification report\\n')\n        print('accuracy score:', accuracy_score(self.merge_true, self.merge_pred), '\\n')\n        print(classification_report(self.merge_true, self.merge_pred, labels=self.key))\n        self.conf_mat = np.array([list(c / float(sum(c))) for c in self.conf_mat])\n        
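# each row of conf_mat now sums to 1: entry (i, j) is the fraction of\n        # true-class-i samples that were predicted as class j\n        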
plot_confusion_matrix(self.conf_mat, self.key)\n        self.miss.sort(key=lambda x: x[0])\n\n    def prediction(self):\n        self.clf.fit(self.train, self.train_label)\n        pred = self.clf.predict(self.test)\n        print(classification_report(self.test_label, pred))\n        plot_confusion_matrix(confusion_matrix(self.test_label, pred), self.key)\n\n\ndef report_classification(train: np.ndarray, train_label: np.ndarray, name: str = 'result_classification'):\n    \"\"\"\n    Uses the MALSS tool\n    http://qiita.com/canard0328/items/5da95ff4f2e1611f87e1\n    :param name: output file name\n    :param train_label: ground-truth labels\n    :param train: training data\n    \"\"\"\n    cls = MALSS('classification', standardize=False, n_jobs=-1, random_state=0, lang='jp')\n    cls.fit(train, train_label, name)\n", "sub_path": "classification.py", "file_name": "classification.py", "file_ext": "py", "file_size_in_byte": 6899, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.ndarray", "line_number": 36, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.matshow", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 69, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 78, "usage_type": "attribute"}, {"api_name": "sklearn.cross_validation.train_test_split", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 96, "usage_type": "attribute"}, {"api_name": "numpy.logspace", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.logspace", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 98, "usage_type": "attribute"}, {"api_name": "numpy.logspace", 
"line_number": 98, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.grid_search.GridSearchCV", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 116, "usage_type": "attribute"}, {"api_name": "numpy.logspace", "line_number": 116, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", "line_number": 119, "usage_type": "call"}, {"api_name": "sklearn.grid_search.GridSearchCV", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 139, "usage_type": "call"}, {"api_name": "sklearn.cross_validation.StratifiedKFold", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 157, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 159, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 164, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 166, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 173, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 174, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 177, "usage_type": "attribute"}]} +{"seq_id": "476401128", "text": "from django.urls import path\n\nfrom . 
import views\n\napp_name = 'professionalskills'\n\nurlpatterns = [\n path('filetype/create/', views.create_filetype, name='filetypecreate'),\n path('filetype/edit//', views.edit_filetype, name='filetypeedit'),\n path('filetype/grading//', views.list_filetype_aspects, name='filetypeaspects'),\n path('filetype/grading/add//', views.add_filetype_aspect, name='addaspect'),\n path('filetype/grading/edit//', views.edit_filetype_aspect, name='editaspect'),\n path('filetype/grading/delete//', views.delete_filetype_aspect, name='deleteaspect'),\n path('filetype/delete//', views.delete_filetype, name='filetypedelete'),\n path('filetype/list/', views.list_filetypes, name='filetypelist'),\n path('studentfiles//', views.list_student_files, name='liststudentfiles'),\n path('files/', views.list_own_files, name='listownfiles'),\n path('files/type//all/', views.list_files_of_type, name='listfileoftype'),\n path('files/type//missing/', views.list_missing_of_type, name='listmissingoftype'),\n path('file//respond/', views.respond_file, name='respondfile'),\n path('file//', views.view_response, name='viewresponse'),\n\n path('mail/overdue/', views.mail_overdue_students, name='mailoverduestudents'),\n path('print/forms/', views.print_forms, name='printprvforms'),\n path('download/type//', views.download_all_of_type, name='downloadall'),\n\n path('group/create//', views.create_group, name='creategroup'),\n path('group/create/', views.create_group, name='creategroup'),\n path('group/edit//', views.edit_group, name='editgroup'),\n path('group/listall//', views.list_groups, name='listgroups'),\n path('group/assign//', views.assign, name='assignshuffle'),\n path('group/', views.list_own_groups, name='listowngroups'),\n # path('group/switch//)/', views.switch_group, name='switchgroups'),\n path('group/switch//', views.switch_group, name='switchgroups'),\n path('group/members//', views.list_group_members, name='listgroupmembers'),\n\n path('extensions/', views.edit_extensions, name='extensions'),\n]\n", "sub_path": "professionalskills/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 
27, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 31, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 34, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 35, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "150830899", "text": "import folium as fl\nimport pandas as pd\n\n# Initialization - data import and initial map creation\nmy_map = fl.Map(location=[65.089959, 134.153830], zoom_start=4)\nvol_df = pd.read_csv('Data/volcanoes_rus_clear.csv')\n\nlat = list(vol_df['lat'])\nlon = list(vol_df['lon'])\nname = list(vol_df['name'])\nelev = list(vol_df['height'])\n\nhtml = \"\"\"%s
def color_selector(el):\n    \"\"\" Select color of the marker depending on height\"\"\"\n\n    if el < 1000:\n        return 'green'\n    elif 1000 <= el < 2500:\n        return 'orange'\n    else:\n        return 'red'\n\n# Forming group of markers\nfgv = fl.FeatureGroup(name=\"Volcanoes\")\n\nfor n, lt, ln, el in zip(name, lat, lon, elev):\n\n    iframe = fl.IFrame(html=html % (n, n, el), width=150, height=80)\n    fgv.add_child(fl.CircleMarker(location=[lt, ln], popup=fl.Popup(iframe), color=color_selector(el), radius = 6, weight = 1, fill=True, fill_opacity = 0.8))\n\n# Adding group to the map and saving the map\nmy_map.add_child(fgv)\nmy_map.save('Results/Map2.html')\n", "sub_path": "app2_my_1.py", "file_name": "app2_my_1.py", "file_ext": "py", "file_size_in_byte": 1087, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "folium.Map", "line_number": 5, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "folium.FeatureGroup", "line_number": 27, "usage_type": "call"}, {"api_name": "folium.IFrame", "line_number": 31, "usage_type": "call"}, {"api_name": "folium.CircleMarker", "line_number": 32, "usage_type": "call"}, {"api_name": "folium.Popup", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "620901408", "text": "#!/usr/bin/env python\n\n\"\"\"\nAdapted from a Jupyter Notebook to a .py script to be executed in the command line. \n\nInputs: a fastq file, Q-score filters each read, identifies and counts mutants in each sample. \nOutputs: a txt file with a list of amino acid mutants and one with nucleic acid mutants \n\n**USAGE**\n\tpython countingAlleles.py fastqFilePathName --sl sublibraryID --o outputPathName [--q qualityScoreThreshold] \n\n\n**Arguments**\n\tp\t\\*.fastq file of DNA sequences from Next-Generation Sequencing Run\n\t--sl\tDHFR sublibrary ('SL1', 'SL2', 'SL3','SL4') \n\t--o\tPath for output file containing a list of amino acid mutants and one with nucleic acid mutants\n\t--q\tMinimum Quality Score threshold. Default:20 (1/100 chance of incorrect base call)\n\n**Example**::\n\n    python countingAlleles.py path2fastq.fastq --sl sublibraryID --o path2output --q 30 \n\nBy Thuy N. Nguyen on 20191031\n\"\"\"\n\n#import modules\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nfrom Bio.Seq import Seq\nfrom Bio.Alphabet import generic_dna\nfrom itertools import islice\n\nparser = argparse.ArgumentParser()\nparser.add_argument('p', help = '\\*.fastq file of DNA sequences from Next-Generation Sequencing Run')\nparser.add_argument('--sl',help = 'DHFR sublibrary: SL1, SL2, SL3,SL4')\nparser.add_argument('--o',help = 'Path for output file containing a list of amino acid mutants and one with nucleic acid mutants')\nparser.add_argument('--q',default = '20',help = 'Minimum Quality Score threshold. Default:20 (1/100 chance of incorrect base call)')\nargs = parser.parse_args()\n\n\n#define core functions\ndef fileNames(line):\n    spLine = line.split('\\t')\n    fwdFileName = spLine[0]\n    sl_id = spLine[1]\n    outName = spLine[2].strip('\\n')\n    return fwdFileName, sl_id, outName\n\ndef qscore_filter(qscores,qthreshold = 20):\n    low_quality = 'False'\n    for qscore in qscores: \n        if (ord(qscore) -33) <= qthreshold: #ord() converts ASCII encoding to numerical qscore\n            low_quality = 'True'\n    return low_quality\n\n
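# note: FASTQ quality characters encode Phred scores with an ASCII offset of 33;\n# e.g. ord('5') - 33 = 20, so a read containing quality character '5' fails a\n# threshold of 20, since the comparison above is <=\n\n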
def return_stats():\n    print( 'read_total: ' + str(read_total)) \n    print( 'read_fail: ' + str(read_fail))\n    print( 'read_pass: '+ str(read_pass))\n    print( 'read_unassigned: '+ str(read_unassigned))\n    print( 'qscore_used was: '+ str(qthreshold))\n    print( str(percent_pass)+'% reads pass qscore filter threshold - '+str(qthreshold))\n\ndef trim_read(read,coding_region_index):\n    trim = read[coding_region_index[0]:coding_region_index[1]] \n    return trim\n\ndef translate_seq(seq):\n    seq = Seq(seq,generic_dna)\n    seq_translate = str(seq.translate().strip())\n    return seq_translate\n\n\ndef id_sublibrary(nuc_seq):\n    sl_match = 'na'\n    for sl in ['sl1','sl2','sl3','sl4']:\n        barcode = nuc_seq[barcode_ix[sl][0]:barcode_ix[sl][1]]\n        barcode_ref = barcode_region[sl]\n        if barcode == barcode_ref:\n            sl_match = sl\n    return sl_match\n\ndef identifyMutant(nuc_seq,nuc_ref,seq_len):\n    mutants = []\n    nuc_list = []\n    \n    aa_seq = translate_seq(nuc_seq)\n    aa_ref = translate_seq(nuc_ref)\n    \n    def fwdMut():\n        for ix,aa in enumerate(aa_seq): \n            codonIX = np.multiply(ix,3)\n            if nuc_seq[codonIX:(codonIX+3)] != nuc_ref[codonIX:(codonIX+3)]:\n                mutants.append(aa_ref[ix]+str(aa_pos[sl_id][ix])+aa)\n                nuc_list.append(str(aa_pos[sl_id][ix])+nuc_seq[codonIX:(codonIX+3)])\n    \n    #compare reference and sequence read\n    if aa_seq == aa_ref:\n        nuc_list.append('WT') \n        mutants.append('WT')\n    else:\n        fwdMut() \n    return mutants, nuc_list\n\ndef record_mut_counts(mutant_dict):\n    \n    #for each fastq file, make a fresh mutant_counts dictionary\n    #counting mutants from parsing the mutant dictionary \n\n    mutant_counts = {'sl1':{},'sl2':{},'sl3':{},'sl4':{},'cl':{}}\n\n    def count_mutant(mutant,sl):\n        if mutant in mutant_counts[sl].keys():\n            mutant_counts[sl][mutant] +=1\n        else:\n            mutant_counts[sl][mutant] = 1\n\n    for sl in mutant_dict.keys(): #iterate through each sub-library\n        for mut_list in mutant_dict[sl]:\n            if len(mut_list) == 1:\n                count_mutant(mut_list[0],sl)\n            elif len(mut_list) >=2: \n                count_mutant('fail_multimutant',sl)\n    \n    return mutant_counts\n\n\n\ndef record_nuc_counts(nuc_list_dict):\n    \n    #for each fastq file, make a fresh nuc_counts dictionary\n    #counting mutants from parsing the nucleotide dictionary \n\n    nuc_counts = {'sl1':{},'sl2':{},'sl3':{},'sl4':{},'cl':{}}\n\n    def count_nuc(nuc,sl):\n        if nuc in nuc_counts[sl].keys():\n            nuc_counts[sl][nuc] +=1\n        else:\n            nuc_counts[sl][nuc] = 1\n\n\n    for sl in nuc_list_dict.keys(): #iterate through each sub-library\n        for nuc_list in nuc_list_dict[sl]:\n            if len(nuc_list) == 1:\n                count_nuc(nuc_list[0],sl)\n            elif len(nuc_list) >=2: \n                count_nuc('fail_multimutant',sl)\n    \n    return nuc_counts\n\n\n\ndef writeOutputFile(outName):\n\n    #writes read statistics & mutant counts into one convenient .txt file \n    \n    output_file = open(outName+'.txt','w')\n    #write out statistics\n    output_file.write('read_total:\\t'+str(read_total)+'\\t'+str(sl_id)+'\\n')\n    output_file.write('read_fail:\\t'+str(read_fail)+'\\n')\n    output_file.write('read_pass:\\t'+str(read_pass)+'\\n')\n    
output_file.write('read_unassigned:\\t'+str(read_unassigned)+'\\n')\n output_file.write('qscore_used was:\\t'+str(qthreshold)+'\\n')\n output_file.write(str(percent_pass)+'\\t% reads pass qscore filter threshold -\\t'+str(qthreshold)+'\\n')\n\n #write out mutant counts \n for sl in mutant_counts.keys():\n for key in mutant_counts[sl].keys():\n output_file.write(sl+'\\t'+key+'\\t'+str(mutant_counts[sl][key])+'\\n')\n\n output_file.close()\n \ndef writeOutputFile_nuc(outName):\n\n #writes read statistics & mutant counts into one convenient .txt file \n \n output_file = open(outName+'nuc'+'.txt','w')\n #write out statistics\n output_file.write('read_total:\\t'+str(read_total)+'\\t'+str(sl_id)+'\\n')\n output_file.write('read_fail:\\t'+str(read_fail)+'\\n')\n output_file.write('read_pass:\\t'+str(read_pass)+'\\n')\n output_file.write('read_unassigned:\\t'+str(read_unassigned)+'\\n')\n output_file.write('qscore_used was:\\t'+str(qthreshold)+'\\n')\n output_file.write(str(percent_pass)+'\\t% reads pass qscore filter threshold -\\t'+str(qthreshold)+'\\n')\n\n #write out mutant counts \n for sl in nuc_counts.keys():\n for key in nuc_counts[sl].keys():\n output_file.write(sl+'\\t'+key+'\\t'+str(nuc_counts[sl][key])+'\\n')\n\n output_file.close()\n \n###### INITIALIZE ############\n# WITH WT AMPLICON SEQUENCE FROM 4N TO 4N AND INDICIES OF REGIONS IN READ YOU CARE ABOUT \n# sequence of the wt amplicon from 4N to 4N \n# see 190218_ampliconCodingIX and 190808_ampliconDesign\n\nwt_ref = {}\nwt_ref['sl1'] = 'NNNNACTTTAATAATGAGATATACCATGATCAGTCTGATTGCGGCGTTAGCGGTAGATCGCGTTATCGGCATGGAAAACGCCATGCCGTGGAACCTGCCTGCCGATCTCGCCTGGTTTAAACGCAACACCTTAAATAAACCCGTGATTATGGGCCGCCATACCTGGGAATCAATCNNNN'\nwt_ref['sl2'] = 'NNNNCACCTTAAATAAACCCGTGATTATGGGCCGCCATACCTGGGAATCAATCGGTCGTCCGTTGCCAGGACGCAAAAATATTATCCTCAGCAGTCAACCGGGTACGGACGATCGCGTAACGTGGGTGAAGTCGGTGGATGAAGCCATCGCGGCGTGTGGTGACGTACNNNN'\nwt_ref['sl3'] = 'NNNNGTGAAGTCGGTGGATGAAGCCATCGCGGCGTGTGGTGACGTACCAGAAATCATGGTGATTGGCGGCGGTCGCGTTTATGAACAGTTCTTGCCAAAAGCGCAAAAACTGTATCTGACGCATATCGACGCAGAAGTGGAAGGCGACACCCATTTCCNNNN'\nwt_ref['sl4'] = 'NNNNGCATATCGACGCAGAAGTGGAAGGCGACACCCATTTCCCGGATTACGAGCCGGATGACTGGGAATCGGTATTCAGCGAATTCCACGATGCTGATGCGCAGAACTCTCACAGCTATTGCTTTGAGATTCTGGAGCGGCGGTAACAGGCGTCGACAAGNNNN'\n\namplicon_length = {} #expecation, for filtering reads that are shorter/longer than you expect\nfor sl in ['sl1','sl2','sl3','sl4']:\n amplicon_length[sl] = len(wt_ref[sl])\n\n#indices of amplicon where the coding region of interest begins & ends in FWD READ\ncoding_ix = {}\ncoding_ix['sl1'] = [25,25+120] \ncoding_ix['sl2'] = [23,23+120]\ncoding_ix['sl3'] = [22,22+120]\ncoding_ix['sl4'] = [26,26+120]\n\n#assigning barcodes for each SL \nbarcode_ix = {}\nbarcode_ix['sl1'] = [4,25]\nbarcode_ix['sl2'] = [4,23]\nbarcode_ix['sl3'] = [4,22]\nbarcode_ix['sl4'] = [4,26]\n\n#pulling out the wt sequence of the coding regions of interest\ncoding_region = {}\nbarcode_region = {}\n\nfor sl in ['sl1','sl2','sl3','sl4']:\n coding_region[sl] = wt_ref[sl][coding_ix[sl][0]:coding_ix[sl][1]]\n barcode_region[sl] = wt_ref[sl][barcode_ix[sl][0]:barcode_ix[sl][1]]\n #OUT PUT AMINO ACID SEQUENCES \n print( sl+' sequence: ' + translate_seq(coding_region[sl])), (sl+ ' barcode: '+barcode_region[sl])\n\n#these cover positions 1-40 in DHFR\n#sl1_aa_pos = np.arange(1,40+1) #make a list from 1:40 the indicies of this will map to indicies in coding region\n\naa_pos = {}\naa_pos['sl1'] = np.arange(1,40+1) \naa_pos['sl2'] = np.arange(41,80+1)\naa_pos['sl3'] = np.arange(81,120+1)\naa_pos['sl4'] = 
np.arange(121,160+1)\n\nif args.p:\n    fastqFilePathName = args.p\n    if fastqFilePathName.split('.')[-1] == 'fastq':\n        qthreshold = int(args.q)\n        sample_sl_id = args.sl\n        outName = args.o\n        \n        # the next step is to open a fastq file and go through each read\n        print('Start reading file: %s' % fastqFilePathName)\n        openfastq = open(fastqFilePathName,'r')\n        fastqpath = openfastq.readlines()\n        totalFileReads = len(fastqpath)/4\n        openfastq.close()\n        mutant_dict = {'sl1':[],'sl2':[],'sl3':[],'sl4':[]}\n        nuc_dict = {'sl1':[],'sl2':[],'sl3':[],'sl4':[]}\n        \n        read_unassigned = 0\n        read_total = 0\n        read_pass = 0\n        read_fail = 0\n\n        with open(fastqFilePathName) as fwdFile:\n            #this bit is to read huge files 4 lines at a time. The second line should be coding \\\n            #Can implement line recognition. There is none here. Illumina + flash + ucombine outputs use the same order \\\n            #So no problem right now, just be aware. \n            while True:\n                next_n_lines= list(islice(fwdFile, 4))\n                if not next_n_lines:\n                    break\n                read_total += 1\n                fwd_seq = next_n_lines[1].strip('\\n')\n                qscore_line_fwd = next_n_lines[3]\n\n                if sample_sl_id == 'mix':\n                    sl_id = id_sublibrary(fwd_seq)\n                    if sl_id == 'na':\n                        read_unassigned+=1\n                        read_fail +=1\n                        continue\n                else:\n                    sl_id = sample_sl_id\n\n\n                #filter for amplicon sequence length\n                seq_length = len(fwd_seq)\n                if seq_length != amplicon_length[sl_id]:\n                    read_unassigned += 1\n                    read_fail +=1 \n                    continue\n\n                #trim to coding regions of interest\n                fwd_seq_trim = trim_read(fwd_seq,coding_ix[sl_id])\n                qscore_line_fwd_trim = trim_read(qscore_line_fwd,coding_ix[sl_id])\n                #call function for qscore filter \n                fail = qscore_filter(qscore_line_fwd_trim,qthreshold=qthreshold) \n\n                #give a progress report... \n                if read_total%(200000) == 0:\n                    print('On read %i of %i (%1.3f)' % (read_total,totalFileReads,float(read_total)/float(totalFileReads)))\n\n                if fail == 'True':\n                    read_fail+=1\n                    continue\n\n                else: \n                    #function for determining mutant\n                    reference_fwd = coding_region[sl_id]\n                    mutants, nuc_list = identifyMutant(fwd_seq_trim,reference_fwd,seq_length)\n\n                    mutant_dict[sl_id].append(mutants)\n                    nuc_dict[sl_id].append(nuc_list)\n        \n        mutant_counts = record_mut_counts(mutant_dict)\n        nuc_counts = record_nuc_counts(nuc_dict)\n        read_pass = float(read_total)-float(read_fail)\n        percent_pass = float(read_pass)*100/float(read_total)\n\n        return_stats()\n\n        #writing statistics and mutant counts to a txt file \n        writeOutputFile(outName)\n        writeOutputFile_nuc(outName)\n    \n    else: \n        print('you must input a valid fastq file')", "sub_path": "ngs_growthRate_dataAnalysis/countingAlleles.py", "file_name": "countingAlleles.py", "file_ext": "py", "file_size_in_byte": 12332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 35, "usage_type": "call"}, {"api_name": "Bio.Seq.Seq", "line_number": 71, "usage_type": "call"}, {"api_name": "Bio.Alphabet.generic_dna", "line_number": 71, "usage_type": "argument"}, {"api_name": "numpy.multiply", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 239, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 242, "usage_type": "call"}, {"api_name": "itertools.islice", "line_number": 270, "usage_type": "call"}]} +{"seq_id": "583667289", "text": "from tkinter import *\nfrom PIL import 
Image, ImageTk\n\nviewport_width = 980\nviewport_height = 600\ncanvas = Canvas(Tk(), width=viewport_width, height=viewport_height, background=\"white\")\n\ncanvas.pack()\n\n\ndef get_text_size(d):\n    bounds = canvas.bbox(canvas.create_text(-1000, -1000, text=d.word, font=d.font))\n    width = bounds[2] - bounds[0]\n    height = bounds[3] - bounds[1]\n    return width + d.word_spacing, height\n\n\ndef img_load(file_name):\n    return Image.open(file_name)\n\n\ndef img_resize(img, size):\n    return img.resize((int(size[0]), int(size[1])))\n\n\ndef paint(d):\n\n    def _paint_background(d):\n        if d.type == \"ELEM\":\n            if hasattr(d, \"box\"):\n                canvas.create_rectangle(d.box.x, d.box.y, d.box.x + d.box.width, d.box.y + d.box.height,\n                                        width=0, fill=d.style[\"background-color\"])\n\n        for c in d.children:\n            _paint_background(c)\n\n    def _paint_words(d):\n        if d.type == \"WORD\":\n            canvas.create_text(d.box.x, d.box.y, text=d.word, fill=d.color, font=d.font, anchor=NW)\n            if d.decoration != \"none\":\n                if d.decoration == \"underline\":\n                    dy = d.box.height\n                elif d.decoration == \"line-through\":\n                    dy = d.box.height/2\n                elif d.decoration == \"overline\":\n                    dy = 0\n                canvas.create_line(d.box.x, d.box.y + dy, d.box.x + d.box.width, d.box.y + dy, fill=d.color)\n\n        for c in d.children:\n            _paint_words(c)\n\n    def _paint_imgs(d):\n        if hasattr(d, \"tag\") and d.tag == \"img\":\n            if hasattr(d, \"img\"):\n                d.imgtk = ImageTk.PhotoImage(d.img) # to prevent the image from being garbage collected\n                canvas.create_image(d.box.x, d.box.y, image=d.imgtk, anchor='nw')\n            pass\n        for c in d.children:\n            _paint_imgs(c)\n\n    _paint_background(d)\n    _paint_imgs(d)\n    _paint_words(d)\n\n    mainloop()\n\n\n\n", "sub_path": "graphics.py", "file_name": "graphics.py", "file_ext": "py", "file_size_in_byte": 1983, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "PIL.Image.open", "line_number": 19, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 19, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 55, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 55, "usage_type": "name"}]} +{"seq_id": "587828767", "text": "import torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom network import Network\n\n\nclass FC(nn.Module):\n    def __init__(self, in_size, out_size):\n        super(FC, self).__init__()\n        self.fc1 = nn.Linear(in_size, out_size,bias=False)\n        self.softmax = nn.Softmax(1)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = self.softmax(x)\n        return x\n\n\ndef predicate(net, *input):\n    d = torch.zeros(42)\n    d[int(input[0])] = 1.0\n    d[int(input[1])+21] = 1.0\n    d = torch.autograd.Variable(d.unsqueeze(0))\n    output = net.net(d)\n    return output.squeeze(0)\n\n\n\n\n\nnetwork = FC(42, 2)\nAD_net = Network(network, 'active_dealer_net', predicate)\nAD_net.optimizer = optim.Adam(network.parameters(), lr=1.0)\n", "sub_path": "examples/CAPITA/PART1/nets_AD.py", "file_name": "nets_AD.py", "file_ext": "py", "file_size_in_byte": 775, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 11, "usage_type": "name"}, {"api_name": "torch.nn.Softmax", "line_number": 12, "usage_type": "call"}, {"api_name": 
"torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 24, "usage_type": "attribute"}, {"api_name": "network.Network", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 34, "usage_type": "name"}, {"api_name": "network.parameters", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "292120250", "text": "# All entities (not to be mistaken by the actual map)\r\n# are defined here in this file\r\n# PS: Good luck finding what is what\r\n\r\nimport math\r\nimport pygame.constants as pyConst\r\nimport pygame\r\nimport levelData\r\nimport renderer\r\nimport collisionDetection\r\nimport mathHelpers\r\nimport enum\r\nimport os\r\nimport gameIO\r\nimport pathFinding\r\n\r\n#Base class for everything \r\nclass Entity():\r\n def __init__(self, start_pos):\r\n self.px = start_pos[0]\r\n self.py = start_pos[1]\r\n\r\n def update(self, dt, events):\r\n pass\r\n\r\n def get_pos(self):\r\n return (self.px, self.py)\r\n\r\n# An entity with an \"agent_pack\" which is a\r\n# way to group everything related to an object\r\n# to a folder name in the Assets folder \r\nclass SpriteEntity(Entity):\r\n def __init__(self, start_pos, agent_pack_name=\"Default\"):\r\n super().__init__(start_pos)\r\n # We only store the name because pickle cant\r\n # store surfaces and sound classes\r\n self.agent_pack_name = agent_pack_name\r\n \r\n def get_sprite(self, camObj):\r\n return gameIO.get_sprite(self.agent_pack_name, 0)\r\n\r\n# An entity with a camera\r\n# everything derived froom this class\r\n# can be considered playerish\r\nclass Agent(SpriteEntity):\r\n def __init__(self, start_pos, fov, moveSpeed, fovDepth):\r\n super().__init__(start_pos)\r\n # Everything has health but only the player actualy uses it\r\n # HMMMMMMMMMMMMMMMM\r\n self.health = 100\r\n self.entitiesInSight = []\r\n\r\n # I could use \"player.instance in entitiesInSight\" but whatever\r\n self.canSeePlayer = False\r\n\r\n # \"A ray\" that store rays (pun intended) so they can be drawn \r\n # on screen. 
You can spectate enemies!\r\n        self.rayDistanceTable = {}\r\n        self.FOV = fov * (math.pi/180)\r\n        self.FOVDepth = fovDepth\r\n        self.angleY = 0\r\n        self.moveSpeed = moveSpeed\r\n\r\n        self.dirX = -1\r\n        self.dirY = 0\r\n\r\n        self.planeX = 0\r\n        self.planeY = 0.66\r\n\r\n    def update(self, dt, events):\r\n        renderer.generate_distance_table(self)\r\n        return super().update(dt, events)\r\n\r\n    def move(self, dirX, dirY, deltaTime):\r\n        # I split movement in two so the player\r\n        # can slide along walls\r\n        nexPosX = self.px + dirX\r\n        nexPosY = self.py + dirY\r\n        # Move X\r\n\r\n        if (nexPosX < 0 or nexPosX > levelData.Level.currentMap.level_width):\r\n            self.px += dirX\r\n        elif self.py < 0 or self.py > levelData.Level.currentMap.level_height:\r\n            self.px += dirX\r\n        elif levelData.Level.currentMap.grid[int(nexPosX)][int(self.py)] == 0:\r\n            self.px += dirX\r\n\r\n        # Move Y\r\n\r\n        if nexPosY < 0 or nexPosY > levelData.Level.currentMap.level_height:\r\n            self.py += dirY\r\n        elif (self.px < 0 or self.px > levelData.Level.currentMap.level_width):\r\n            self.py += dirY\r\n        elif levelData.Level.currentMap.grid[int(self.px)][int(nexPosY)] == 0:\r\n            self.py += dirY\r\n\r\n    def move_to(self, target, deltaTime):\r\n        angle = math.atan2(self.dirY, self.dirX)\r\n        self.look_at(target, deltaTime * 2)\r\n        dirX = math.cos(angle) * self.moveSpeed * deltaTime\r\n        dirY = math.sin(angle) * self.moveSpeed * deltaTime\r\n        self.move(dirX, dirY, deltaTime)\r\n\r\n    def rotate(self, amount):\r\n        #Rotating the player and its projection plane\r\n        oldDirX = self.dirX\r\n        self.dirX = self.dirX * math.cos(amount) - self.dirY * math.sin(amount)\r\n        self.dirY = oldDirX * math.sin(amount) + self.dirY * math.cos(amount)\r\n\r\n        oldPlaneX = self.planeX\r\n        self.planeX = self.planeX * math.cos(amount) - self.planeY * math.sin(amount)\r\n        self.planeY = oldPlaneX * math.sin(amount) + self.planeY * math.cos(amount)\r\n\r\n    def look_at(self, target, deltaTime):\r\n\r\n        dx, dy = mathHelpers.slope(self.get_pos(), target.get_pos())\r\n        # Finds out what's the target angle to the target\r\n        theta = math.atan2(dy, dx)\r\n        angle = math.atan2(self.dirY, self.dirX)\r\n        targetAngle = theta\r\n        targetAngle = math.degrees(targetAngle)\r\n        # Good old stack exchange :)\r\n        # https://math.stackexchange.com/a/2898118\r\n        shortest_angle=((((targetAngle - math.degrees(angle)) % 360) + 540) % 360) - 180\r\n        self.rotate(math.radians(shortest_angle) * deltaTime )\r\n\r\n\r\nclass EnemyStatus(enum.Enum):\r\n    import colors\r\n    Normal = (\"Normal\", colors.GREEN, 0)\r\n    Alert = (\"Alert\", colors.RED, 10)\r\n    Evasion = (\"Evasion\", colors.YELLOW, 20)\r\n    Caution = (\"Caution\", colors.MAROON, 30)\r\n\r\n\r\n# My failed attempt to have 8 directional sprites;\r\n# there are still some remnants of my attempt in the \r\n# SpriteAgent class\r\n\r\n# class AngleDirection(enum.Enum):\r\n#     back = 4\r\n#     backDiag = 3\r\n#     side = 2\r\n#     fronDiag = 1\r\n#     front = 0\r\n\r\n# I created this class by accident;\r\n# all Agents already have sprites\r\nclass SpriteAgent(Agent):\r\n    def __init__(self, start_pos, fov, moveSpeed, fovDepth, agent_pack):\r\n        super().__init__(start_pos, fov, moveSpeed, fovDepth)\r\n        # Five sprites from 0 to 180 in steps of 45\r\n        self.agent_pack_name = agent_pack\r\n\r\n    \r\n    def get_sprite(self, camObj):\r\n        angle = math.atan2(self.dirY, self.dirX)\r\n        camObjAngle =math.atan2(camObj.dirY, camObj.dirX)\r\n\r\n        camPos = camObj.get_pos()\r\n        dx, dy = mathHelpers.slope(camPos, self.get_pos())\r\n        camAngleToSprite = (math.atan2(dy, dx))\r\n        angleCamDelta = 
mathHelpers.fixed_angle(camAngleToSprite + angle)\r\n\r\n        curr = gameIO.get_sprite(self.agent_pack_name, 0)\r\n\r\n        if math.degrees(angleCamDelta) < 180:\r\n            curr = pygame.transform.flip(curr, True, False)\r\n\r\n        return curr\r\n\r\n# The player is a singleton.\r\n# You can actually see yourself\r\n# if you change the \"draw_first_person(player_instance)\"\r\n# to levelData.current_map.grid_entities[some_number]\r\nclass Player(SpriteAgent):\r\n    instance = None\r\n\r\n    def __init__(self, start_pos):\r\n        super().__init__(start_pos, 90, 2, renderer.DEPTH,\r\n                         \"enemyIdle\")\r\n\r\n        Player.instance = self\r\n        self.mouseEnable = False\r\n        self.cameraYawSens = 2\r\n        self.cameraPitchSens = 360\r\n        self.keys = 0\r\n\r\n    def update(self, dt, events):\r\n        get_input(self, dt, events)\r\n        return super().update(dt, events)\r\n    \r\nclass Enemy(SpriteAgent):\r\n    enemy_status = EnemyStatus.Normal\r\n    enemy_status_time_left = 0\r\n\r\n    def __init__(self, start_pos, patrolPoint=None):\r\n        super().__init__(start_pos, 90, 2, 6,\r\n                         \"Droog\")\r\n\r\n        self.patrolPoint = patrolPoint\r\n        self.target = self.patrolPoint\r\n\r\n        self.pathFindingNodesTarget = self.target\r\n        self.pathFindingNodes = None\r\n\r\n        self.timeGuarded = 0\r\n        self.pathFindingComplete = False\r\n        self.originalFov = self.FOV\r\n        self.originalFovDepth = self.FOVDepth\r\n        self.lastPathFindingPoint = None\r\n        self.cameraYawSens = 4\r\n\r\n    # Base enemy behaviour.\r\n    # It still needs some tweaking\r\n    def update(self, dt, events):\r\n        super().update(dt, events)\r\n        # Status time will lower faster if there are more enemies\r\n        # PS: It's a feature\r\n        Enemy.enemy_status_time_left -= dt\r\n        if Enemy.enemy_status_time_left < 0:\r\n            Enemy.enemy_status_time_left = 0\r\n            if Enemy.enemy_status == EnemyStatus.Alert:\r\n                Enemy.enemy_status = EnemyStatus.Evasion\r\n            elif Enemy.enemy_status == EnemyStatus.Evasion:\r\n                Enemy.enemy_status = EnemyStatus.Caution\r\n            elif Enemy.enemy_status == EnemyStatus.Caution:\r\n                Enemy.enemy_status = EnemyStatus.Normal\r\n\r\n            if Enemy.enemy_status != EnemyStatus.Normal:\r\n                Enemy.reset_enemy_status_time()\r\n\r\n        if Enemy.enemy_status == EnemyStatus.Normal:\r\n            self.change_target(self.patrolPoint)\r\n\r\n        if Player.instance in [x[0] for x in self.entitiesInSight]:\r\n            self.canSeePlayer = True\r\n            Enemy.change_enemy_status(EnemyStatus.Alert)\r\n        else:\r\n            self.canSeePlayer = False\r\n\r\n        if self.canSeePlayer or Enemy.enemy_status == EnemyStatus.Alert:\r\n            self.change_target(Player.instance)\r\n\r\n        if Enemy.enemy_status == EnemyStatus.Evasion:\r\n            if self.pathFindingNodesTarget != Player.instance:\r\n                self.change_target(Player.instance)\r\n            if self.pathFindingComplete:\r\n                # pick random points on the map until one with a valid path is found\r\n                while len(self.pathFindingNodes) == 0:\r\n                    random_pos = Entity(\r\n                        levelData.Level.currentMap.pick_random_point())\r\n                    self.change_target(random_pos)\r\n\r\n        if Enemy.enemy_status == EnemyStatus.Caution:\r\n            if self.pathFindingComplete:\r\n                # pick random point on the map\r\n                random_pos = Entity(\r\n                    levelData.Level.currentMap.pick_random_point())\r\n                dx, dy = mathHelpers.slope(\r\n                    self.get_pos(), random_pos.get_pos())\r\n                targetDistance = math.hypot(dx, dy)\r\n                if len(self.pathFindingNodes) == 0 or targetDistance < 1:\r\n                    while len(self.pathFindingNodes) == 0 and targetDistance < 1:\r\n                        random_pos = Entity(\r\n                            levelData.Level.currentMap.pick_random_point())\r\n                        dx, dy = mathHelpers.slope(\r\n                            self.get_pos(), random_pos.get_pos())\r\n                        targetDistance = math.hypot(dx, dy)\r\n\r\n                    
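# commit to the chosen wander point; change_target runs the\r\n                    # pathfinding towards it\r\n                    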
self.change_target(random_pos)\r\n\r\n        if (Enemy.enemy_status == EnemyStatus.Evasion or\r\n                Enemy.enemy_status == EnemyStatus.Alert or\r\n                Enemy.enemy_status == EnemyStatus.Caution):\r\n            self.FOV = self.originalFov * 1.5\r\n            self.FOVDepth = self.originalFovDepth * 1.5\r\n        else:\r\n            self.FOV = self.originalFov\r\n            self.FOVDepth = self.originalFovDepth\r\n\r\n        if self.target is not None:\r\n            dx, dy = mathHelpers.slope(self.get_pos(), self.target.get_pos())\r\n            targetDistance = math.hypot(dx, dy)\r\n            if self.pathFindingNodes is not None and len(self.pathFindingNodes) > 0:\r\n                nextStep = self.pathFindingNodes[0]\r\n                \r\n                nextPathNodeDistance = mathHelpers.distance_to(self.get_pos(), nextStep)\r\n                adjustedNextStep = (nextStep[0]+0.5, nextStep[1]+0.5)\r\n                self.move_to(Entity(adjustedNextStep), dt)\r\n                if nextPathNodeDistance < 0.1:\r\n                    self.pathFindingNodes.pop(0) \r\n            else:\r\n                self.pathFindingComplete = True\r\n                if targetDistance > 0.5:\r\n                    self.move_to(self.target, dt)\r\n\r\n            if isinstance(self.target, Node) and targetDistance < 0.5:\r\n                self.timeGuarded += dt\r\n                self.rotate(math.radians(36) * dt)\r\n\r\n                if self.timeGuarded > 1:\r\n                    self.change_patrol_point()\r\n\r\n    def change_target(self, target):\r\n        if target != None:\r\n            self.target = target\r\n            x, y = target.get_pos()\r\n            x = int(x)\r\n            y = int(y)\r\n            my_pos = (int(self.px), int(self.py))\r\n            self.lastPathFindingPoint = target.get_pos()\r\n            self.pathFindingComplete = False\r\n            self.pathFindingNodesTarget = target\r\n            self.pathFindingNodes = pathFinding.go_to(my_pos, (x, y))\r\n            self.pathFindingNodes.pop(0)\r\n\r\n    def change_patrol_point(self):\r\n        self.patrolPoint = self.target.pick_random_node()\r\n        self.timeGuarded = 0\r\n        self.change_target(self.patrolPoint)\r\n\r\n    @staticmethod\r\n    def change_enemy_status(status):\r\n        Enemy.enemy_status = status\r\n        Enemy.enemy_status_time_left = Enemy.enemy_status.value[2]\r\n\r\n    @staticmethod\r\n    def reset_enemy_status_time():\r\n        Enemy.enemy_status_time_left = Enemy.enemy_status.value[2]\r\n\r\n\r\nclass Node(Entity):\r\n    \"\"\"\r\n    Nodes are points on the map that enemies depend on\r\n    to patrol. 
They are like bus stops for them\r\n    \"\"\"\r\n\r\n    def __init__(self, start_pos):\r\n        super().__init__(start_pos)\r\n        self.behaviour = None\r\n        self.destination_point = 0\r\n        self.nodes = []\r\n\r\n    def join_node(self, nextNode):\r\n        if nextNode not in self.nodes:\r\n            self.nodes.append(nextNode)\r\n        if self not in nextNode.nodes:\r\n            nextNode.nodes.append(self)\r\n\r\n    def remove_node(self, node):\r\n        if node in self.nodes:\r\n            self.nodes.remove(node)\r\n            node.remove_node(self)\r\n\r\n\r\n    # If biased, the last node on the list will \r\n    # get an advantage to be picked,\r\n    # so the enemy will be less likely to run\r\n    # in circles between two nodes\r\n    def pick_random_node(self, biased=False):\r\n        import random\r\n        node_list = self.nodes.copy()\r\n\r\n        if biased: \r\n            for x in range(int( len(self.nodes)*0.5 ) ):\r\n                node_list.append( node_list[-1] )\r\n\r\n\r\n        if len(node_list) > 0:\r\n            node_pos = random.randint(0, len(node_list) - 1)\r\n            return node_list[node_pos]\r\n        else:\r\n            return self\r\n\r\n# tbh I don't know why I created this class;\r\n# this is basically an enemy that produces\r\n# sounds.\r\nclass Monster(Enemy):\r\n    def __init__(self, start_pos, patrolPoint=None):\r\n        super().__init__(start_pos, patrolPoint=patrolPoint)\r\n        self.sound = None\r\n        self.channel = None\r\n    def update(self, dt, events):\r\n        import os\r\n\r\n        super().update(dt, events)\r\n        if self.channel == None:\r\n            self.channel = pygame.mixer.find_channel(True)\r\n\r\n        \r\n        if self.target is not None:\r\n            dist_to_player = mathHelpers.distance_to(self.get_pos(), Player.instance.get_pos())\r\n            if isinstance(self.target, Player) and dist_to_player < 3 and self.canSeePlayer:\r\n                self.attack(self.target, dt)\r\n            if isinstance(self.target, Player) and dist_to_player < 3 and not self.canSeePlayer:\r\n                self.look_at(self.target, dt)\r\n            if dist_to_player < 5:\r\n                self.play_sound(dist_to_player, Enemy.enemy_status.name)\r\n        \r\n\r\n        \r\n        \r\n    def play_sound(self, distance, flag, force=False):\r\n        volume = mathHelpers.translate(distance, 0, 5, 2, 0.5 )\r\n        self.sound = gameIO.get_audio(self.agent_pack_name, flag )\r\n        if not self.channel.get_busy(): # Do not play two sounds at the same time\r\n            if self.sound != None:\r\n                self.channel.play(self.sound)\r\n                self.channel.set_volume(volume, volume)\r\n        else:\r\n            if force:\r\n                self.sound.play()\r\n    \r\n    def attack(self, target, dt):\r\n        import random\r\n        self.look_at(self.target, dt * 2)\r\n        self.target.health -= dt * 30\r\n        self.target.look_at(self, dt)\r\n        self.target.angleY += dt * random.randint(-400, 400)\r\n        self.target.rotate(dt * random.randint(-3, 3))\r\n        dist_to_player = mathHelpers.distance_to(self.get_pos(), Player.instance.get_pos())\r\n        self.play_sound(dist_to_player, \"Attack\", True)\r\n    \r\nclass Collectible(SpriteEntity):\r\n    def __init__(self, start_pos):\r\n        super().__init__(start_pos, \"Collectible\")\r\n        self.collected = False\r\n    def update(self, dt, events):\r\n        if not self.collected:\r\n            dist = mathHelpers.distance_to(self.get_pos(), Player.instance.get_pos())\r\n            if dist < 0.5:\r\n                levelData.Level.currentMap.num_of_collected += 1\r\n                self.collected = True\r\n                self.agent_pack_name = \"\"\r\n\r\n\r\nclass Gate(SpriteEntity):\r\n    def __init__(self, start_pos):\r\n        super().__init__(start_pos, \"Gate\")\r\n        self.open = False #Depends on a key class to be opened\r\n\r\n    def update(self, dt, events):\r\n        if not self.open:\r\n            dist = mathHelpers.distance_to(self.get_pos(), Player.instance.get_pos())\r\n            if not self.open:\r\n                if dist < 0.5:\r\n                    if 
Player.instance.keys > 0:\r\n Player.instance.keys -= 1\r\n self.open = True\r\n self.agent_pack_name = \"\"\r\n elif dist < 1 and Player.instance.keys == 0:\r\n Player.instance.move(-Player.instance.dirX * 0.5, -Player.instance.dirY * 0.5, dt)\r\n\r\nclass Key(SpriteEntity):\r\n def __init__(self, start_pos):\r\n super().__init__(start_pos, \"Key\")\r\n self.collected = False\r\n\r\n def update(self, dt, events):\r\n if not self.collected:\r\n dist = mathHelpers.distance_to(self.get_pos(), Player.instance.get_pos())\r\n if dist < 0.5:\r\n Player.instance.keys += 1\r\n self.collected = True\r\n self.agent_pack_name = \"\"\r\n\r\n# I was using this test global var to tweak some values while I played.\r\ntestGlobalVar = 0\r\ndef get_input(entity, dt, events):\r\n global testGlobalVar\r\n deltaTime = dt\r\n\r\n kb = pygame.key.get_pressed()\r\n for event in events:\r\n if event.type == pyConst.KEYDOWN:\r\n if event.key == pyConst.K_ESCAPE:\r\n pygame.mouse.set_pos(\r\n [renderer.SCREEN_WIDTH//2,\r\n renderer.SCREEN_HEIGHT//2])\r\n entity.mouseEnable = not entity.mouseEnable\r\n\r\n pygame.mouse.set_visible(not entity.mouseEnable)\r\n pygame.event.set_grab(entity.mouseEnable)\r\n if entity.mouseEnable:\r\n\r\n mouse_pos = pygame.mouse.get_pos()\r\n pygame.mouse.set_pos(\r\n [renderer.SCREEN_WIDTH//2,\r\n renderer.SCREEN_HEIGHT//2])\r\n mouseDeltaX = (mouse_pos[0] - renderer.SCREEN_WIDTH//2)\r\n mouseDeltaY = (mouse_pos[1] - renderer.SCREEN_HEIGHT//2)\r\n entity.rotate(-entity.cameraYawSens * 0.05 * deltaTime * mouseDeltaX)\r\n\r\n entity.angleY -= 0.05 * deltaTime * entity.cameraPitchSens * mouseDeltaY\r\n\r\n if kb[pyConst.K_LEFT]:\r\n entity.rotate(entity.cameraYawSens * deltaTime)\r\n\r\n if kb[pyConst.K_RIGHT]:\r\n entity.rotate(-entity.cameraYawSens * deltaTime)\r\n\r\n if kb[pyConst.K_UP]:\r\n entity.angleY += entity.cameraPitchSens * deltaTime\r\n\r\n if kb[pyConst.K_DOWN]:\r\n entity.angleY -= entity.cameraPitchSens * deltaTime\r\n\r\n newPx = 0\r\n newPy = 0\r\n angle = math.atan2(-entity.dirY, entity.dirX)\r\n\r\n entity.angleY = mathHelpers.clamp(\r\n entity.angleY, -renderer.VIEWPORT_HEIGHT, renderer.VIEWPORT_HEIGHT)\r\n\r\n if kb[pyConst.K_d]:\r\n newPx -= math.sin(angle) * entity.moveSpeed * deltaTime\r\n newPy -= math.cos(angle) * entity.moveSpeed * deltaTime\r\n\r\n if kb[pyConst.K_a]:\r\n newPx += math.sin(angle) * entity.moveSpeed * deltaTime\r\n newPy += math.cos(angle) * entity.moveSpeed * deltaTime\r\n\r\n if kb[pyConst.K_w]:\r\n newPx += math.cos(angle) * entity.moveSpeed * deltaTime\r\n newPy -= math.sin(angle) * entity.moveSpeed * deltaTime\r\n\r\n if kb[pyConst.K_s]:\r\n newPx -= math.cos(angle) * entity.moveSpeed * deltaTime\r\n newPy += math.sin(angle) * entity.moveSpeed * deltaTime\r\n\r\n # Used for prototyping purposes, see line: 468\r\n if kb[pyConst.K_n]:\r\n testGlobalVar -= 1 * deltaTime\r\n print(testGlobalVar)\r\n\r\n if kb[pyConst.K_m]:\r\n testGlobalVar += 1 * deltaTime\r\n print(testGlobalVar)\r\n\r\n entity.move(newPx, newPy, deltaTime)", "sub_path": "The dawn of Otrozhny/entities.py", "file_name": "entities.py", "file_ext": "py", "file_size_in_byte": 19721, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "gameIO.get_sprite", "line_number": 40, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 59, "usage_type": "attribute"}, {"api_name": "renderer.generate_distance_table", "line_number": 71, "usage_type": "call"}, {"api_name": "levelData.Level", "line_number": 81, 
"usage_type": "attribute"}, {"api_name": "levelData.Level", "line_number": 83, "usage_type": "attribute"}, {"api_name": "levelData.Level", "line_number": 85, "usage_type": "attribute"}, {"api_name": "levelData.Level", "line_number": 90, "usage_type": "attribute"}, {"api_name": "levelData.Level", "line_number": 92, "usage_type": "attribute"}, {"api_name": "levelData.Level", "line_number": 94, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 98, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 100, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 101, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 107, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 107, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 108, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 108, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 111, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 111, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 112, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 112, "usage_type": "call"}, {"api_name": "mathHelpers.slope", "line_number": 116, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 118, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 119, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 121, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 124, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 125, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 128, "usage_type": "attribute"}, {"api_name": "colors.GREEN", "line_number": 130, "usage_type": "attribute"}, {"api_name": "colors.RED", "line_number": 131, "usage_type": "attribute"}, {"api_name": "colors.YELLOW", "line_number": 132, "usage_type": "attribute"}, {"api_name": "colors.MAROON", "line_number": 133, "usage_type": "attribute"}, {"api_name": "math.atan2", "line_number": 157, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 158, "usage_type": "call"}, {"api_name": "mathHelpers.slope", "line_number": 161, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 162, "usage_type": "call"}, {"api_name": "mathHelpers.fixed_angle", "line_number": 163, "usage_type": "call"}, {"api_name": "gameIO.get_sprite", "line_number": 165, "usage_type": "call"}, {"api_name": "math.degrees", "line_number": 167, "usage_type": "call"}, {"api_name": "pygame.transform.flip", "line_number": 168, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 168, "usage_type": "attribute"}, {"api_name": "renderer.DEPTH", "line_number": 180, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Normal", "line_number": 194, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Alert", "line_number": 223, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Evasion", "line_number": 224, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Evasion", "line_number": 225, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Caution", "line_number": 226, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Caution", "line_number": 227, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Normal", "line_number": 228, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Normal", "line_number": 230, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Normal", "line_number": 233, 
"usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Alert", "line_number": 238, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Alert", "line_number": 242, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Evasion", "line_number": 245, "usage_type": "attribute"}, {"api_name": "levelData.Level.currentMap.pick_random_point", "line_number": 252, "usage_type": "call"}, {"api_name": "levelData.Level", "line_number": 252, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Caution", "line_number": 256, "usage_type": "attribute"}, {"api_name": "levelData.Level.currentMap.pick_random_point", "line_number": 260, "usage_type": "call"}, {"api_name": "levelData.Level", "line_number": 260, "usage_type": "attribute"}, {"api_name": "mathHelpers.slope", "line_number": 261, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 263, "usage_type": "call"}, {"api_name": "levelData.Level.currentMap.pick_random_point", "line_number": 267, "usage_type": "call"}, {"api_name": "levelData.Level", "line_number": 267, "usage_type": "attribute"}, {"api_name": "mathHelpers.slope", "line_number": 268, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 270, "usage_type": "call"}, {"api_name": "{'colors': 'colors'}.Evasion", "line_number": 274, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Alert", "line_number": 275, "usage_type": "attribute"}, {"api_name": "{'colors': 'colors'}.Caution", "line_number": 276, "usage_type": "attribute"}, {"api_name": "mathHelpers.slope", "line_number": 284, "usage_type": "call"}, {"api_name": "math.hypot", "line_number": 285, "usage_type": "call"}, {"api_name": "mathHelpers.distance_to", "line_number": 289, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 301, "usage_type": "call"}, {"api_name": "pathFinding.go_to", "line_number": 316, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 372, "usage_type": "call"}, {"api_name": "pygame.mixer.find_channel", "line_number": 390, "usage_type": "call"}, {"api_name": "pygame.mixer", "line_number": 390, "usage_type": "attribute"}, {"api_name": "mathHelpers.distance_to", "line_number": 394, "usage_type": "call"}, {"api_name": "mathHelpers.translate", "line_number": 406, "usage_type": "call"}, {"api_name": "gameIO.get_audio", "line_number": 407, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 421, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 422, "usage_type": "call"}, {"api_name": "mathHelpers.distance_to", "line_number": 423, "usage_type": "call"}, {"api_name": "mathHelpers.distance_to", "line_number": 432, "usage_type": "call"}, {"api_name": "levelData.Level", "line_number": 434, "usage_type": "attribute"}, {"api_name": "mathHelpers.distance_to", "line_number": 446, "usage_type": "call"}, {"api_name": "mathHelpers.distance_to", "line_number": 463, "usage_type": "call"}, {"api_name": "pygame.key.get_pressed", "line_number": 475, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 475, "usage_type": "attribute"}, {"api_name": "pygame.constants.KEYDOWN", "line_number": 477, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 477, "usage_type": "name"}, {"api_name": "pygame.constants.K_ESCAPE", "line_number": 478, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 478, "usage_type": "name"}, {"api_name": "pygame.mouse.set_pos", "line_number": 479, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 479, 
"usage_type": "attribute"}, {"api_name": "renderer.SCREEN_WIDTH", "line_number": 480, "usage_type": "attribute"}, {"api_name": "renderer.SCREEN_HEIGHT", "line_number": 481, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_visible", "line_number": 484, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 484, "usage_type": "attribute"}, {"api_name": "pygame.event.set_grab", "line_number": 485, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 485, "usage_type": "attribute"}, {"api_name": "pygame.mouse.get_pos", "line_number": 488, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 488, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_pos", "line_number": 489, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 489, "usage_type": "attribute"}, {"api_name": "renderer.SCREEN_WIDTH", "line_number": 490, "usage_type": "attribute"}, {"api_name": "renderer.SCREEN_HEIGHT", "line_number": 491, "usage_type": "attribute"}, {"api_name": "renderer.SCREEN_WIDTH", "line_number": 492, "usage_type": "attribute"}, {"api_name": "renderer.SCREEN_HEIGHT", "line_number": 493, "usage_type": "attribute"}, {"api_name": "pygame.constants.K_LEFT", "line_number": 499, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 499, "usage_type": "name"}, {"api_name": "pygame.constants.K_RIGHT", "line_number": 503, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 503, "usage_type": "name"}, {"api_name": "pygame.constants.K_UP", "line_number": 506, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 506, "usage_type": "name"}, {"api_name": "pygame.constants.K_DOWN", "line_number": 509, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 509, "usage_type": "name"}, {"api_name": "math.atan2", "line_number": 514, "usage_type": "call"}, {"api_name": "mathHelpers.clamp", "line_number": 516, "usage_type": "call"}, {"api_name": "renderer.VIEWPORT_HEIGHT", "line_number": 517, "usage_type": "attribute"}, {"api_name": "pygame.constants.K_d", "line_number": 519, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 519, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 520, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 521, "usage_type": "call"}, {"api_name": "pygame.constants.K_a", "line_number": 525, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 525, "usage_type": "name"}, {"api_name": "math.sin", "line_number": 526, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 527, "usage_type": "call"}, {"api_name": "pygame.constants.K_w", "line_number": 529, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 529, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 530, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 531, "usage_type": "call"}, {"api_name": "pygame.constants.K_s", "line_number": 533, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 533, "usage_type": "name"}, {"api_name": "math.cos", "line_number": 534, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 535, "usage_type": "call"}, {"api_name": "pygame.constants.K_n", "line_number": 541, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 541, "usage_type": "name"}, {"api_name": "pygame.constants.K_m", "line_number": 545, "usage_type": "attribute"}, {"api_name": "pygame.constants", "line_number": 
545, "usage_type": "name"}]} +{"seq_id": "103896393", "text": "import unittest\nimport torch\nimport numpy as np\nfrom pytorch_metric_learning.miners import AngularMiner\nfrom pytorch_metric_learning.utils import common_functions as c_f\n\nclass TestAngularMiner(unittest.TestCase):\n def test_angular_miner(self):\n embedding_angles = torch.arange(0, 16)\n embeddings = torch.tensor([c_f.angle_to_coord(a) for a in embedding_angles], requires_grad=True, dtype=torch.float) #2D embeddings\n labels = torch.randint(low=0, high=2, size=(16,))\n triplets = []\n for i in range(len(embeddings)):\n anchor, anchor_label = embeddings[i], labels[i]\n for j in range(len(embeddings)):\n if j == i:\n continue\n positive, positive_label = embeddings[j], labels[j]\n center = (anchor + positive) / 2\n if positive_label == anchor_label:\n ap_dist = torch.nn.functional.pairwise_distance(anchor.unsqueeze(0), positive.unsqueeze(0), 2)\n for k in range(len(embeddings)):\n if k == j or k == i:\n continue\n negative, negative_label = embeddings[k], labels[k]\n if negative_label != positive_label:\n nc_dist = torch.nn.functional.pairwise_distance(center.unsqueeze(0), negative.unsqueeze(0), 2)\n angle = torch.atan(ap_dist / (2*nc_dist))\n triplets.append((i,j,k,angle))\n\n for angle_in_degrees in range(0, 70, 10):\n miner = AngularMiner(angle_in_degrees)\n angle_in_radians = np.radians(angle_in_degrees)\n correct = []\n for i,j,k,angle in triplets:\n if angle > angle_in_radians:\n correct.append((i,j,k))\n correct_triplets = set(correct)\n a1, p1, n1 = miner(embeddings, labels)\n mined_triplets = set([(a.item(),p.item(),n.item()) for a,p,n in zip(a1,p1,n1)])\n self.assertTrue(mined_triplets == correct_triplets) \n\n\n def test_empty_output(self):\n miner = AngularMiner(35)\n batch_size = 32\n embeddings = torch.randn(batch_size, 64)\n labels = torch.arange(batch_size)\n a, p, n = miner(embeddings, labels)\n self.assertTrue(len(a)==0)\n self.assertTrue(len(p)==0)\n self.assertTrue(len(n)==0)\n", "sub_path": "tests/miners/test_angular_miner.py", "file_name": "test_angular_miner.py", "file_ext": "py", "file_size_in_byte": 2431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "unittest.TestCase", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 10, "usage_type": "call"}, {"api_name": "pytorch_metric_learning.utils.common_functions.angle_to_coord", "line_number": 10, "usage_type": "call"}, {"api_name": "pytorch_metric_learning.utils.common_functions", "line_number": 10, "usage_type": "name"}, {"api_name": "torch.float", "line_number": 10, "usage_type": "attribute"}, {"api_name": "torch.randint", "line_number": 11, "usage_type": "call"}, {"api_name": "torch.nn.functional.pairwise_distance", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.pairwise_distance", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "attribute"}, {"api_name": "torch.atan", "line_number": 28, "usage_type": "call"}, {"api_name": "pytorch_metric_learning.miners.AngularMiner", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.radians", "line_number": 33, "usage_type": "call"}, {"api_name": "pytorch_metric_learning.miners.AngularMiner", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.randn", 
"line_number": 47, "usage_type": "call"}, {"api_name": "torch.arange", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "302401036", "text": "#Dependencies\nfrom flask import Flask, redirect, url_for, render_template, request, abort, session, flash, jsonify\nfrom flask_bootstrap import Bootstrap\nfrom flask_wtf import FlaskForm\nfrom wtforms import *\nfrom wtforms.fields.html5 import DecimalField\nfrom wtforms.validators import DataRequired, NumberRange\nfrom flask_mongoengine import MongoEngine\nfrom flask_migrate import Migrate\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom datetime import *\nfrom dateutil.relativedelta import *\nimport calendar\nimport os\nfrom bson.objectid import ObjectId\nfrom dotenv import load_dotenv\nload_dotenv()\n\n#create multiple checkbox field from wtforms\nclass MultiCheckboxField(SelectMultipleField):\n widget = widgets.ListWidget(prefix_label=False)\n option_widget = widgets.CheckboxInput()\n\n\n# Configuration\nbasedir= os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\napp.config['MONGODB_SETTINGS']={\n 'db': 'freelance',\n 'host': os.environ.get('MONGO_LOGIN'),\n 'port': 27017,\n 'username': 'admin',\n 'password': 'Glnayr86'\n}\napp.config['SECRET_KEY'] = 'very hard to guess string'\ndb = MongoEngine()\ndb.init_app(app)\nbootstrap = Bootstrap(app)\nmigrate = Migrate(app,db)\n\n#Database Classes\nclass User(db.DynamicDocument):\n username = db.StringField()\n password_hash = db.StringField()\n role = db.StringField()\n date_joined = db.DateField()\n posts = db.ReferenceField('JobPost')\n\n @property\n def password(self):\n raise AttributeError(\"Password is not a readable attribute\")\n\n @password.setter\n def password(self, password):\n self.password_hash = generate_password_hash(password)\n\n def verify_password(self, password):\n return check_password_hash(self.password_hash, password)\n\n def to_json(self):\n return {\n 'username': self.username,\n 'password_hash': self.password_hash,\n 'role': self.role,\n 'date_joined': self.date_joined,\n 'posts': self.posts\n }\n\nclass Message(db.DynamicDocument):\n sender = db.StringField()\n recipient = db.StringField()\n read_yn = db.BooleanField()\n sender_deleted = db.BooleanField()\n receiver_deleted = db.BooleanField()\n date = db.DateField()\n title = db.StringField()\n message = db.StringField()\n\n def to_json(self):\n return {\n 'sender': self.sender,\n 'recipient': self.recipient,\n 'read_yn': self.read_yn,\n 'sender_deleted': self.sender_deleted,\n 'receiver_deleted': self.receiver_deleted,\n 'date': self.date,\n 'title': self.title,\n 'message': self.message}\n\nclass JobPost(db.DynamicDocument):\n poster = db.StringField()\n title= db.StringField()\n description = db.StringField()\n budget = db.StringField()\n hourlypay = db.StringField()\n\n def to_json(self):\n return {\n 'poster_id': self.poster_id,\n 'title': self.title,\n 'description': self.description,\n 'budget': self.budget,\n 'hourlypay': self.hourlypay\n }\n\n#Form Classes\nclass NameForm(FlaskForm):\n name = StringField('What is your name?', validators=[DataRequired()])\n password = PasswordField('What is your password', validators=[DataRequired()])\n submit = SubmitField('Submit')\n\nclass RegisterForm(FlaskForm):\n username = StringField('Enter your username', validators=[DataRequired()])\n password = PasswordField(\"Enter your password\", validators=[DataRequired()])\n password2 = PasswordField(\"Re-enter your password\", validators=[DataRequired()])\n submit = 
SubmitField('Submit')\n\nclass ComposeMessageForm(FlaskForm):\n send_to = StringField(\"Username to send to:\", validators=[DataRequired()])\n title = StringField('Title:', validators=[DataRequired()])\n message = TextAreaField('Message:', validators=[DataRequired()])\n submit = SubmitField('Submit')\n\nclass UpdateUserForm(FlaskForm):\n new_type = SelectField('Type of User:', choices = [('Freelancer', 'Freelancer'), ('Company', 'Company')], validators=[DataRequired()])\n languages = MultiCheckboxField('Programming Language/Stack:', choices=[('cpp', 'C++'), ('py', 'Python'), ('rb', 'Ruby'), ('php', 'PHP'),('java', 'Java'),('js', 'Javascript'),('sass', 'Sass')])\n biography = TextAreaField('Bio:')\n hourlyrate = DecimalField('Hourly Rate:', validators=[DataRequired()])\n submit=SubmitField('Update Details')\n\nclass JobPostForm(FlaskForm):\n title= StringField('Title:', validators=[DataRequired()])\n description= TextAreaField('Description of Job:', validators=[DataRequired()])\n budget = DecimalField('How much are you looking to spend:')\n hourlypay = DecimalField('How much do you pay per hour:', validators=[NumberRange(min=0, max=100000, message='Hourly pay must be between 0 and 100000')])\n languages = SelectField('What languages/technology do you need principally: ', choices=[('Ruby', 'Ruby'),('Python', 'Python'),('C++', 'C++'),('PHP', 'PHP'),('Javascript', 'Javascript'),('Java', 'Java'),('Sass', 'Sass')])\n submit = SubmitField('Submit')\n\nclass JobSearchForm(FlaskForm):\n term=StringField('Search Term:', validators=[DataRequired()])\n searchfield= SelectField('Search By:', choices=[('title', 'Title'), ('locationj', 'Location'), ('languagesj', 'Programming Languages')])\n submit=SubmitField('Search')\n\nclass FreelancerSearchForm(FlaskForm):\n term=StringField('Search Term:', validators=[DataRequired()])\n searchfield= SelectField('Search By:', choices=[('username', 'Username'), ('locationf', 'Location'), ('languagesf', 'Programming Languages')])\n submit=SubmitField('Search')\n\nclass CompanySearchForm(FlaskForm):\n term=StringField('Search Term:', validators=[DataRequired()])\n searchfield= SelectField('Search By:', choices=[('company', 'Company Name'), ('locationc', 'Location'), ('languagesc', 'Programming Languages Required')])\n submit=SubmitField('Search')\n\n#Python Shell\n@app.shell_context_processor\ndef make_shell_context():\n return dict(db=db, User=User, Message=Message, JobPost=JobPost)\n\n#Routing\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n try: current_user=User.objects(username=session['name']).first()\n except KeyError:\n session['name'] = None\n current_user = None\n form = NameForm()\n usercount = User.objects(role=\"Freelancer\").count()\n companycount = User.objects(role=\"Company\").count()\n interested = []\n if session['name'] is not None:\n posts = JobPost.objects(poster=session['name'])\n for language in current_user.languages:\n jobs = JobPost.objects(stack=language)\n interested.append(jobs)\n else:\n posts = JobPost.objects()[:5]\n interested.append(posts)\n if form.validate_on_submit():\n user = User.objects(username=form.name.data).first()\n if user is None:\n flash(\"Username not found\")\n else:\n if user.verify_password(form.password.data):\n session['known'] = True\n session['name'] = form.name.data\n else:\n session['known'] = False\n flash('Password/Username combination incorrect')\n form.name.data = \"\"\n return redirect(url_for('index'))\n return render_template('index.html', interested=interested[0], usercount=usercount, companycount=companycount, user=current_user, 
form=form, name=session.get('name'), known = session.get('known', False), posts=posts)\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n return redirect(url_for('index'))\n else:\n form = NameForm()\n if form.validate_on_submit():\n user = User.objects(username=form.name.data).first()\n if user is None:\n flash(\"Username not found\")\n else:\n if user.verify_password(form.password.data):\n session['known'] = True\n session['name'] = form.name.data\n return redirect(url_for('index'))\n else:\n form.name.data = \"\"\n session['known'] = False\n flash('Password/Username combination incorrect')\n return render_template('login.html', form=form, name=session.get('name'), known = session.get('known', False))\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is None:\n form = RegisterForm()\n if form.validate_on_submit():\n users = User.objects(username=form.username.data).first()\n try: user_registered = users.username\n except TypeError: user_registered = None\n except AttributeError: user_registered = None\n if form.password.data != form.password2.data:\n flash(\"Password mismatch, please check and try again\")\n else:\n if user_registered == form.username.data:\n flash(\"Username already exists\")\n else:\n User(username=form.username.data, password_hash=generate_password_hash(form.password.data), role=\"Freelancer\", date_joined=datetime.utcnow()).save()\n session['name']=form.username.data\n session['known'] = True\n return redirect(url_for('account'))\n return render_template('register.html', form=form)\n else:\n return redirect(url_for('index'))\n\n@app.route(\"/account\", methods=['GET', 'POST'])\ndef account():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n user=User.objects(username=session['name']).first()\n form=UpdateUserForm()\n if form.validate_on_submit():\n language = request.form.getlist('languages')\n languages = []\n for response in language:\n if response == \"cpp\":\n languages.append(\"C++\")\n if response == \"py\":\n languages.append(\"Python\")\n if response == \"rb\":\n languages.append(\"Ruby\")\n if response == \"php\":\n languages.append(\"PHP\")\n if response == \"java\":\n languages.append(\"Java\")\n if response == \"js\":\n languages.append(\"Javascript\")\n if response == \"sass\":\n languages.append(\"Sass\")\n new_role = request.values.get('new_type')\n hourlyrate = request.values.get('hourlyrate')\n bio = request.values.get('biography')\n user.update(role=new_role, hourly_rate = hourlyrate, languages=languages, bio=bio)\n flash(\"Details Updated\")\n return redirect(url_for(\"account\"))\n return render_template('account.html', name=session['name'], form=form, user=user)\n else:\n return redirect(url_for('register'))\n\n@app.route(\"/logout\", methods=['POST', 'GET'])\ndef logout():\n session['name'] = None\n session['known'] = False\n return redirect(url_for('index'))\n\n@app.route('/messages')\ndef message():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n user = User.objects(username=session['name']).first()\n messages_received = Message.objects(recipient=session['name'])\n rcount = []\n for message in messages_received:\n if not message.receiver_deleted:\n rcount.append(message)\n received_count = len(rcount)\n messages_sent = 
Message.objects(sender=session['name'])\n scount = []\n for message in messages_sent:\n if not message.sender_deleted:\n scount.append(message)\n sent_count=len(scount)\n return render_template('message.html', name=session['name'], messages_received=messages_received, rcount=received_count, scount= sent_count, messages_sent = messages_sent)\n else:\n return redirect(url_for('register'))\n\n@app.route('/userlist')\ndef user_list():\n users=User.objects()\n message = []\n for user in users:\n message.append(user.username)\n return jsonify(message)\n\n@app.route('/userdata')\ndef user_data():\n data = User.objects(username = session['name']).first()\n return jsonify(data)\n\n@app.route('/messages/compose', methods=['POST', 'GET'])\ndef compose_message():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n user = User.objects(username=session['name']).first()\n form=ComposeMessageForm()\n if form.validate_on_submit():\n send_to=form.send_to.data\n send_to_exist= True\n send_to_exist_checker = User.objects(username=send_to).first()\n if send_to_exist_checker is None:\n send_to_exist = False\n send_from=session['name']\n title=form.title.data\n message=form.message.data\n if not send_to_exist:\n flash(\"This username is not valid\")\n else:\n message_to_send=Message(sender=send_from, recipient=send_to, title=title, message=message, read_yn=False, sender_deleted=False, receiver_deleted=False, date=datetime.utcnow()).save()\n return redirect(url_for('message'))\n return render_template('messagecompose.html', name=session['name'], user=user, form=form)\n else:\n return redirect(url_for('register'))\n\n@app.route('/message/delete/receiver', methods=['POST'])\ndef delete_from_recipient():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n message_id=request.values.get('message_id')\n message_to_delete= Message.objects(id=message_id, recipient=session['name']).first()\n message_to_delete.update(receiver_deleted = True)\n if message_to_delete.sender_deleted:\n message_to_delete.delete()\n return redirect(url_for('message'))\n else:\n return redirect(url_for('register'))\n\n@app.route('/message/delete/sender', methods=['POST'])\ndef delete_from_sender():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n message_id=request.values.get('message_id')\n message_to_delete= Message.objects(id=message_id, sender=session['name']).first()\n message_to_delete.update(sender_deleted = True)\n if message_to_delete.receiver_deleted:\n message_to_delete.delete()\n return redirect(url_for('message'))\n else:\n return redirect(url_for('register'))\n\n@app.route(\"/message/reply\", methods=['POST'])\ndef reply():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n form=ComposeMessageForm()\n message_number=request.values.get('message_id')\n is_correspondence=request.values.get('send')\n message_content= Message.objects(id=message_number).first()\n return render_template(\"messagereply.html\", message=message_content, correspondence=is_correspondence, name=session['name'], form=form)\n else:\n return redirect(url_for('register'))\n\n@app.route(\"/message/send\", methods=['POST'])\ndef send():\n if request.values.get('response') == 'true':\n flash('Message sent - Please respond further in your messages tab')\n send_to=request.values.get('recipient')\n send_from=session['name']\n title=request.values.get('title')\n 
message_data=request.values.get('message')\n message_to_send=Message(sender=send_from, recipient=send_to, title=title, message=message_data, read_yn=False, sender_deleted=False, receiver_deleted=False, date=datetime.utcnow()).save()\n return redirect(url_for('index'))\n else:\n send_to=request.values.get('recipient')\n send_from=session['name']\n title=request.values.get('title')\n message_data=request.values.get('message')\n message_to_send=Message(sender=send_from, recipient=send_to, title=title, message=message_data, read_yn=False, sender_deleted=False, receiver_deleted=False, date=datetime.utcnow()).save()\n return redirect(url_for('message'))\n\n@app.route(\"/jobpost\", methods=['GET', 'POST'])\ndef post_form():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n form=JobPostForm()\n return render_template(\"jobpost.html\", name=session['name'], form=form)\n else:\n return redirect(url_for('register'))\n\n@app.route(\"/jobpost/post\", methods=['POST'])\ndef post_job():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n title = request.values.get('title')\n description = request.values.get('description')\n budget = request.values.get('budget')\n hourlypay = request.values.get('hourlypay')\n stack = request.values.get('languages')\n poster = session['name']\n post = JobPost(title=title, description=description, budget=budget, hourlypay=hourlypay, stack=stack, poster=poster).save()\n return redirect(url_for('index'))\n else:\n return redirect(url_for('register'))\n\n@app.route(\"/jobpost/delete\", methods=['POST'])\ndef delete_job():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n job_number=request.values.get('post_number')\n job_to_delete= JobPost.objects(id=job_number).first()\n job_to_delete.delete()\n return redirect(url_for('index'))\n else:\n return redirect(url_for('register'))\n\n@app.route(\"/job/<job_id>\")\ndef job(job_id):\n job = JobPost.objects(pk=job_id).first()\n poster = User.objects(username=job.poster).first()\n return render_template(\"job.html\", name=session['name'], job=job, poster=poster)\n\n@app.route(\"/job/interest\", methods=['POST'])\ndef interest():\n try: session['name']\n except KeyError: session['name'] = None\n if session['name'] is not None:\n job_id = request.values.get('job_id')\n username = request.values.get('name')\n shortintro = request.values.get('shortintro')\n hourlybudget=request.values.get('pricehour')\n fullbudget = request.values.get('pricejob')\n jobinterest = {\n 'username' : username,\n 'intro' : shortintro,\n 'hourlybudget' : hourlybudget,\n 'fullbudget' : fullbudget\n }\n jobtoappend = JobPost.objects(pk=job_id).first()\n jobtoappend.update(push__response__0 = jobinterest)\n return redirect(url_for('index'))\n else:\n return redirect(url_for('register'))\n\n@app.route('/check/<job_id>', methods=['POST'])\ndef check(job_id):\n job = JobPost.objects(pk=job_id).first()\n return render_template(\"check.html\", name=session['name'], job=job)\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\", name=session['name'])\n\n@app.route(\"/contact\")\ndef contact():\n return render_template(\"contact.html\", name=session['name'])\n\n@app.route(\"/browser\")\ndef browser():\n user_agent = request.headers.get(\"user-agent\")\n return '<h1>Your browser is {}</h1>'.format(user_agent)\n\n@app.route(\"/search\")\ndef search():\n jobform=JobSearchForm()\n freelancerform=FreelancerSearchForm()\n companyform=CompanySearchForm()\n return render_template(\"search.html\", name=session['name'], job=jobform, freelancer=freelancerform, company=companyform)\n\n@app.route(\"/search/results\", methods=['POST'])\ndef results():\n jobform=JobSearchForm()\n freelancerform=FreelancerSearchForm()\n companyform=CompanySearchForm()\n type = request.values.get('type')\n field = request.values.get('searchfield')\n term = request.values.get('term')\n if type == \"freelancer\":\n if field == \"languagesf\":\n results = User.objects(languages__icontains=term, role=\"Freelancer\")\n else:\n results = User.objects(username__icontains=term, role=\"Freelancer\")\n elif type == \"company\":\n if field == \"languagesc\":\n results = User.objects(languages__icontains=term, role=\"Company\")\n else:\n results = User.objects(username__icontains=term, role=\"Company\")\n else:\n if field == \"languagesj\":\n results = JobPost.objects(stack__icontains=term)\n else:\n results = JobPost.objects(title__icontains=term)\n return render_template('searchresults.html', name=session['name'], results=results, job=jobform, freelancer=freelancerform, company=companyform)\n\n@app.route(\"/profile/<username>\")\ndef profile(username):\n user = User.objects(username=username).first()\n return render_template('profile.html', name=session['name'], profile=user)\n\n@app.route(\"/attributions\")\ndef attributions():\n return render_template('attributions.html', name=session['name'])\n\n@app.errorhandler(404)\ndef page_not_found(e):\n try: session['name']\n except KeyError: session['name'] = None\n return render_template('404.html', e=e, name=session['name']), 404\n\n@app.errorhandler(500)\ndef internal_server_error(e):\n try: session['name']\n except KeyError: session['name'] = None\n return render_template('500.html', e=e, name=session['name']), 500\n\n#Run app\nif __name__ == \"__main__\":\n app.run(\n host = os.environ.get(\"IP\", \"0.0.0.0\"),\n port = int(os.environ.get(\"PORT\", \"5000\")),\n debug = False\n )\n", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 21534, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 30, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 30, "usage_type": "attribute"}, {"api_name": "flask_mongoengine.MongoEngine", "line_number": 36, "usage_type": "call"}, {"api_name": "flask_bootstrap.Bootstrap", "line_number": 38, "usage_type": "call"}, {"api_name": "flask_migrate.Migrate", "line_number": 39, "usage_type": "call"}, {"api_name": "werkzeug.security.generate_password_hash", "line_number": 55, "usage_type": "call"}, {"api_name": "werkzeug.security.check_password_hash", "line_number": 58, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 107, "usage_type": "name"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 108, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 109, "usage_type": 
"call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 112, "usage_type": "name"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 113, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 114, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 115, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 118, "usage_type": "name"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 119, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 120, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 121, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 124, "usage_type": "name"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 125, "usage_type": "call"}, {"api_name": "wtforms.fields.html5.DecimalField", "line_number": 128, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 128, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 131, "usage_type": "name"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 132, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 133, "usage_type": "call"}, {"api_name": "wtforms.fields.html5.DecimalField", "line_number": 134, "usage_type": "call"}, {"api_name": "wtforms.fields.html5.DecimalField", "line_number": 135, "usage_type": "call"}, {"api_name": "wtforms.validators.NumberRange", "line_number": 135, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 139, "usage_type": "name"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 140, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 144, "usage_type": "name"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 145, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 149, "usage_type": "name"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 162, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 164, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 170, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 171, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 184, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 185, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 187, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 188, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 191, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 191, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 191, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 195, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 196, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 197, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 198, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 
204, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 207, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 208, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 209, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 209, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 212, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 213, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 214, "usage_type": "call"}, {"api_name": "flask.session.get", "line_number": 214, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 218, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 219, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 220, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 228, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 231, "usage_type": "call"}, {"api_name": "werkzeug.security.generate_password_hash", "line_number": 233, "usage_type": "call"}, {"api_name": "datetime.utcnow", "line_number": 233, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 234, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 235, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 236, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 237, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 239, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 243, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 244, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 245, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 246, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 249, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 249, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 249, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 266, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 266, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 266, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 267, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 267, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 267, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 268, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 268, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 268, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 270, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 272, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 274, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 274, "usage_type": "call"}, {"api_name": 
"flask.session", "line_number": 278, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 279, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 280, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 280, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 284, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 285, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 286, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 287, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 288, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 294, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 300, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 300, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 302, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 302, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 310, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 314, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 315, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 319, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 320, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 321, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 322, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 330, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 334, "usage_type": "call"}, {"api_name": "datetime.utcnow", "line_number": 336, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 337, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 337, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 338, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 338, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 340, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 340, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 344, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 345, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 346, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 347, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 347, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 347, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 348, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 352, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 352, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 354, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 354, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 358, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 359, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 360, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 361, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 361, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 361, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 362, "usage_type": 
"name"}, {"api_name": "flask.redirect", "line_number": 366, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 366, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 368, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 368, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 372, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 373, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 374, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 376, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 376, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 376, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 377, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 377, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 377, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 379, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 379, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 381, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 381, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 385, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 385, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 385, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 386, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 387, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 387, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 387, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 388, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 389, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 389, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 389, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 390, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 390, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 390, "usage_type": "name"}, {"api_name": "datetime.utcnow", "line_number": 391, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 392, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 392, "usage_type": "call"}, {"api_name": "flask.request.values.get", "line_number": 394, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 394, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 394, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 395, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 396, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 396, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 396, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 397, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 397, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 397, "usage_type": "name"}, {"api_name": "datetime.utcnow", "line_number": 398, "usage_type": "call"}, {"api_name": "flask.redirect", 
"line_number": 399, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 399, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 403, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 404, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 405, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 407, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 407, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 409, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 409, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 413, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 414, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 415, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 416, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 416, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 416, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 417, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 417, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 417, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 418, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 418, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 418, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 419, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 419, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 419, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 420, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 420, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 420, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 421, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 423, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 423, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 425, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 425, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 429, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 430, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 431, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 432, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 432, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 432, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 435, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 435, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 437, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 437, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 443, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 443, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 447, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 448, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 449, "usage_type": 
"name"}, {"api_name": "flask.request.values.get", "line_number": 450, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 450, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 450, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 451, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 451, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 451, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 452, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 452, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 452, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 453, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 453, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 453, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 454, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 454, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 454, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 463, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 463, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 465, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 465, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 470, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 470, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 474, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 474, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 478, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 478, "usage_type": "name"}, {"api_name": "flask.request.headers.get", "line_number": 482, "usage_type": "call"}, {"api_name": "flask.request.headers", "line_number": 482, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 482, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 490, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 490, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 497, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 497, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 497, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 498, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 498, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 498, "usage_type": "name"}, {"api_name": "flask.request.values.get", "line_number": 499, "usage_type": "call"}, {"api_name": "flask.request.values", "line_number": 499, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 499, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 515, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 515, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 520, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 520, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 524, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 524, "usage_type": 
"name"}, {"api_name": "flask.session", "line_number": 528, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 529, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 530, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 530, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 534, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 535, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 536, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 536, "usage_type": "name"}, {"api_name": "os.environ.get", "line_number": 541, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 541, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 542, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 542, "usage_type": "attribute"}]} +{"seq_id": "433780047", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler\nimport preprocessing as pp\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\n\n\n\nclass Autoencoder(nn.Module):\n\n def __init__(self,hidden_size,input_size):\n super(Autoencoder, self).__init__()\n \n self.encoder = nn.Sequential(\n nn.Linear(input_size,hidden_size),\n nn.ReLU(),\n #nn.Linear(15,hidden_size),\n #nn.ReLU(),\n #nn.Linear(30,10),\n #nn.ReLU(),\n #nn.Linear(10,5),\n #nn.ReLU()\n )\n self.decoder = nn.Sequential(\n nn.Linear(hidden_size,input_size),\n nn.ReLU(),\n #nn.Linear(15,input_size),\n #nn.ReLU()\n )\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return x\n\ndef train_model( data, window_size = 10 , batch_size = 100, num_epochs = 2 , \n learning_rate = 0.001, weight_decay=1e-5, test = 0 , verbose = True ):\n\n input_size = 2*window_size\n hidden_size = 2 * input_size\n\n X = pp.sub_trajectories_sw1(data,window_size)\n\n X_train, X_test = train_test_split(X)\n\n trainloader = DataLoader(X_train, batch_size=batch_size, shuffle=True)\n testloader = DataLoader(X_test, batch_size=batch_size, shuffle=False)\n\n model = Autoencoder(hidden_size,input_size)\n criterion = nn.MSELoss()\n optimizer = torch.optim.Adam(\n model.parameters(), lr=learning_rate, weight_decay=weight_decay)\n\n train_losses = []\n test_losses = []\n for epoch in range(num_epochs):\n if verbose:\n print(\"epoch: \" + str(epoch + 1) + \"/\" + str(num_epochs+1))\n train_loss = 0\n test_loss = 0\n for x in trainloader:\n #print(x.float())\n #x, _ = data\n #img = img.view(img.size(0), -1)\n #x = torch.tensor(x,dtype=torch.float).view([batch_size,input_size])\n # ===================forward=====================\n output = model(x.float())\n loss = criterion(output, x.float())\n # ===================backward====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n train_loss += loss\n train_losses.append(train_loss.item()/ len(X_train))\n # ===================log========================\n \n # .format(epoch + 1, num_epochs, loss.item()))\n with torch.no_grad():\n loss = 0\n for x in testloader:\n #print(x.float())\n #x, _ = data\n #img = img.view(img.size(0), -1)\n #x = torch.tensor(x,dtype=torch.float).view([batch_size,input_size])\n # ===================forward=====================\n output = model(x.float())\n loss = criterion(output, x.float())\n test_loss += loss\n \n \n test_losses.append(test_loss.item()/ 
len(X_test))\n if verbose and test:\n \n plt.plot(train_losses)\n plt.plot(test_losses, color = 'r')\n plt.show()\n\n return model\n #if epoch % 10 == 0:\n #pic = to_img(output.cpu().data)\n #save_image(pic, './mlp_img/image_{}.png'.format(epoch))\n\n", "sub_path": "libs/autoencoder.py", "file_name": "autoencoder.py", "file_ext": "py", "file_size_in_byte": 3381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.nn.Module", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 19, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 19, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "preprocessing.sub_trajectories_sw1", "line_number": 46, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}]} +{"seq_id": "182434732", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 31 2017\n\n@author: N1705165D (Manaar)\n\"\"\"\n\nimport os\nimport parameters as param\nimport pandas as pd\nimport numpy as np\nimport random\nimport timeit\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import f1_score\nimport matplotlib.pyplot as plt\n\ndef create_benign_and_malware_files():\n if not os.path.exists(param.data_file):\n# print(\"Data File Not Found..!!\")\n return\n if not (os.path.exists(param.benign_directory) and os.path.exists(param.malware_directory)):\n os.makedirs(param.benign_directory)\n os.makedirs(param.malware_directory)\n raw_data = 
pd.read_csv(param.data_file, dtype='float64', header=None).as_matrix()[:, :-1]\n for i in range(param.total_benign):\n np.savetxt(param.benign_directory+\"prog\"+str(i+1)+\".txt\", raw_data[(i*50):((i+1)*50), :], fmt=\"%.1f\")\n for i in range(param.total_malware):\n np.savetxt(param.malware_directory+\"prog\"+str(i+1)+\".txt\", raw_data[((54*50)+(i*50)):((54*50)+(i+1)*50), :], fmt=\"%.1f\")\n# print(\"All the Benign and Malware files created..\")\n\ndef generate_benign_data(i):\n if not os.path.exists(param.temporary_directory):\n os.makedirs(param.temporary_directory)\n train_benign = random.sample(range(1, param.total_benign + 1), param.total_benign - param.num_test_benign)\n test_benign = []\n for item in range(1, param.total_benign + 1):\n if item not in train_benign:\n test_benign.append(item)\n train_benign.sort()\n test_benign.sort()\n with open(param.temporary_directory+\"train_benign_program_list\"+str(i)+\".txt\", \"w\") as outFile:\n for item in train_benign:\n outFile.write(\"%s \" % item)\n with open(param.temporary_directory+\"test_benign_program_list\"+str(i)+\".txt\", \"w\") as outFile:\n for item in test_benign:\n outFile.write(\"%s \" % item)\n# print(\"Benign Train and Test files are selected..\")\n return train_benign, test_benign\n\ndef generate_malware_data(i):\n train_malware = random.sample(range(1, param.total_malware + 1), param.total_malware - param.num_test_malware)\n test_malware = []\n for item in range(1, param.total_malware + 1):\n if item not in train_malware:\n test_malware.append(item)\n train_malware.sort()\n test_malware.sort()\n with open(param.temporary_directory+\"train_malware_program_list\"+str(i)+\".txt\", \"w\") as outFile:\n for item in train_malware:\n outFile.write(\"%s \" % item)\n with open(param.temporary_directory+\"test_malware_program_list\"+str(i)+\".txt\", \"w\") as outFile:\n for item in test_malware:\n outFile.write(\"%s \" % item)\n# print(\"Malware Train and Test files are selected..\")\n return train_malware, test_malware\n\ndef createDataSet(i):\n with open(\"data/train_data\"+str(i)+\".txt\", \"w\") as outFile:\n with open(\"tempData/train_benign_program_list\"+str(i)+\".txt\", \"r\") as inFile:\n content = inFile.readlines()\n content = content[0].strip().split(\" \")\n for item in content:\n with open(\"benign/prog\"+item+\".txt\") as dataFile:\n data = dataFile.readlines()\n for lines in data:\n outFile.write(lines.strip() + \" 0\\n\")\n with open(\"tempData/train_malware_program_list\"+str(i)+\".txt\", \"r\") as inFile:\n content = inFile.readlines()\n content = content[0].strip().split(\" \")\n for item in content:\n with open(\"malware/prog\"+item+\".txt\") as dataFile:\n data = dataFile.readlines()\n for lines in data:\n outFile.write(lines.strip() + \" 1\\n\")\n with open(\"data/test_data\"+str(i)+\".txt\", \"w\") as outFile:\n with open(\"tempData/test_benign_program_list\"+str(i)+\".txt\", \"r\") as inFile:\n content = inFile.readlines()\n content = content[0].strip().split(\" \")\n for item in content:\n with open(\"benign/prog\"+item+\".txt\") as dataFile:\n data = dataFile.readlines()\n for lines in data:\n outFile.write(lines.strip() + \" 0\\n\")\n with open(\"tempData/test_malware_program_list\"+str(i)+\".txt\", \"r\") as inFile:\n content = inFile.readlines()\n content = content[0].strip().split(\" \")\n for item in content:\n with open(\"malware/prog\"+item+\".txt\") as dataFile:\n data = dataFile.readlines()\n for lines in data:\n outFile.write(lines.strip() + \" 1\\n\")\n\ndef get_score(pred):\n file_y_test = 
[]\n for _ in range(10):\n file_y_test.append(0)\n for _ in range(10):\n file_y_test.append(1)\n pred_y_test = []\n for i in range(10):\n c = sum(x==0 for x in pred[50*i:50*(i+1)])\n if c == 50:\n pred_y_test.append(0)\n else:\n pred_y_test.append(1)\n for i in range(10):\n c = sum(x==1 for x in pred[500+50*i:500+50*(i+1)])\n if c == 50:\n pred_y_test.append(1)\n else:\n pred_y_test.append(0)\n return f1_score(file_y_test, pred_y_test)\n\ndef get_score_mod(pred):\n file_y_test = []\n for _ in range(10):\n file_y_test.append(0)\n for _ in range(10):\n file_y_test.append(1)\n pred_y_test = []\n for i in range(10):\n c = sum(x==0 for x in pred[50*i:50*(i+1)])\n if c > 48:\n pred_y_test.append(0)\n else:\n pred_y_test.append(1)\n for i in range(10):\n c = sum(x==1 for x in pred[500+50*i:500+50*(i+1)])\n if c > 48:\n pred_y_test.append(1)\n else:\n pred_y_test.append(0)\n return f1_score(file_y_test, pred_y_test)\n\ndef train_model(clf):\n train_time = [[] for i in range(5)]\n prediction_time = [[] for i in range(5)]\n score = [[] for i in range(5)]\n for i in range(1000):\n print(i)\n train_benign, test_benign = generate_benign_data(i)\n train_malware, test_malware = generate_malware_data(i)\n createDataSet(i)\n train_data = pd.read_csv(\"data/train_data\"+str(i)+\".txt\", header=None, sep=\" \").as_matrix()\n test_data = pd.read_csv(\"data/test_data\"+str(i)+\".txt\", header=None, sep=\" \").as_matrix()\n X_train = train_data[:, :-1]\n y_train = train_data[:, -1]\n X_test = test_data[:, :-1]\n for j in range(5):\n start = timeit.default_timer()\n model = clf[j].fit(X_train, y_train)\n end = timeit.default_timer()\n time_to_train = (end - start)*1e3\n train_time[j].append(time_to_train)\n \n start = timeit.default_timer()\n pred = model.predict(X_test)\n end = timeit.default_timer()\n time_to_predict = (end - start)*1e6\n prediction_time[j].append(time_to_predict)\n \n pred = model.predict(X_test)\n if j==0 or j==3:\n score[j].append(get_score_mod(pred))\n else:\n score[j].append(get_score(pred))\n return score, train_time, prediction_time\n\ndef draw_plot(ax, data, edge_color, fill_color):\n bp = ax.boxplot(data, patch_artist=True, sym=\"\")\n for element in ['boxes', 'whiskers', 'medians']:\n plt.setp(bp[element], color=edge_color)\n for patch in bp['boxes']:\n patch.set(facecolor=fill_color)\n\ndef main():\n create_benign_and_malware_files()\n clf = []\n clf.append(MLPClassifier())\n clf.append(LogisticRegression())\n clf.append(GaussianNB())\n clf.append(LinearSVC())\n clf.append(RandomForestClassifier())\n score, train_time, test_time = train_model(clf)\n fig, ax = plt.subplots()\n draw_plot(ax, [score[0], score[1], score[2], score[3], score[4]], 'blue', 'cyan')\n plt.xticks([1, 2, 3, 4, 5], [\"Multilayer Perceptron\", \"Logistic Regression\", \"Gaussian Naive Bayes\", \"Support Vector Machine\", \"Random Forest\"], rotation='vertical')\n plt.ylabel(\"F1-Score\")\n plt.show()\n for i in range(len(score)):\n print(np.mean(score[i]), np.mean(train_time[i]), np.mean(test_time[i]))\n np.savetxt(\"score_\"+str(i)+\".txt\", score[i], fmt=\"%0.10f\")\n np.savetxt(\"train_time_\"+str(i)+\".txt\", train_time[i], fmt=\"%0.10f\")\n np.savetxt(\"test_time_\"+str(i)+\".txt\", test_time[i], fmt=\"%0.10f\")\n# plt.savefig(\"f_scores.png\", dpi=1000, bbox_inches='tight')\n\nif __name__ == \"__main__\":\n main()", "sub_path": "machine_learning/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 8555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.exists", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "parameters.data_file", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "parameters.benign_directory", "line_number": 27, "usage_type": "attribute"}, {"api_name": "parameters.malware_directory", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 28, "usage_type": "call"}, {"api_name": "parameters.benign_directory", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 29, "usage_type": "call"}, {"api_name": "parameters.malware_directory", "line_number": 29, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 30, "usage_type": "call"}, {"api_name": "parameters.data_file", "line_number": 30, "usage_type": "attribute"}, {"api_name": "parameters.total_benign", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 32, "usage_type": "call"}, {"api_name": "parameters.benign_directory", "line_number": 32, "usage_type": "attribute"}, {"api_name": "parameters.total_malware", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.savetxt", "line_number": 34, "usage_type": "call"}, {"api_name": "parameters.malware_directory", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "parameters.temporary_directory", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 39, "usage_type": "call"}, {"api_name": "parameters.temporary_directory", "line_number": 39, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 40, "usage_type": "call"}, {"api_name": "parameters.total_benign", "line_number": 40, "usage_type": "attribute"}, {"api_name": "parameters.num_test_benign", "line_number": 40, "usage_type": "attribute"}, {"api_name": "parameters.total_benign", "line_number": 42, "usage_type": "attribute"}, {"api_name": "parameters.temporary_directory", "line_number": 47, "usage_type": "attribute"}, {"api_name": "parameters.temporary_directory", "line_number": 50, "usage_type": "attribute"}, {"api_name": "random.sample", "line_number": 57, "usage_type": "call"}, {"api_name": "parameters.total_malware", "line_number": 57, "usage_type": "attribute"}, {"api_name": "parameters.num_test_malware", "line_number": 57, "usage_type": "attribute"}, {"api_name": "parameters.total_malware", "line_number": 59, "usage_type": "attribute"}, {"api_name": "parameters.temporary_directory", "line_number": 64, "usage_type": "attribute"}, {"api_name": "parameters.temporary_directory", "line_number": 67, "usage_type": "attribute"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 149, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 160, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 161, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 166, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 168, "usage_type": "call"}, {"api_name": "timeit.default_timer", 
"line_number": 172, "usage_type": "call"}, {"api_name": "timeit.default_timer", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 195, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 196, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 197, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 198, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 201, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 203, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 203, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 207, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 208, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.savetxt", "line_number": 210, "usage_type": "call"}]} +{"seq_id": "250451546", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 6 15:07:11 2017\n\n@author: hwj\n\"\"\"\n\n#import csv\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata=pd.read_csv(\"/hwj/yahoo/python/data/A1Benchmark/real_4.csv\")\nvalue=data.value\nis_anomaly=data.is_anomaly\n\n#with open(\"/hwj/yahoo/python/data/A1Benchmark/real_5.csv\") as f:\n# reader = csv.DictReader(f)\n# value = [float(row['value']) for row in reader]\n \ndef Brute_Force(value,is_anomaly,n):\n# best_so_far_dist=0\n# best_so_far_loc=np.NaN\n T_length=len(value)\n T_dist=[-1]*(T_length-n+1)\n for i in range(0,T_length-n+1):\n nearest_neighbor_dist=np.infty\n for j in range(0,T_length-n+1):\n if abs(i-j)>=n:\n a=np.array(value[i:i+n])\n b=np.array(value[j:j+n])\n dist=np.linalg.norm(a - b)\n if distbest_so_far_dist:\n# best_so_far_dist=nearest_neighbor_dist\n# best_so_far_loc=i\n# print(T_dist)\n print(len(T_dist))\n x1=range(len(T_dist))\n x2=range(len(value))\n plt.figure(1)\n plt.plot(x1,T_dist)\n plt.savefig(\"/hwj/yahoo/python/BruteForce/plot/46_real_dist.jpg\")\n plt.figure(2)\n plt.plot(x2,value)\n for i in x2:\n if is_anomaly[i]==1:\n plt.plot(i,value[i],'ro-')\n plt.savefig(\"/hwj/yahoo/python/BruteForce/plot/46_real.jpg\")\n for i in range(0,len(T_dist)):\n if T_dist[i]>0.2:\n print(T_dist[i],i)\n for i in range(0,len(value)):\n if is_anomaly[i]==1:\n print(i)\n\nif __name__ == \"__main__\":\n Brute_Force(value,is_anomaly,5)\n ", "sub_path": "python/Brute_Force_0.py", "file_name": "Brute_Force_0.py", "file_ext": "py", "file_size_in_byte": 1772, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.read_csv", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.infty", "line_number": 28, 
"usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 33, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 45, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 46, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "500187998", "text": "\nimport json\nimport os\nimport shutil\nimport subprocess\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom io import open\n\n\nclass Build(object):\n spec_json = None\n parser = None\n paths = dict()\n\n def __init__(self, app_name, base_dir, settings=None,\n is_validation_required=True):\n self.app_name = app_name\n self.base_dir = base_dir\n self.settings = settings\n self.initiate_build(is_validation_required)\n\n def initiate_build(self, is_validation_required=True):\n # setup build specific paths\n self.setup_paths()\n\n # create settings file if not exists\n self.check_create_settings_file()\n\n # step1 check build dir exists\n # self.check_build_exists()\n\n # step3 validate specs_json\n if is_validation_required:\n # step2 load api_spec.json into spec_json\n self.load_spec_file(self.paths[\"api_specs_json\"])\n\n self.validate_swagger()\n\n # step4 custom spec validation\n self.custom_spec_validator()\n\n # step5 parser swagger specs\n self.parse_swagger_specs()\n\n def setup_paths(self):\n \"\"\"\n defines paths used in the project\n :return:\n \"\"\"\n # TODO: separate out paths based on android, json patch, server_gen.\n app_base_path = os.path.join(self.base_dir, self.app_name)\n build_dir = os.path.join(app_base_path, \"build\")\n api_spec_dir = os.path.join(app_base_path, \"api_specs\")\n api_spec_migrations_dir = os.path.join(api_spec_dir, \"migrations\")\n api_specs_json = os.path.join(api_spec_dir, \"api_spec.json\")\n request_response_dir = os.path.join(build_dir, \"request_response\")\n decorator_options_file = os.path.join(request_response_dir,\n \"decorator_options.py\")\n security_definitions_file = os.path.join(request_response_dir,\n \"security_definitions.py\")\n serializers_base_dir = os.path.join(build_dir, \"serializers\")\n definitions_serializers_base_dir = os.path.join(serializers_base_dir,\n \"definitions\")\n global_parameters_dir = os.path.join(build_dir, \"parameters\")\n global_response_dir = os.path.join(build_dir, \"responses\")\n url_file = os.path.join(build_dir, \"urls.py\")\n 
mobx_base_dir = os.path.join(build_dir, \"mobx_classes\")\n mobx_base_dir_models = os.path.join(mobx_base_dir, 'models')\n mobx_base_dir_responses = os.path.join(mobx_base_dir, 'responses')\n mobx_base_dir_endpoints = os.path.join(mobx_base_dir, 'endpoints')\n mobx_base_dir_parameters = os.path.join(mobx_base_dir, 'parameters')\n view_environments_dir = os.path.join(build_dir, \"view_environments\")\n sample_json_dir = os.path.join(app_base_path, \"conf\", \"responses\")\n settings_file = os.path.join(app_base_path, \"conf\", \"settings.py\")\n mock_views_dir = os.path.join(build_dir, \"mock_views\")\n views_dir = os.path.join(app_base_path, \"views\")\n api_environment_file = os.path.join(api_spec_dir, \"api_environment.py\")\n android_base_dir = os.path.join(build_dir,\n \"android_%s\" % self.app_name)\n api_doc_dir = os.path.join(build_dir, \"docs\")\n tests_dir = os.path.join(app_base_path, \"tests\")\n global_jars_dir = os.path.join(self.base_dir, \"android_jars\")\n zappa_settings = os.path.join(self.base_dir, \"zappa_settings.json\")\n apidoc = os.path.join(self.base_dir, \"apidoc.json\")\n docs = os.path.join(self.base_dir, \"docs\")\n static = os.path.join(self.base_dir, \"static\")\n static_docs = os.path.join(static, \"docs\")\n interface_dir = os.path.join(app_base_path, 'interfaces')\n service_interface_path = os.path.join(interface_dir, self.app_name\n + '_service_interface.py')\n client_app_base_path = app_base_path+\"_client\"\n client_interface_path = os.path.join(client_app_base_path,\n 'interface.py')\n client_api_client_path = os.path.join(client_app_base_path,\n 'api_client.py')\n client_constants_path = os.path.join(client_app_base_path,\n 'constants.py')\n client_app_init_file = os.path.join(client_app_base_path, '__init__.py')\n base_app_init_file = os.path.join(app_base_path, '__init__.py')\n\n client_setup_py_path = os.path.join(self.base_dir, \"setup.py\")\n client_manifest_path = os.path.join(self.base_dir, \"MANIFEST.in\")\n client_app_base_path_egg_info = client_app_base_path + \".egg-info\"\n pypi_dist_path = os.path.join(self.base_dir, \"dist\")\n package_json = os.path.join(self.base_dir, \"package.json\")\n docs_html_dir = os.path.join(app_base_path, \"docs_html\")\n md_file_name = os.path.join(docs_html_dir, \"docs.md\")\n utils_dir = os.path.join(app_base_path, \"constants\")\n const_file_name = os.path.join(utils_dir, \"generated_enums.py\")\n self.paths = {\n \"base_dir\": self.base_dir,\n \"app_base_path\": app_base_path,\n \"build_dir\": build_dir,\n \"api_spec_dir\": api_spec_dir,\n \"api_spec_migrations_dir\": api_spec_migrations_dir,\n \"api_specs_json\": api_specs_json,\n \"request_response_dir\": request_response_dir,\n \"decorator_options_file\": decorator_options_file,\n \"security_definitions_file\": security_definitions_file,\n \"serializers_base_dir\": serializers_base_dir,\n \"definitions_serializers_base_dir\": definitions_serializers_base_dir,\n \"global_parameters_dir\": global_parameters_dir,\n \"global_response_dir\": global_response_dir,\n \"url_file\": url_file,\n \"view_environments_dir\": view_environments_dir,\n \"sample_json_dir\": sample_json_dir,\n \"settings_file\": settings_file,\n \"mock_views_dir\": mock_views_dir,\n \"views_dir\": views_dir,\n \"api_environment_file\": api_environment_file,\n \"android_base_dir\": android_base_dir,\n \"api_doc_dir\": api_doc_dir,\n \"tests_dir\": tests_dir,\n \"global_jars_dir\": global_jars_dir,\n \"zappa_settings\": zappa_settings,\n \"apidoc\": apidoc,\n \"static\": static,\n 
\"static_docs\": static_docs,\n \"docs\": docs,\n \"interface_dir\": interface_dir,\n \"service_interface_path\": service_interface_path,\n \"client_app_base_path\": client_app_base_path,\n \"client_interface_path\":client_interface_path,\n \"client_api_client_path\":client_api_client_path,\n \"client_setup_py_path\":client_setup_py_path,\n \"client_app_base_path_egg_info\":client_app_base_path_egg_info,\n \"client_manifest_path\": client_manifest_path,\n \"client_app_init_file\":client_app_init_file,\n \"client_constants_path\": client_constants_path,\n \"base_app_init_file\":base_app_init_file,\n \"pypi_dist_path\":pypi_dist_path,\n \"mobx_base_dir\": mobx_base_dir,\n 'mobx_base_dir_models': mobx_base_dir_models,\n 'mobx_base_dir_responses': mobx_base_dir_responses,\n 'mobx_base_dir_endpoints': mobx_base_dir_endpoints,\n 'mobx_base_dir_parameters': mobx_base_dir_parameters,\n \"package_json\": package_json,\n \"docs_html_dir\": docs_html_dir,\n \"md_file_name\": md_file_name,\n \"const_file_name\": const_file_name\n }\n\n def create_package_json(self):\n \"\"\"\n creates package_json.json file\n :return:\n \"\"\"\n f = open(self.paths[\"package_json\"], \"w\")\n from django_swagger_utils.spec_client.get_package_json import \\\n package_json\n f.write(package_json)\n f.close()\n\n def delete_package_json(self):\n \"\"\"\n deletes package_json file\n :return:\n \"\"\"\n os.remove(self.paths[\"package_json\"])\n\n def install_for_spec(self):\n \"\"\"\n necessary packages for splitting and merging , 3rd package will be called only if splitting is taking place,\n hence will be installed only during splitting process\n :return:\n \"\"\"\n self.create_package_json()\n os.system('npm install json-refs')\n os.system('npm install json2yaml')\n os.system('npm install yamljs')\n os.system(\n 'npm install swagger-split') # package only required while splitting hence being installed here\n self.delete_package_json()\n\n def merge_spec(self):\n \"\"\"\n Merges the spec file if the api_spec folder cotains the spec folder which further contains the spec file as small parts\n divided into directories\n :return:\n \"\"\"\n from django_swagger_utils.spec_client.merge_spec import MergeSpec\n merge_spec = MergeSpec(self.paths['api_spec_dir'],\n self.paths['base_dir'])\n merge_spec.merge()\n\n def split_spec(self):\n \"\"\"\n splits the present api_spec.json into further spec folder divided into smaller bits\n :return:\n \"\"\"\n from django_swagger_utils.spec_client.split_spec import SplitSpec\n from django_swagger_utils.core.utils.check_path_exists import \\\n check_path_exists\n\n if check_path_exists(\n os.path.join(self.paths['api_spec_dir'], \"specs\")):\n from shutil import rmtree\n rmtree(os.path.join(self.paths['api_spec_dir'], \"specs\"))\n split_spec = SplitSpec(self.paths['api_spec_dir'],\n self.paths['base_dir'])\n split_spec.split()\n\n def generate_spec_from_sgui(self):\n\n \"\"\"\n gets the configuration from settings\n verifies the app by checking in swagger apps\n if app is verified, generates build based on the spec file from server\n\n :return:\n \"\"\"\n from django.conf import settings\n\n django_swagger_utils_settings = settings.SWAGGER_UTILS\n swagger_apps = django_swagger_utils_settings['APPS']\n app_settings = swagger_apps[self.app_name]\n\n config_list = self.parse_swagger_gui_config(app_settings)\n\n if not config_list:\n return\n access_token = config_list[0]\n organization_id = config_list[1]\n project_id = config_list[2]\n app_id = config_list[3]\n base_url = config_list[4]\n 
from django_swagger_utils.swagger_gui.swagger_spec import SwaggerSpec\n swagger_spec = SwaggerSpec(access_token, organization_id, project_id,\n app_id, base_url, self.paths)\n is_app = swagger_spec.verify_app()\n if is_app:\n spec_file = swagger_spec.get_spec_file()\n self.spec_json = spec_file\n self.parse_swagger_specs()\n self.generate_specs_build()\n return\n\n def parse_swagger_gui_config(self, app_settings):\n \"\"\"\n {\n \"SWAGGER_GUI_CONFIG\":{\n \"ACCESS_TOKEN\":\"\",\n \"ORGANIZATION_ID\":1,\n \"PROJECT_ID\":1,\n \"APP_ID\":1,\n \"SERVICE_URL\":\"\"\n }\n }\n :param app_settings:\n :return: [access_token, organization_id, project_id, app_id, base_url]\n \"\"\"\n sgui_config = app_settings.get('SWAGGER_GUI_CONFIG')\n from colored import fg, attr\n\n if not sgui_config:\n print('{}{}{}SGUI_CONFIG not found in settings. Please define the '\n 'configuration!'.format(fg(1), attr(1), attr(4)))\n return None\n\n access_token = sgui_config.get('ACCESS_TOKEN')\n if not access_token:\n print(\n '{}{}{}ACCESS_TOKEN not found in SWAGGER_GUI_CONFIG. Please '\n 'define the configuration!'.format(fg(1), attr(1), attr(4)))\n return None\n\n organization_id = sgui_config.get('ORGANIZATION_ID')\n if not organization_id:\n print(\n '{}{}{}ORGANIZATION_ID not found in SWAGGER_GUI_CONFIG. '\n 'Please define the configuration!'.format(\n fg(1), attr(1), attr(4)))\n return None\n\n project_id = sgui_config.get('PROJECT_ID')\n if not project_id:\n print(\n '{}{}{}PROJECT_ID not found in SWAGGER_GUI_CONFIG. Please define '\n 'the configuration!'.format(fg(1), attr(1), attr(4)))\n return None\n\n app_id = sgui_config.get('APP_ID')\n if not app_id:\n print(\n '{}{}{}APP_ID not found in SWAGGER_GUI_CONFIG. Please define '\n 'the configuration!'.format(fg(1), attr(1), attr(4)))\n return None\n\n base_url = sgui_config.get('SERVICE_URL')\n if not base_url:\n print(\n '{}{}{}SERVICE_URL not found in SWAGGER_GUI_CONFIG. 
'\n 'Please define the configuration!'.format(\n fg(1), attr(1), attr(4)))\n return None\n\n return [access_token, organization_id, project_id, app_id, base_url]\n\n def sync_spec_from_sgui(self):\n \"\"\"\n parse the swagger gui config\n verify the app using organization id and project id\n update the current project's spec file using server based spec file\n :return:\n \"\"\"\n from django.conf import settings\n\n django_swagger_utils_settings = settings.SWAGGER_UTILS\n swagger_apps = django_swagger_utils_settings['APPS']\n app_settings = swagger_apps[self.app_name]\n config_list = self.parse_swagger_gui_config(app_settings)\n\n if not config_list:\n return\n access_token = config_list[0]\n organization_id = config_list[1]\n project_id = config_list[2]\n app_id = config_list[3]\n base_url = config_list[4]\n from django_swagger_utils.swagger_gui.swagger_spec import SwaggerSpec\n swagger_spec = SwaggerSpec(access_token, organization_id, project_id,\n app_id, base_url, self.paths)\n is_app = swagger_spec.verify_app()\n if is_app:\n swagger_spec.sync_spec_file()\n return\n\n def create_mobx_from_templates(self):\n '''\n This method creates a MobxTemplateGenerator object, which is used to generate\n definitions, responses and endpoints.\n :return:\n '''\n\n from django_swagger_utils.mobx_client.mobx_client import \\\n MobxTemplateGenerator\n mobxtemplategenerator = MobxTemplateGenerator(self.parser,\n self.app_name,\n self.paths[\n 'mobx_base_dir'],\n self.paths)\n mobxtemplategenerator.generate_definitions(\n self.paths['mobx_base_dir_models'])\n mobxtemplategenerator.generate_responses(\n self.paths['mobx_base_dir_responses'])\n mobxtemplategenerator.generate_endpoints(\n self.paths['mobx_base_dir_endpoints'])\n mobxtemplategenerator.generate_parameters(\n self.paths['mobx_base_dir_parameters'])\n\n def add_to_npm(self):\n '''\n Credentials to jfrog are needed to upload the mobx classes as an npm package. 
Credentials need to be placed\n in the ~/.npmrc file.\n :return:\n '''\n self.generate_apidoc_patches()\n vnum = self.get_version()\n\n from django_swagger_utils.mobx_client.mobx_npm_deployment import \\\n MobxNpmDeployment\n mobnpmdeployment = MobxNpmDeployment(self.app_name, self.paths, vnum)\n mobnpmdeployment.delete_previous()\n mobnpmdeployment.create_template()\n mobnpmdeployment.compress_to_npm()\n mobnpmdeployment.delete_previous()\n\n def check_create_settings_file(self):\n \"\"\"\n checks if the settings file is present, else writes the app name to a new settings file\n :return:\n \"\"\"\n path = self.paths[\"settings_file\"]\n from django_swagger_utils.core.utils.check_path_exists import \\\n check_path_exists\n settings_file = check_path_exists(path)\n if not settings_file:\n settings_file_contents = \"# '%s' settings\" % self.app_name\n from django_swagger_utils.core.utils.write_to_file import \\\n write_to_file\n write_to_file(settings_file_contents, path)\n\n def check_build_exists(self):\n \"\"\"\n checks if the build folder exists\n :return:\n \"\"\"\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import \\\n check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\n \"Build Directory Already Exists, please run update_specs_build\")\n\n def generate_interfaces(self, override):\n \"\"\"\n generates the interfaces and also the sample request and response.\n :return:\n \"\"\"\n from django_swagger_utils.interface_client.interface_generator import \\\n InterfaceGenerator\n interface_generator = InterfaceGenerator(self.app_name, self.parser,\n self.paths, override)\n interface_generator.generate_interfaces()\n\n def load_spec_file(self, spec_file):\n \"\"\"\n forms a dict from the json and raises an exception if the file is not present\n :param spec_file:\n :return:\n \"\"\"\n from django_swagger_utils.core.utils.check_path_exists import \\\n check_path_exists\n spec_file_path = check_path_exists(spec_file)\n # print spec_file_path, spec_file, self.app_name\n if not spec_file_path:\n raise Exception(\"%s missing\" % spec_file)\n with open(spec_file) as f:\n json_text = f.read()\n try:\n self.spec_json = json.loads(json_text)\n except ValueError:\n print(\"The \\\"%s/api_specs/api_spec.json\\\" is not a proper JSON.\" % self.app_name)\n exit(1)\n\n def validate_swagger(self):\n from swagger_spec_validator.util import get_validator\n validator = get_validator(self.spec_json)\n validator.validate_spec(self.spec_json, spec_url='')\n\n def custom_spec_validator(self):\n # todo need to check for unsupported features present in the specs_json\n\n # content-type \"application/json\", \"application/x-www-form-urlencoded\", -- multipart/form-data not supported\n # parameter type \"formData\" not supported\n # custom header parameter name does not match standard http request / response headers\n # path parameters regex must be single group\n # file - parameter types not supported\n # path param value must be single word, no spaces allowed in param name\n # python keywords as key / properties names\n # allOf not supported yet\n # response headers - to _ conversion, naming conversion\n # not allowing 'default' key as response method\n self._validate_group_names_and_operation_ids()\n pass\n\n def _validate_group_names_and_operation_ids(self):\n paths = self.spec_json['paths']\n\n operation_ids = list()\n group_names = list()\n for path, path_dict in paths.items():\n for method, method_dict in 
path_dict.items():\n if method in [\"get\", \"put\", \"post\", \"delete\", \"options\",\n \"head\", \"patch\"]:\n operation_ids.append(method_dict['operationId'])\n if method_dict.get('x-group', ''):\n group_names.append(method_dict['x-group'])\n\n from django_swagger_utils.drf_server.exceptions import BadRequest\n for group_name in group_names:\n for operation_id in operation_ids:\n if group_name == operation_id:\n raise BadRequest(\n \"group name and operation_id cannot be the same\")\n\n\n def parse_swagger_specs(self):\n from django_swagger_utils.core.parsers.swagger_parser import \\\n SwaggerParser\n self.parser = SwaggerParser(spec_json=self.spec_json)\n\n def generate_apidoc_patches(self):\n \"\"\"\n generates patches for changes in the spec\n :return:\n \"\"\"\n base_path = self.paths[\"api_doc_dir\"]\n from django_swagger_utils.core.utils.mk_dirs import MkDirs\n MkDirs().mk_dir_if_not_exits(file_name=base_path + \"/\")\n\n from django_swagger_utils.apidoc_gen.generators.patch_generator import \\\n PatchGenerator\n\n patch_generator = PatchGenerator(self.app_name, self.parser,\n self.paths, base_path)\n # generating api docs\n patch_generator.generate_json_patch()\n\n def get_version(self):\n \"\"\"\n :return: the version of the spec file\n \"\"\"\n import os\n version = 0\n if os.path.exists(self.paths[\"api_spec_migrations_dir\"]):\n version_list = []\n dir_list = os.listdir(self.paths[\"api_spec_migrations_dir\"])\n for dl in dir_list:\n if '_patch.json' in dl:\n version_num = int(dl.replace(\"_patch.json\", \"\"))\n version_list.append(version_num)\n version_list.sort(reverse=False)\n if len(version_list) != 0:\n version = version_list[-1]\n version += 1\n return version\n\n def generate_patch_build(self, domain):\n # TODO change name of def\n \"\"\"\n generates docs for patches\n :param domain:\n :return:\n \"\"\"\n base_path = self.paths[\"api_doc_dir\"]\n self.generate_apidoc_patches()\n from django_swagger_utils.apidoc_gen.generators.patch_generator import \\\n PatchGenerator\n patch_generator = PatchGenerator(self.app_name, self.parser,\n self.paths, base_path)\n patch_generator.filter_for_deleted_apis()\n\n process = subprocess.Popen(['which', 'apidoc'], stdout=subprocess.PIPE)\n\n output = process.communicate()[0]\n if output:\n\n apidoc_json_path = os.path.join(self.paths[\"base_dir\"], \"apidoc.json\")\n if not os.path.exists(apidoc_json_path):\n with open(apidoc_json_path, 'w') as outfile:\n apidoc_content = {\n \"url\": \"https://ib-backend-dev.apigateway.in\",\n \"version\": \"0.0.1\",\n \"description\": \"\",\n \"name\": \"iBHubs_backend API Documentation\",\n \"title\": \"iBHubs_backend Documentation\"}\n json.dump(apidoc_content, outfile, indent=4)\n # by default we assume the user is not working on any specific branch, so we fix\n # the url to the default url above; then we check if any specific parameter is given\n # and replace the url with the required url\n if domain != '' and domain:\n with open(self.paths[\"apidoc\"]) as src_json:\n apidoc_content = json.load(src_json)\n apidoc_content['url'] = \"https://\" + domain\n with open(self.paths[\"apidoc\"], 'w') as outfile:\n json.dump(apidoc_content, outfile, indent=2)\n # the below command is responsible for creating docs\n\n output_path = os.path.join(self.base_dir, 'docs_{}'.format(self.app_name))\n destination_path = os.path.join(self.base_dir, 'docs', self.app_name)\n\n api_doc_options = ['apidoc', '-i', os.path.join(self.base_dir, self.app_name, 'build', 'docs'),\n '-o',\n output_path,\n '-e', 'django_swagger_utils/*',\n '-e', 
'static/*',\n '-e', 'node_modules/*']\n from django.conf import settings\n api_doc_exclude_dirs = getattr(settings,\n 'API_DOC_EXCLUDE_DIRS', [])\n if api_doc_exclude_dirs:\n for each_dir in api_doc_exclude_dirs:\n api_doc_options.extend(['-e', \"{}/*\".format(each_dir)])\n process = subprocess.Popen(api_doc_options, stdout=subprocess.PIPE)\n print(process.communicate()[0])\n shutil.move(output_path, destination_path)\n ################################################\n # hosting apidoc\n ################################################\n # obtaining the path of static folder of django-swagger-utils\n # django_swagger_utils_path = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\n # static_folder_path = os.path.join(django_swagger_utils_path, \"static\")\n # import shutil\n # # create a folder apidoc , delete if previously exists\n # if os.path.exists(os.path.join(static_folder_path, \"apidoc\")):\n # shutil.rmtree(os.path.join(static_folder_path, \"apidoc\"))\n # apidoc_path = os.path.join(static_folder_path, \"apidoc\")\n #\n # os.mkdir(apidoc_path)\n\n # from distutils.dir_util import copy_tree\n # copydocs from docs to apidoc in swagger utils\n # try:\n # copy_tree(os.path.join(self.base_dir, 'docs'), apidoc_path)\n # except Exception as err:\n # print err\n\n # browse to localhost:/static/apidoc/index.html\n\n else:\n raise CommandError(\n \"Help: Install apidoc: [ sudo npm install -g apidoc ]\")\n\n def generate_specs_build(self):\n \"\"\"\n generates the elements present in spec file\n :return:\n \"\"\"\n from django_swagger_utils.drf_server.generators.swagger_generator import \\\n SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()\n\n def create_documentation(self, app, counter):\n \"\"\"\n documentation of the spec file - creates a .md file\n \"\"\"\n from django_swagger_utils.core.utils.write_to_file import write_to_file\n from django_swagger_utils.apidoc_gen.generators.markdown_generator import \\\n MarkdownGenerator\n\n filename = self.paths[\"md_file_name\"]\n\n a = \"\"\n if counter == 0:\n a += \"# \" + \"DOCUMENTATION\" + \"\\n\\n\"\n counter += 1\n\n a += \"## \" + app.title() + \" APIs\" + \"\\n\" + \"------\" + \"\\n\\n\"\n write_to_file(a, filename, init_required=False)\n\n base_path = os.path.join(self.base_dir, self.app_name)\n markdown_obj = MarkdownGenerator(self.app_name, self.parser,\n self.paths, base_path)\n filepath = markdown_obj.create_documentation(filename)\n # markdown_obj.convert_to_html(filepath)\n\n def constant_gen(self, override):\n \"\"\"\n creates the constants(.py) file for the variables in the spec file\n \"\"\"\n from django_swagger_utils.core.utils.check_path_exists import \\\n check_path_exists\n from django_swagger_utils.core.utils.write_to_file import write_to_file\n from django_swagger_utils.apidoc_gen.generators.constant_generator_v2 import \\\n ConstantGeneratorV2\n\n file_name = self.paths[\"const_file_name\"]\n file_exists = check_path_exists(file_name)\n\n # create the folder and file if either of them doesn't exist\n if not file_exists:\n write_to_file(\"\", file_name)\n\n # if the file already exists and override command are not given\n if file_exists and 
not override:\n print(\"can't perform the action as the file already exists in the \" + self.app_name + \" app\")\n\n # if the file doesn't exist or override command is given\n if not file_exists or override:\n constant_obj = ConstantGeneratorV2(self.parser, file_name)\n constant_obj.constant_gen()\n\n def android_build(self):\n \"\"\"\n generates and deploys the jar file:\n first find the latest version number, following which generate the jar and deploy it\n :return:\n \"\"\"\n\n self.generate_apidoc_patches()\n vnum = self.get_version()\n self.android_jar_genaration(vnum)\n self.android_jar_deployment(vnum)\n\n def android_jar_genaration(self, vnum):\n \"\"\"\n generates the jar\n :param vnum: version number\n :return:\n \"\"\"\n base_path = self.paths[\"android_base_dir\"]\n from django_swagger_utils.android_client.generators.android_generator import \\\n AndroidGenerator\n android_gen = AndroidGenerator(self.app_name, self.parser, self.paths,\n base_path)\n\n # generating all android models\n android_gen.generate_all_models()\n\n # generating android requests\n android_gen.generate_android_requests_responses()\n\n # generating android server_gen commands\n android_gen.generate_android_server_commands()\n\n # generating jar files\n android_gen.generate_jars(vnum)\n\n def android_jar_deployment(self, vnum):\n \"\"\"\n deploys the jar in the remote artifactory\n :param vnum: version number\n :return:\n \"\"\"\n from django_swagger_utils.android_client.generators.android_deployment import \\\n AndroidJarDeployment\n base_path = self.paths[\"android_base_dir\"]\n android_deploy = AndroidJarDeployment(self.app_name, self.parser,\n self.paths, base_path)\n android_deploy.jar_deployment(vnum)\n\n def android_build_v2(self):\n \"\"\"\n generates and deploys the jar file for version 2:\n first find the latest version number, following which generate the jar and deploy it\n :return:\n \"\"\"\n self.generate_apidoc_patches()\n vnum = self.get_version()\n self.android_jar_v2_genaration(vnum)\n self.android_jar_v2_deployment(vnum)\n\n def android_jar_v2_genaration(self, vnum):\n \"\"\"\n generates the jar for the spec\n :param vnum: version number\n :return:\n \"\"\"\n base_path = self.paths[\"android_base_dir\"]\n from django_swagger_utils.android_client_v2.generators_v2.android_generator_v2 import \\\n AndroidGeneratorV2\n android_gen = AndroidGeneratorV2(self.app_name, self.parser,\n self.paths, base_path)\n\n # generating all android models\n android_gen.generate_all_models_v2()\n\n # generating android requests\n android_gen.generate_android_requests_responses_v2()\n\n # generating android server_gen commands\n android_gen.generate_android_server_commands_v2()\n\n # generating jar files\n android_gen.generate_jars_v2(vnum)\n\n def android_jar_v2_deployment(self, vnum):\n \"\"\"\n deploys the jar in the remote artifactory\n :param vnum: version number\n :return:\n \"\"\"\n base_path = self.paths[\"android_base_dir\"]\n from django_swagger_utils.android_client_v2.generators_v2.android_deployment_v2 import \\\n AndroidJarDeploymentV2\n android_deploy = AndroidJarDeploymentV2(self.app_name, self.parser,\n self.paths, base_path)\n android_deploy.jar_deployment_v2(vnum)\n\n def clean(self):\n \"\"\"\n deletes the build and docs\n :return:\n \"\"\"\n if os.path.exists(self.paths['build_dir']):\n shutil.rmtree(self.paths['build_dir'])\n if os.path.exists(os.path.join(self.base_dir, 'docs')):\n shutil.rmtree(os.path.join(self.base_dir, 'docs'))\n os.system(\"find . 
-name \\*.pyc -delete\")\n\n @property\n def swagger_generator(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import \\\n SwaggerGenerator\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n return swagger_gen\n\n def generate_api_client_interface(self):\n\n from django_swagger_utils.interface_client.interface_generator import \\\n InterfaceGenerator\n interface_generator = InterfaceGenerator(self.app_name, self.parser,\n self.paths, True)\n interface_generator.generate_interfaces(\n self.paths[\"client_interface_path\"])\n\n def generate_api_client_constants(self):\n\n from django_swagger_utils.apidoc_gen.generators.constant_generator_v2 \\\n import ConstantGeneratorV2\n constant_obj = ConstantGeneratorV2(self.parser,\n self.paths[\"client_constants_path\"])\n constant_obj.constant_gen()\n\n def generate_api_client(self):\n from django_swagger_utils.api_client.api_client_generator import \\\n APIClientGenerator\n api_client_generator = APIClientGenerator(self.app_name, self.parser, self.paths)\n api_client_generator.generate()\n\n def generate_api_client_setup_py(self):\n from django_swagger_utils.api_client.setup_py_generator import \\\n SetupPyGenerator\n setup_py_generator = SetupPyGenerator(self.app_name, self.paths)\n setup_py_generator.setup_template()\n setup_py_generator.generate_init_file()\n\n def deploy_api_client(self):\n\n # dist command\n os.system(\"python setup.py sdist upload -r local\")\n\n # deleting the generated files\n from shutil import rmtree\n try:\n os.rename(self.paths[\"client_manifest_path\"]+\".old\",\n self.paths[\"client_manifest_path\"])\n\n os.rename(self.paths[\"client_setup_py_path\"]+\".old\",\n self.paths[\"client_setup_py_path\"])\n except OSError:\n pass\n try:\n rmtree(self.paths['client_app_base_path'])\n except OSError:\n pass\n try:\n rmtree(self.paths['client_app_base_path_egg_info'])\n\n except OSError:\n pass\n # try:\n # rmtree(self.paths['pypi_dist_path'])\n # except OSError:\n # pass\n\n def generate_deploy_api_client(self):\n\n try:\n os.rename(self.paths[\"client_manifest_path\"],\n self.paths[\"client_manifest_path\"]+\".old\")\n os.rename(self.paths[\"client_setup_py_path\"],\n self.paths[\"client_setup_py_path\"]+\".old\")\n except OSError:\n pass\n\n self.generate_api_client_interface()\n self.generate_api_client()\n self.generate_api_client_constants()\n self.generate_api_client_setup_py()\n self.deploy_api_client()\n\n\nclass Command(BaseCommand):\n can_import_settings = True\n help = 'Generate views and docs from swagger spec files'\n\n def add_arguments(self, parser):\n parser.add_argument('-a', '--apis', action='store_true',\n help='Build API Views')\n parser.add_argument('-t', '--thirdparty', action='store_true',\n help='Build Third Party API Views')\n parser.add_argument('-l', '--lib', action='store_true',\n help='Build Third Party API Views in lib directory for google ape')\n parser.add_argument('-co', '--cons', action='store_true',\n help='Build Constants')\n parser.add_argument('--f', action='store_true', help='Build Constants')\n parser.add_argument('-d', '--docs', action='store_true',\n help='Build Docs')\n parser.add_argument('-md', '--markdown', action='store_true',\n help='Build Documentation')\n\n parser.add_argument('-j', '--jars', action='store_true',\n help='Build Android Jars')\n parser.add_argument('-j2', '--jarsv2', action='store_true',\n help='Build Android Jars V2')\n parser.add_argument('-m', '--mobx3', action='store_true',\n help='To generate mobx classes 
from templates')\n parser.add_argument('-n', '--npm', action='store_true',\n help='To upload generated mobx classes to npm library')\n parser.add_argument('-j1_gen', '--jars_v1_generation',\n action='store_true',\n help='Build Android Jars Generation')\n parser.add_argument('-j1_deploy', '--jars_v1_deployment',\n action='store_true',\n help='Build Android Jars Deployment')\n parser.add_argument('-j2_gen', '--jars_v2_generation',\n action='store_true',\n help='Build Android Jars Generation')\n parser.add_argument('-j2_deploy', '--jars_v2_deployment',\n action='store_true',\n help='Build Android Jars Deployment')\n parser.add_argument('-c', '--clean', action='store_true',\n help='Clean Builds')\n parser.add_argument('-I', '--install', action='store_true',\n help='install requirements for spec file splitting and merging')\n parser.add_argument('-M', '--merge', action='store_true',\n help='Merge the spec file structure present in spec folder in api_spec folder')\n parser.add_argument('-S', '--split', action='store_true',\n help='Split the present api_spec.json into further folders')\n parser.add_argument('app', nargs='*', type=str)\n parser.add_argument('-i', '--interfaces', action='store_true',\n help='generate interfaces from spec files')\n parser.add_argument('-b', nargs=1, type=str)\n parser.add_argument('-sc', '--sync_spec', action='store_true',\n help='If api spec needs to sync with dev')\n parser.add_argument('-sb', '--sgui_build', action='store_true',\n help='generate api spec from swagger gui spec files')\n\n parser.add_argument('-api_client', '--api_client', action='store_true',\n help='generate api client from spec files')\n\n def handle(self, *args, **options):\n '''\n Handles the concerned activity\n :param args: arguments the user gives on the command line\n :param options: options to the arguments given\n :return:\n '''\n\n from django.conf import settings\n import os\n base_dir = settings.BASE_DIR\n # obtain path of zappa_settings\n zappa_settings = os.path.join(base_dir, \"zappa_settings.json\")\n # set default domain as empty string\n domain = ''\n django_swagger_utils_settings = settings.SWAGGER_UTILS\n swagger_apps = list(django_swagger_utils_settings['APPS'].keys())\n\n third_party_swagger_apps = getattr(settings,\n 'THIRD_PARTY_SWAGGER_APPS', [])\n # if domain specific url is required\n if options['b']:\n # check for existence of zappa_settings.json\n if os.path.exists(zappa_settings):\n with open(zappa_settings) as src_json:\n zappa_settings_dict = json.load(src_json)\n # checking if given branch exists\n if options['b'][0] in zappa_settings_dict:\n # replacing default domain with branch domain\n req_branch = options['b'][0]\n domain = zappa_settings_dict[req_branch]['domain']\n else:\n\n # terminating\n print(\"Given branch %s is not found\" % options['b'][0])\n exit(1)\n else:\n print(\"zappa_settings.json not found\")\n exit(1)\n\n try:\n apps = options['app']\n if not apps:\n apps = swagger_apps\n\n for app in apps:\n if app in swagger_apps:\n if options['sgui_build']:\n # if option is to build the swagger spec from server\n # validation not required because there might not be any spec file\n build = Build(app, base_dir,\n django_swagger_utils_settings,\n is_validation_required=False)\n build.generate_spec_from_sgui()\n else:\n build = Build(app, base_dir,\n django_swagger_utils_settings)\n\n if options['apis'] or options['docs']:\n build.clean()\n\n counter = 0\n # calling the concerned build methods for each app\n for app in apps:\n if app in swagger_apps:\n if 
 options['sgui_build']:\n                        # if option is to build the swagger spec from server\n                        # validation not required because there might not be any spec file\n                        build = Build(app, base_dir,\n                                      django_swagger_utils_settings,\n                                      is_validation_required=False)\n                        build.generate_spec_from_sgui()\n                    else:\n                        build = Build(app, base_dir,\n                                      django_swagger_utils_settings)\n\n                    if options['sync_spec']:\n                        build.sync_spec_from_sgui()\n\n                    if options['apis']:\n                        build.generate_specs_build()\n\n                    if options['docs']:\n                        build.generate_patch_build(domain)\n\n                    if options['markdown']:\n                        build.create_documentation(app, counter)\n\n                    if options['cons']:\n                        override = False\n                        if options['f']:\n                            override = True\n\n                        build.constant_gen(override)\n\n                    if options['jars']:\n                        Build(app, base_dir,\n                              django_swagger_utils_settings).android_build()\n                    if options['jarsv2']:\n                        Build(app, base_dir,\n                              django_swagger_utils_settings).android_build_v2()\n\n                    if options['jars_v1_generation']:\n                        vnum = build.get_version()\n                        build.android_jar_genaration(vnum)\n                    if options['jars_v1_deployment']:\n                        vnum = build.get_version()\n                        build.android_jar_deployment(vnum)\n                    if options['jars_v2_generation']:\n                        vnum = build.get_version()\n                        build.android_jar_v2_genaration(vnum)\n                    if options['jars_v2_deployment']:\n                        vnum = build.get_version()\n                        build.android_jar_v2_deployment(vnum)\n                    if options['clean']:\n                        build.clean()\n                    if options['interfaces']:\n                        override = False\n                        if options['f']:\n                            override = True\n                        build.generate_interfaces(override)\n                    if options['api_client']:\n                        from colored import fg, attr\n                        print(\"{}{}{}Generating API clients \"\n                              \"for : {}\".format(fg(2), attr(1), attr(4), app))\n                        build.generate_deploy_api_client()\n                    if options['mobx3']:\n                        # to generate mobx classes in a folder mobx_classes in build folder\n                        build.create_mobx_from_templates()\n                    if options['npm']:\n                        # to deploy the generated mobx classes under the name ib_appname_mobx\n                        build.add_to_npm()\n\n                    if options['install']:\n                        # ->install necessary packages for merging and splitting the spec file\n                        build.install_for_spec()\n                    if options['merge']:\n                        # ->merge the spec file which is present in parts in api_spec/specs folder\n                        build.merge_spec()\n                    if options['split']:\n                        # ->split the spec file api_spec.json to specs/ folder\n                        build.split_spec()\n\n                else:\n                    print(\n                        \"Ignoring %s app. Please add it in SWAGGER_UTILS['APPS'] first.\" % app)\n\n            if options['thirdparty']:\n\n                for third_party_app in third_party_swagger_apps:\n\n                    third_party_base_dir = None\n                    if options[\"lib\"]:\n                        third_party_base_dir = base_dir + \"/lib\"\n                    else:\n                        try:\n                            third_party_base_dir = os.path.abspath(\n                                os.path.join(__import__(third_party_app).\n                                             __path__[0], os.pardir))\n                        except ImportError:\n                            raise\n\n                    build = Build(app_name=third_party_app,\n                                  base_dir=third_party_base_dir)\n                    build.clean()\n                    build.generate_specs_build()\n        except Exception as err:\n            print(err)\n            raise\n\n\n\"\"\"\nOpen API Specs (swagger.io) - DRF Server - IB Group\n\n1. git clone https://bitbucket.org/rahulsccl/ib_service/\n2. pip install -r requirements.txt\n3. Open API Specs defined for app_b in app_b/api_specs/api_spec.json Ref\n[Open API Specs](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md)\n4. python common/swagger/utils/management/build_common.py [ this will generate specs based api server_gen ]\n5. manage.py test .build.tests\n6. python manage.py runserver\n7. 
look at 127.0.0.1:8000/api/app_b/user/1234/\n\"\"\"\n", "sub_path": "lib/python3.8/site-packages/django_swagger_utils/management/commands/build.py", "file_name": "build.py", "file_ext": "py", "file_size_in_byte": 47399, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.path.join", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 60, "usage_type": "call"}, {"api_name": "os.path", "line_number": 60, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path", "line_number": 63, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 65, "usage_type": "call"}, {"api_name": "os.path", "line_number": 65, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 77, "usage_type": "call"}, {"api_name": "os.path", 
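The generate_deploy_api_client / deploy_api_client pair in the record above uses a park-and-restore idiom: the hand-maintained MANIFEST and setup.py are renamed to ".old" before generation, the client is generated and uploaded, and the originals are then renamed back while the generated tree and egg-info are deleted. A minimal standalone sketch of that idiom follows; the path keys and function names here are hypothetical, not the module's real attributes, and unlike the original it restores inside try/finally so the parked files come back even when generation fails:

import os
from shutil import rmtree

def park_generate_restore(paths, generate, deploy):
    # Park the hand-maintained files so regeneration cannot clobber them.
    # `paths` is a hypothetical dict: {"manifest": ..., "setup_py": ..., "app_base": ..., "egg_info": ...}
    try:
        os.rename(paths["manifest"], paths["manifest"] + ".old")
        os.rename(paths["setup_py"], paths["setup_py"] + ".old")
    except OSError:
        pass  # first run: nothing to park yet
    try:
        generate()  # e.g. regenerate interfaces, client code, constants, setup.py
        deploy()    # e.g. run the sdist upload step
    finally:
        # Put the parked originals back and drop the generated artefacts.
        try:
            os.rename(paths["manifest"] + ".old", paths["manifest"])
            os.rename(paths["setup_py"] + ".old", paths["setup_py"])
        except OSError:
            pass
        for key in ("app_base", "egg_info"):
            try:
                rmtree(paths[key])
            except (KeyError, OSError):
                pass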
"line_number": 77, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 87, "usage_type": "call"}, {"api_name": "os.path", "line_number": 87, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 95, "usage_type": "call"}, {"api_name": "os.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path", "line_number": 97, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path", "line_number": 100, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 105, "usage_type": "call"}, {"api_name": "os.path", "line_number": 105, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path", "line_number": 106, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 109, "usage_type": "call"}, {"api_name": "os.path", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 110, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "io.open", "line_number": 169, "usage_type": "call"}, {"api_name": "django_swagger_utils.spec_client.get_package_json.package_json", "line_number": 172, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 180, "usage_type": "call"}, {"api_name": "os.system", "line_number": 189, "usage_type": "call"}, {"api_name": "os.system", "line_number": 190, "usage_type": "call"}, {"api_name": "os.system", "line_number": 191, "usage_type": "call"}, {"api_name": "os.system", "line_number": 192, "usage_type": "call"}, {"api_name": "django_swagger_utils.spec_client.merge_spec.MergeSpec", "line_number": 203, "usage_type": "call"}, {"api_name": "django_swagger_utils.core.utils.check_path_exists.check_path_exists", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path", "line_number": 217, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 219, "usage_type": "call"}, {"api_name": "os.path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "django_swagger_utils.spec_client.split_spec.SplitSpec", "line_number": 220, "usage_type": "call"}, {"api_name": "django.conf.settings.SWAGGER_UTILS", "line_number": 235, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 235, "usage_type": "name"}, {"api_name": "django_swagger_utils.swagger_gui.swagger_spec.SwaggerSpec", "line_number": 249, "usage_type": "call"}, {"api_name": "colored.fg", "line_number": 278, "usage_type": "call"}, {"api_name": "colored.attr", "line_number": 278, "usage_type": "call"}, {"api_name": "colored.fg", "line_number": 285, "usage_type": "call"}, {"api_name": "colored.attr", "line_number": 285, "usage_type": "call"}, {"api_name": "colored.fg", "line_number": 293, "usage_type": "call"}, {"api_name": "colored.attr", "line_number": 293, "usage_type": "call"}, {"api_name": "colored.fg", "line_number": 300, "usage_type": "call"}, {"api_name": "colored.attr", "line_number": 300, "usage_type": "call"}, {"api_name": "colored.fg", "line_number": 307, "usage_type": "call"}, {"api_name": "colored.attr", "line_number": 307, "usage_type": "call"}, {"api_name": "colored.fg", "line_number": 315, "usage_type": "call"}, {"api_name": "colored.attr", "line_number": 315, "usage_type": "call"}, {"api_name": "django.conf.settings.SWAGGER_UTILS", "line_number": 329, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 329, "usage_type": "name"}, {"api_name": "django_swagger_utils.swagger_gui.swagger_spec.SwaggerSpec", "line_number": 342, "usage_type": "call"}, {"api_name": "django_swagger_utils.mobx_client.mobx_client.MobxTemplateGenerator", "line_number": 358, "usage_type": "call"}, {"api_name": "django_swagger_utils.mobx_client.mobx_npm_deployment.MobxNpmDeployment", "line_number": 383, "usage_type": "call"}, {"api_name": "django_swagger_utils.core.utils.check_path_exists.check_path_exists", "line_number": 397, "usage_type": "call"}, {"api_name": "django_swagger_utils.core.utils.write_to_file.write_to_file", "line_number": 402, "usage_type": "call"}, {"api_name": "django_swagger_utils.core.utils.check_path_exists.check_path_exists", "line_number": 412, "usage_type": "call"}, {"api_name": "django_swagger_utils.interface_client.interface_generator.InterfaceGenerator", "line_number": 424, "usage_type": "call"}, {"api_name": 
"django_swagger_utils.core.utils.check_path_exists.check_path_exists", "line_number": 436, "usage_type": "call"}, {"api_name": "io.open", "line_number": 440, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 443, "usage_type": "call"}, {"api_name": "swagger_spec_validator.util.get_validator", "line_number": 450, "usage_type": "call"}, {"api_name": "django_swagger_utils.drf_server.exceptions.BadRequest", "line_number": 486, "usage_type": "call"}, {"api_name": "django_swagger_utils.core.parsers.swagger_parser.SwaggerParser", "line_number": 493, "usage_type": "call"}, {"api_name": "django_swagger_utils.core.utils.mk_dirs.MkDirs", "line_number": 502, "usage_type": "call"}, {"api_name": "django_swagger_utils.apidoc_gen.generators.patch_generator.PatchGenerator", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 518, "usage_type": "call"}, {"api_name": "os.path", "line_number": 518, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 520, "usage_type": "call"}, {"api_name": "django_swagger_utils.apidoc_gen.generators.patch_generator.PatchGenerator", "line_number": 542, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 546, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 546, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 551, "usage_type": "call"}, {"api_name": "os.path", "line_number": 551, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 552, "usage_type": "call"}, {"api_name": "os.path", "line_number": 552, "usage_type": "attribute"}, {"api_name": "io.open", "line_number": 553, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 560, "usage_type": "call"}, {"api_name": "io.open", "line_number": 565, "usage_type": "call"}, {"api_name": "json.load", "line_number": 566, "usage_type": "call"}, {"api_name": "io.open", "line_number": 568, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 569, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 572, "usage_type": "call"}, {"api_name": "os.path", "line_number": 572, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 573, "usage_type": "call"}, {"api_name": "os.path", "line_number": 573, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 575, "usage_type": "call"}, {"api_name": "os.path", "line_number": 575, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 582, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 587, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 587, "usage_type": "attribute"}, {"api_name": "shutil.move", "line_number": 589, "usage_type": "call"}, {"api_name": "django.core.management.base.CommandError", "line_number": 614, "usage_type": "call"}, {"api_name": "django_swagger_utils.drf_server.generators.swagger_generator.SwaggerGenerator", "line_number": 625, "usage_type": "call"}, {"api_name": "django_swagger_utils.core.utils.write_to_file.write_to_file", "line_number": 653, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 655, "usage_type": "call"}, {"api_name": "os.path", "line_number": 655, "usage_type": "attribute"}, {"api_name": "django_swagger_utils.apidoc_gen.generators.markdown_generator.MarkdownGenerator", "line_number": 656, "usage_type": "call"}, {"api_name": "django_swagger_utils.core.utils.check_path_exists.check_path_exists", "line_number": 672, "usage_type": "call"}, {"api_name": 
"django_swagger_utils.core.utils.write_to_file.write_to_file", "line_number": 676, "usage_type": "call"}, {"api_name": "django_swagger_utils.apidoc_gen.generators.constant_generator_v2.ConstantGeneratorV2", "line_number": 684, "usage_type": "call"}, {"api_name": "django_swagger_utils.android_client.generators.android_generator.AndroidGenerator", "line_number": 708, "usage_type": "call"}, {"api_name": "django_swagger_utils.android_client.generators.android_deployment.AndroidJarDeployment", "line_number": 732, "usage_type": "call"}, {"api_name": "django_swagger_utils.android_client_v2.generators_v2.android_generator_v2.AndroidGeneratorV2", "line_number": 756, "usage_type": "call"}, {"api_name": "django_swagger_utils.android_client_v2.generators_v2.android_deployment_v2.AndroidJarDeploymentV2", "line_number": 780, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 789, "usage_type": "call"}, {"api_name": "os.path", "line_number": 789, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", "line_number": 790, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 791, "usage_type": "call"}, {"api_name": "os.path", "line_number": 791, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 791, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 792, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 792, "usage_type": "call"}, {"api_name": "os.path", "line_number": 792, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 793, "usage_type": "call"}, {"api_name": "django_swagger_utils.drf_server.generators.swagger_generator.SwaggerGenerator", "line_number": 799, "usage_type": "call"}, {"api_name": "django_swagger_utils.interface_client.interface_generator.InterfaceGenerator", "line_number": 806, "usage_type": "call"}, {"api_name": "django_swagger_utils.apidoc_gen.generators.constant_generator_v2.ConstantGeneratorV2", "line_number": 815, "usage_type": "call"}, {"api_name": "django_swagger_utils.api_client.api_client_generator.APIClientGenerator", "line_number": 822, "usage_type": "call"}, {"api_name": "django_swagger_utils.api_client.setup_py_generator.SetupPyGenerator", "line_number": 828, "usage_type": "call"}, {"api_name": "os.system", "line_number": 835, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 840, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 843, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 848, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 852, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 864, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 866, "usage_type": "call"}, {"api_name": "django.core.management.base.BaseCommand", "line_number": 878, "usage_type": "name"}, {"api_name": "django.conf.settings.BASE_DIR", "line_number": 947, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 947, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 949, "usage_type": "call"}, {"api_name": "os.path", "line_number": 949, "usage_type": "attribute"}, {"api_name": "django.conf.settings.SWAGGER_UTILS", "line_number": 952, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 952, "usage_type": "name"}, {"api_name": "django.conf.settings", "line_number": 955, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 960, "usage_type": "call"}, {"api_name": "os.path", "line_number": 960, "usage_type": 
"attribute"}, {"api_name": "io.open", "line_number": 961, "usage_type": "call"}, {"api_name": "json.load", "line_number": 962, "usage_type": "call"}, {"api_name": "{'package_json': 'django_swagger_utils.spec_client.get_package_json.package_json', 'MergeSpec': 'django_swagger_utils.spec_client.merge_spec.MergeSpec', 'SplitSpec': 'django_swagger_utils.spec_client.split_spec.SplitSpec', 'check_path_exists': 'django_swagger_utils.core.utils.check_path_exists.check_path_exists', 'rmtree': 'shutil.rmtree', 'settings': 'django.conf.settings', 'SwaggerSpec': 'django_swagger_utils.swagger_gui.swagger_spec.SwaggerSpec', 'fg': 'colored.fg', 'attr': 'colored.attr', 'MobxTemplateGenerator': 'django_swagger_utils.mobx_client.mobx_client.MobxTemplateGenerator', 'MobxNpmDeployment': 'django_swagger_utils.mobx_client.mobx_npm_deployment.MobxNpmDeployment', 'write_to_file': 'django_swagger_utils.core.utils.write_to_file.write_to_file', 'InterfaceGenerator': 'django_swagger_utils.interface_client.interface_generator.InterfaceGenerator', 'get_validator': 'swagger_spec_validator.util.get_validator', 'BadRequest': 'django_swagger_utils.drf_server.exceptions.BadRequest', 'SwaggerParser': 'django_swagger_utils.core.parsers.swagger_parser.SwaggerParser', 'MkDirs': 'django_swagger_utils.core.utils.mk_dirs.MkDirs', 'PatchGenerator': 'django_swagger_utils.apidoc_gen.generators.patch_generator.PatchGenerator', 'os': 'os', 'SwaggerGenerator': 'django_swagger_utils.drf_server.generators.swagger_generator.SwaggerGenerator', 'MarkdownGenerator': 'django_swagger_utils.apidoc_gen.generators.markdown_generator.MarkdownGenerator', 'ConstantGeneratorV2': 'django_swagger_utils.apidoc_gen.generators.constant_generator_v2.ConstantGeneratorV2', 'AndroidGenerator': 'django_swagger_utils.android_client.generators.android_generator.AndroidGenerator', 'AndroidJarDeployment': 'django_swagger_utils.android_client.generators.android_deployment.AndroidJarDeployment', 'AndroidGeneratorV2': 'django_swagger_utils.android_client_v2.generators_v2.android_generator_v2.AndroidGeneratorV2', 'AndroidJarDeploymentV2': 'django_swagger_utils.android_client_v2.generators_v2.android_deployment_v2.AndroidJarDeploymentV2', 'APIClientGenerator': 'django_swagger_utils.api_client.api_client_generator.APIClientGenerator', 'SetupPyGenerator': 'django_swagger_utils.api_client.setup_py_generator.SetupPyGenerator'}", "line_number": 987, "usage_type": "call"}, {"api_name": "{'package_json': 'django_swagger_utils.spec_client.get_package_json.package_json', 'MergeSpec': 'django_swagger_utils.spec_client.merge_spec.MergeSpec', 'SplitSpec': 'django_swagger_utils.spec_client.split_spec.SplitSpec', 'check_path_exists': 'django_swagger_utils.core.utils.check_path_exists.check_path_exists', 'rmtree': 'shutil.rmtree', 'settings': 'django.conf.settings', 'SwaggerSpec': 'django_swagger_utils.swagger_gui.swagger_spec.SwaggerSpec', 'fg': 'colored.fg', 'attr': 'colored.attr', 'MobxTemplateGenerator': 'django_swagger_utils.mobx_client.mobx_client.MobxTemplateGenerator', 'MobxNpmDeployment': 'django_swagger_utils.mobx_client.mobx_npm_deployment.MobxNpmDeployment', 'write_to_file': 'django_swagger_utils.core.utils.write_to_file.write_to_file', 'InterfaceGenerator': 'django_swagger_utils.interface_client.interface_generator.InterfaceGenerator', 'get_validator': 'swagger_spec_validator.util.get_validator', 'BadRequest': 'django_swagger_utils.drf_server.exceptions.BadRequest', 'SwaggerParser': 'django_swagger_utils.core.parsers.swagger_parser.SwaggerParser', 'MkDirs': 
'django_swagger_utils.core.utils.mk_dirs.MkDirs', 'PatchGenerator': 'django_swagger_utils.apidoc_gen.generators.patch_generator.PatchGenerator', 'os': 'os', 'SwaggerGenerator': 'django_swagger_utils.drf_server.generators.swagger_generator.SwaggerGenerator', 'MarkdownGenerator': 'django_swagger_utils.apidoc_gen.generators.markdown_generator.MarkdownGenerator', 'ConstantGeneratorV2': 'django_swagger_utils.apidoc_gen.generators.constant_generator_v2.ConstantGeneratorV2', 'AndroidGenerator': 'django_swagger_utils.android_client.generators.android_generator.AndroidGenerator', 'AndroidJarDeployment': 'django_swagger_utils.android_client.generators.android_deployment.AndroidJarDeployment', 'AndroidGeneratorV2': 'django_swagger_utils.android_client_v2.generators_v2.android_generator_v2.AndroidGeneratorV2', 'AndroidJarDeploymentV2': 'django_swagger_utils.android_client_v2.generators_v2.android_deployment_v2.AndroidJarDeploymentV2', 'APIClientGenerator': 'django_swagger_utils.api_client.api_client_generator.APIClientGenerator', 'SetupPyGenerator': 'django_swagger_utils.api_client.setup_py_generator.SetupPyGenerator'}", "line_number": 992, "usage_type": "call"}, {"api_name": "{'package_json': 'django_swagger_utils.spec_client.get_package_json.package_json', 'MergeSpec': 'django_swagger_utils.spec_client.merge_spec.MergeSpec', 'SplitSpec': 'django_swagger_utils.spec_client.split_spec.SplitSpec', 'check_path_exists': 'django_swagger_utils.core.utils.check_path_exists.check_path_exists', 'rmtree': 'shutil.rmtree', 'settings': 'django.conf.settings', 'SwaggerSpec': 'django_swagger_utils.swagger_gui.swagger_spec.SwaggerSpec', 'fg': 'colored.fg', 'attr': 'colored.attr', 'MobxTemplateGenerator': 'django_swagger_utils.mobx_client.mobx_client.MobxTemplateGenerator', 'MobxNpmDeployment': 'django_swagger_utils.mobx_client.mobx_npm_deployment.MobxNpmDeployment', 'write_to_file': 'django_swagger_utils.core.utils.write_to_file.write_to_file', 'InterfaceGenerator': 'django_swagger_utils.interface_client.interface_generator.InterfaceGenerator', 'get_validator': 'swagger_spec_validator.util.get_validator', 'BadRequest': 'django_swagger_utils.drf_server.exceptions.BadRequest', 'SwaggerParser': 'django_swagger_utils.core.parsers.swagger_parser.SwaggerParser', 'MkDirs': 'django_swagger_utils.core.utils.mk_dirs.MkDirs', 'PatchGenerator': 'django_swagger_utils.apidoc_gen.generators.patch_generator.PatchGenerator', 'os': 'os', 'SwaggerGenerator': 'django_swagger_utils.drf_server.generators.swagger_generator.SwaggerGenerator', 'MarkdownGenerator': 'django_swagger_utils.apidoc_gen.generators.markdown_generator.MarkdownGenerator', 'ConstantGeneratorV2': 'django_swagger_utils.apidoc_gen.generators.constant_generator_v2.ConstantGeneratorV2', 'AndroidGenerator': 'django_swagger_utils.android_client.generators.android_generator.AndroidGenerator', 'AndroidJarDeployment': 'django_swagger_utils.android_client.generators.android_deployment.AndroidJarDeployment', 'AndroidGeneratorV2': 'django_swagger_utils.android_client_v2.generators_v2.android_generator_v2.AndroidGeneratorV2', 'AndroidJarDeploymentV2': 'django_swagger_utils.android_client_v2.generators_v2.android_deployment_v2.AndroidJarDeploymentV2', 'APIClientGenerator': 'django_swagger_utils.api_client.api_client_generator.APIClientGenerator', 'SetupPyGenerator': 'django_swagger_utils.api_client.setup_py_generator.SetupPyGenerator'}", "line_number": 1005, "usage_type": "call"}, {"api_name": "{'package_json': 'django_swagger_utils.spec_client.get_package_json.package_json', 
'MergeSpec': 'django_swagger_utils.spec_client.merge_spec.MergeSpec', 'SplitSpec': 'django_swagger_utils.spec_client.split_spec.SplitSpec', 'check_path_exists': 'django_swagger_utils.core.utils.check_path_exists.check_path_exists', 'rmtree': 'shutil.rmtree', 'settings': 'django.conf.settings', 'SwaggerSpec': 'django_swagger_utils.swagger_gui.swagger_spec.SwaggerSpec', 'fg': 'colored.fg', 'attr': 'colored.attr', 'MobxTemplateGenerator': 'django_swagger_utils.mobx_client.mobx_client.MobxTemplateGenerator', 'MobxNpmDeployment': 'django_swagger_utils.mobx_client.mobx_npm_deployment.MobxNpmDeployment', 'write_to_file': 'django_swagger_utils.core.utils.write_to_file.write_to_file', 'InterfaceGenerator': 'django_swagger_utils.interface_client.interface_generator.InterfaceGenerator', 'get_validator': 'swagger_spec_validator.util.get_validator', 'BadRequest': 'django_swagger_utils.drf_server.exceptions.BadRequest', 'SwaggerParser': 'django_swagger_utils.core.parsers.swagger_parser.SwaggerParser', 'MkDirs': 'django_swagger_utils.core.utils.mk_dirs.MkDirs', 'PatchGenerator': 'django_swagger_utils.apidoc_gen.generators.patch_generator.PatchGenerator', 'os': 'os', 'SwaggerGenerator': 'django_swagger_utils.drf_server.generators.swagger_generator.SwaggerGenerator', 'MarkdownGenerator': 'django_swagger_utils.apidoc_gen.generators.markdown_generator.MarkdownGenerator', 'ConstantGeneratorV2': 'django_swagger_utils.apidoc_gen.generators.constant_generator_v2.ConstantGeneratorV2', 'AndroidGenerator': 'django_swagger_utils.android_client.generators.android_generator.AndroidGenerator', 'AndroidJarDeployment': 'django_swagger_utils.android_client.generators.android_deployment.AndroidJarDeployment', 'AndroidGeneratorV2': 'django_swagger_utils.android_client_v2.generators_v2.android_generator_v2.AndroidGeneratorV2', 'AndroidJarDeploymentV2': 'django_swagger_utils.android_client_v2.generators_v2.android_deployment_v2.AndroidJarDeploymentV2', 'APIClientGenerator': 'django_swagger_utils.api_client.api_client_generator.APIClientGenerator', 'SetupPyGenerator': 'django_swagger_utils.api_client.setup_py_generator.SetupPyGenerator'}", "line_number": 1010, "usage_type": "call"}, {"api_name": "{'package_json': 'django_swagger_utils.spec_client.get_package_json.package_json', 'MergeSpec': 'django_swagger_utils.spec_client.merge_spec.MergeSpec', 'SplitSpec': 'django_swagger_utils.spec_client.split_spec.SplitSpec', 'check_path_exists': 'django_swagger_utils.core.utils.check_path_exists.check_path_exists', 'rmtree': 'shutil.rmtree', 'settings': 'django.conf.settings', 'SwaggerSpec': 'django_swagger_utils.swagger_gui.swagger_spec.SwaggerSpec', 'fg': 'colored.fg', 'attr': 'colored.attr', 'MobxTemplateGenerator': 'django_swagger_utils.mobx_client.mobx_client.MobxTemplateGenerator', 'MobxNpmDeployment': 'django_swagger_utils.mobx_client.mobx_npm_deployment.MobxNpmDeployment', 'write_to_file': 'django_swagger_utils.core.utils.write_to_file.write_to_file', 'InterfaceGenerator': 'django_swagger_utils.interface_client.interface_generator.InterfaceGenerator', 'get_validator': 'swagger_spec_validator.util.get_validator', 'BadRequest': 'django_swagger_utils.drf_server.exceptions.BadRequest', 'SwaggerParser': 'django_swagger_utils.core.parsers.swagger_parser.SwaggerParser', 'MkDirs': 'django_swagger_utils.core.utils.mk_dirs.MkDirs', 'PatchGenerator': 'django_swagger_utils.apidoc_gen.generators.patch_generator.PatchGenerator', 'os': 'os', 'SwaggerGenerator': 'django_swagger_utils.drf_server.generators.swagger_generator.SwaggerGenerator', 
'MarkdownGenerator': 'django_swagger_utils.apidoc_gen.generators.markdown_generator.MarkdownGenerator', 'ConstantGeneratorV2': 'django_swagger_utils.apidoc_gen.generators.constant_generator_v2.ConstantGeneratorV2', 'AndroidGenerator': 'django_swagger_utils.android_client.generators.android_generator.AndroidGenerator', 'AndroidJarDeployment': 'django_swagger_utils.android_client.generators.android_deployment.AndroidJarDeployment', 'AndroidGeneratorV2': 'django_swagger_utils.android_client_v2.generators_v2.android_generator_v2.AndroidGeneratorV2', 'AndroidJarDeploymentV2': 'django_swagger_utils.android_client_v2.generators_v2.android_deployment_v2.AndroidJarDeploymentV2', 'APIClientGenerator': 'django_swagger_utils.api_client.api_client_generator.APIClientGenerator', 'SetupPyGenerator': 'django_swagger_utils.api_client.setup_py_generator.SetupPyGenerator'}", "line_number": 1033, "usage_type": "call"}, {"api_name": "{'package_json': 'django_swagger_utils.spec_client.get_package_json.package_json', 'MergeSpec': 'django_swagger_utils.spec_client.merge_spec.MergeSpec', 'SplitSpec': 'django_swagger_utils.spec_client.split_spec.SplitSpec', 'check_path_exists': 'django_swagger_utils.core.utils.check_path_exists.check_path_exists', 'rmtree': 'shutil.rmtree', 'settings': 'django.conf.settings', 'SwaggerSpec': 'django_swagger_utils.swagger_gui.swagger_spec.SwaggerSpec', 'fg': 'colored.fg', 'attr': 'colored.attr', 'MobxTemplateGenerator': 'django_swagger_utils.mobx_client.mobx_client.MobxTemplateGenerator', 'MobxNpmDeployment': 'django_swagger_utils.mobx_client.mobx_npm_deployment.MobxNpmDeployment', 'write_to_file': 'django_swagger_utils.core.utils.write_to_file.write_to_file', 'InterfaceGenerator': 'django_swagger_utils.interface_client.interface_generator.InterfaceGenerator', 'get_validator': 'swagger_spec_validator.util.get_validator', 'BadRequest': 'django_swagger_utils.drf_server.exceptions.BadRequest', 'SwaggerParser': 'django_swagger_utils.core.parsers.swagger_parser.SwaggerParser', 'MkDirs': 'django_swagger_utils.core.utils.mk_dirs.MkDirs', 'PatchGenerator': 'django_swagger_utils.apidoc_gen.generators.patch_generator.PatchGenerator', 'os': 'os', 'SwaggerGenerator': 'django_swagger_utils.drf_server.generators.swagger_generator.SwaggerGenerator', 'MarkdownGenerator': 'django_swagger_utils.apidoc_gen.generators.markdown_generator.MarkdownGenerator', 'ConstantGeneratorV2': 'django_swagger_utils.apidoc_gen.generators.constant_generator_v2.ConstantGeneratorV2', 'AndroidGenerator': 'django_swagger_utils.android_client.generators.android_generator.AndroidGenerator', 'AndroidJarDeployment': 'django_swagger_utils.android_client.generators.android_deployment.AndroidJarDeployment', 'AndroidGeneratorV2': 'django_swagger_utils.android_client_v2.generators_v2.android_generator_v2.AndroidGeneratorV2', 'AndroidJarDeploymentV2': 'django_swagger_utils.android_client_v2.generators_v2.android_deployment_v2.AndroidJarDeploymentV2', 'APIClientGenerator': 'django_swagger_utils.api_client.api_client_generator.APIClientGenerator', 'SetupPyGenerator': 'django_swagger_utils.api_client.setup_py_generator.SetupPyGenerator'}", "line_number": 1036, "usage_type": "call"}, {"api_name": "colored.fg", "line_number": 1061, "usage_type": "call"}, {"api_name": "colored.attr", "line_number": 1061, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 1093, "usage_type": "call"}, {"api_name": "os.path", "line_number": 1093, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 1094, "usage_type": 
"call"}, {"api_name": "os.path", "line_number": 1094, "usage_type": "attribute"}, {"api_name": "os.pardir", "line_number": 1095, "usage_type": "attribute"}, {"api_name": "{'package_json': 'django_swagger_utils.spec_client.get_package_json.package_json', 'MergeSpec': 'django_swagger_utils.spec_client.merge_spec.MergeSpec', 'SplitSpec': 'django_swagger_utils.spec_client.split_spec.SplitSpec', 'check_path_exists': 'django_swagger_utils.core.utils.check_path_exists.check_path_exists', 'rmtree': 'shutil.rmtree', 'settings': 'django.conf.settings', 'SwaggerSpec': 'django_swagger_utils.swagger_gui.swagger_spec.SwaggerSpec', 'fg': 'colored.fg', 'attr': 'colored.attr', 'MobxTemplateGenerator': 'django_swagger_utils.mobx_client.mobx_client.MobxTemplateGenerator', 'MobxNpmDeployment': 'django_swagger_utils.mobx_client.mobx_npm_deployment.MobxNpmDeployment', 'write_to_file': 'django_swagger_utils.core.utils.write_to_file.write_to_file', 'InterfaceGenerator': 'django_swagger_utils.interface_client.interface_generator.InterfaceGenerator', 'get_validator': 'swagger_spec_validator.util.get_validator', 'BadRequest': 'django_swagger_utils.drf_server.exceptions.BadRequest', 'SwaggerParser': 'django_swagger_utils.core.parsers.swagger_parser.SwaggerParser', 'MkDirs': 'django_swagger_utils.core.utils.mk_dirs.MkDirs', 'PatchGenerator': 'django_swagger_utils.apidoc_gen.generators.patch_generator.PatchGenerator', 'os': 'os', 'SwaggerGenerator': 'django_swagger_utils.drf_server.generators.swagger_generator.SwaggerGenerator', 'MarkdownGenerator': 'django_swagger_utils.apidoc_gen.generators.markdown_generator.MarkdownGenerator', 'ConstantGeneratorV2': 'django_swagger_utils.apidoc_gen.generators.constant_generator_v2.ConstantGeneratorV2', 'AndroidGenerator': 'django_swagger_utils.android_client.generators.android_generator.AndroidGenerator', 'AndroidJarDeployment': 'django_swagger_utils.android_client.generators.android_deployment.AndroidJarDeployment', 'AndroidGeneratorV2': 'django_swagger_utils.android_client_v2.generators_v2.android_generator_v2.AndroidGeneratorV2', 'AndroidJarDeploymentV2': 'django_swagger_utils.android_client_v2.generators_v2.android_deployment_v2.AndroidJarDeploymentV2', 'APIClientGenerator': 'django_swagger_utils.api_client.api_client_generator.APIClientGenerator', 'SetupPyGenerator': 'django_swagger_utils.api_client.setup_py_generator.SetupPyGenerator'}", "line_number": 1099, "usage_type": "call"}]} +{"seq_id": "89711213", "text": "from pygments.lexer import RegexLexer\nfrom pygments.token import (Comment, Keyword, Text, Name, String)\n\n\nclass RysGitLexer(RegexLexer):\n builtins = [\n 'add',\n 'am',\n 'apply',\n 'archive',\n 'bisect',\n 'blame',\n 'branch',\n 'bundle',\n 'cat-file',\n 'checkout',\n 'cherry-pick',\n 'citool',\n 'clean',\n 'clone',\n 'commit-tree',\n 'commit',\n 'config',\n 'describe',\n 'diff',\n 'difftool',\n 'fetch',\n 'format-patch',\n 'fsck',\n 'gc',\n 'grep',\n 'gui',\n 'help',\n 'init',\n 'log',\n 'ls-tree',\n 'merge',\n 'mv',\n 'notes',\n 'pull',\n 'push',\n 'read-tree',\n 'rebase',\n 'reflog',\n 'remote',\n 'request-pull',\n 'rerere',\n 'reset',\n 'revert',\n 'rm',\n 'send-email',\n 'shortlog',\n 'show',\n 'show-branch',\n 'stash',\n 'status',\n 'svn',\n 'submodule',\n 'tag',\n 'update-index',\n 'update-server-info',\n 'write-tree',\n ]\n tokens = {\n 'root': [\n (r'#.*', Comment),\n (r'[a-zA-Z][a-zA-Z0-9_\\-]*', Keyword, 'args'),\n ],\n 'args': [\n (r'[ ]+', Text),\n (r'\\n', Text, 'root'),\n ('|'.join(builtins), Keyword), # Could be a Builtin\n (r'--?\\w+', 
Name.Attribute),\n (r'(\\'|\").+(\\'|\")', String),\n (r'\\S+', Name),\n ]\n }\n", "sub_path": "rydown/codesyntax/RysGitLexer.py", "file_name": "RysGitLexer.py", "file_ext": "py", "file_size_in_byte": 1590, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pygments.lexer.RegexLexer", "line_number": 5, "usage_type": "name"}, {"api_name": "pygments.token.Comment", "line_number": 66, "usage_type": "name"}, {"api_name": "pygments.token.Keyword", "line_number": 67, "usage_type": "name"}, {"api_name": "pygments.token.Text", "line_number": 70, "usage_type": "name"}, {"api_name": "pygments.token.Text", "line_number": 71, "usage_type": "name"}, {"api_name": "pygments.token.Keyword", "line_number": 72, "usage_type": "name"}, {"api_name": "pygments.token.Name.Attribute", "line_number": 73, "usage_type": "attribute"}, {"api_name": "pygments.token.Name", "line_number": 73, "usage_type": "name"}, {"api_name": "pygments.token.String", "line_number": 74, "usage_type": "name"}, {"api_name": "pygments.token.Name", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "92410679", "text": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport math\r\n\r\n# loop de loop? # Yes.\r\nL = 10\r\ndiam = 10\r\nA = np.pi*L*diam\r\nRa = 100\r\nv_init = -70\r\nlong = True\r\n \r\n# Values of membrane capacitance (parameters):\r\nif long==True:\r\n cms = [0.01,0.1,0.5,0.8,1.0,1.2,1.5,2.0,3.0,5.0,7.0,10.0,15.0]\r\nelse:\r\n cms = [0.01,0.1,0.5,0.8,1.0,1.2,1.5,2.0]\r\noutcms = []\r\nouttaus = []\r\n\r\n# Change current:\r\nidur = 1000 # ms\r\niamp = -0.1 # nA \r\nidelay = 10 \r\ntestit = False # True # If I am unsure about the results, I can check the fit.\r\n\r\nfolder = 'Results/IStim/Soma%i/current_idur%i_iamp'% (L,idur)+str(iamp)+'/'\r\noutfilename = folder +'somaonly_cms_idur%i_iamp' % idur+str(iamp)+'_Ra'+str(Ra)+'_vinit'+str(v_init)+'_pas_Cm'\r\noutfilename_tau = folder +'somaonly_cms_idur%i_iamp' % idur+str(iamp)+'_Ra'+str(Ra)+'_vinit'+str(v_init)+'_pas_tau'\r\nif long==True:\r\n plotname = outfilename + '_highCms.png'\r\n plotname_tau = outfilename_tau + '_highCms.png'\r\n outfilename = outfilename + '_highCms.txt'\r\n outfilename_tau = outfilename_tau + '_highCms.txt'\r\nelse:\r\n plotname = outfilename + '.png'\r\n plotname_tau = outfilename_tau + '.png'\r\n outfilename = outfilename + '.txt'\r\n outfilename_tau = outfilename_tau + '.txt'\r\noutfile = open(outfilename,'w')\r\noutfile_tau = open(outfilename_tau,'w')\r\n\r\nfor cm in cms:\r\n infilename = folder+'somaonly_cm'+str(cm)+'_idur%i_iamp' % idur+str(iamp)+'_Ra'+str(Ra)+'_vinit'+str(v_init)+'_pas_V.txt'\r\n infile = open(infilename,'r')\r\n lines = infile.readlines()\r\n N = len(lines) # Do I need this?\r\n \r\n time = []\r\n V = []\r\n Ca = []\r\n \r\n for line in lines:\r\n words = line.split()\r\n time.append(float(words[0]))\r\n V.append(float(words[1]))\r\n infile.close()\r\n \r\n Vmax = max(V)\r\n Vmin = min(V)\r\n dV = Vmax-Vmin\r\n dV1e = dV*math.exp(-1)\r\n V1e = Vmin+dV1e\r\n print('Vmax:',Vmax)\r\n print('Vmin:',Vmin)\r\n print('V1e:',V1e)\r\n\r\n # Will perform linear interpolation near the crossing with V1e.\r\n tbef = 0\r\n taft = 0\r\n Vbef = 0\r\n Vaft = 0\r\n for i in range(len(V)):\r\n if V[i]-V1e<0:\r\n tbef = time[i-1]\r\n taft = time[i]\r\n Vbef = V[i-1]\r\n Vaft = V[i]\r\n print('V:',V[i-1],'i-1:',i-1,'; t:', time[i-1])\r\n print('V:',V[i],'i:',i,'; t:', time[i])\r\n break\r\n \r\n a = (Vaft-Vbef)/(taft-tbef)\r\n t_interpolated = 
(V1e-Vbef+a*tbef)/a-idelay\r\n \r\n print('tbef',tbef)\r\n print('taft',taft)\r\n print('t_interpolated',t_interpolated)\r\n \r\n tau1 = tbef-idelay\r\n tau2 = taft-idelay\r\n tau3 = t_interpolated\r\n \r\n print('dV+Vmax:',dV+Vmax)\r\n testit=True\r\n if testit==True:\r\n N = 100*idur\r\n timecut = np.linspace(0,idur,N)\r\n fit1 = np.zeros(N)\r\n fit2 = np.zeros(N)\r\n fit3 = np.zeros(N)\r\n for i in range(N):\r\n fit1[i] = dV*math.exp(-timecut[i]/tau1)+Vmin\r\n fit2[i] = dV*math.exp(-timecut[i]/tau2)+Vmin\r\n fit3[i] = dV*math.exp(-timecut[i]/tau3)+Vmin\r\n timecut = np.linspace(idelay,idur+idelay,N)\r\n \r\n plt.figure(figsize=(6,5))\r\n plt.plot(time,V, label='Data')\r\n plt.plot(timecut,fit1, 'r--', label='tbef')\r\n plt.plot(timecut,fit2, 'g--', label='taft')\r\n plt.plot(timecut,fit3, 'b--', label='tinterpolated')\r\n plt.plot(tau1+idelay,V1e, 'ro', label='tbef')\r\n plt.plot(tau2+idelay,V1e, 'go', label='taft')\r\n plt.plot(tau3+idelay,V1e, 'bo', label='tinterpolated')\r\n plt.xlabel(r'$t$ [ms]')\r\n plt.ylabel(r'$V$ [mV]')\r\n plt.title(r'$V$ vs $t$')\r\n plt.tight_layout()\r\n plt.axis([idelay-0.5,idelay+10,min(V)-0.01,max(V)+0.01])\r\n plt.legend(loc='upper right')\r\n plt.show()\r\n \r\n tau = tau3\r\n \r\n # Conversions (to SI):\r\n iamp_SI=iamp*1e-9 # Current now in Amperes\r\n tau*=1e-3 # Time now in seconds\r\n \r\n dV*= 1e-3 # Voltage now in Volts\r\n R = abs(dV/iamp_SI) # Resistance in Ohms\r\n C = tau/R # Capacitance in Farads\r\n Cm = C/A # Specific capacitance. C in Farads, A in (mu m)^2\r\n Cm*= 1e14 # Conversion to get muF/cm^2\r\n \r\n outcms.append(Cm)\r\n outtaus.append(tau)\r\n outfile.write('%.2f %.12f\\n' % (cm,Cm))\r\n outfile_tau.write('%.2f %.12f\\n' % (cm,tau))\r\noutfile.close()\r\n\r\nplt.figure(figsize=(6,5))\r\nplt.plot(cms,outcms,'-o')\r\nplt.xlabel(r'$C_m$ [$\\mu$F/cm$^2$] (Cell parameter)')\r\nplt.ylabel(r'$C_m$ [$\\mu$F/cm$^2$] (Measured value)')\r\nplt.title(r'Measured $C_m$ vs cell parameter $C_m$')\r\nplt.tight_layout()\r\nplt.savefig(plotname)\r\n\r\nplt.figure(figsize=(6,5))\r\nplt.plot(cms,outtaus,'-o')\r\nplt.xlabel(r'$C_m$ [$\\mu$F/cm$^2$] (Cell parameter)')\r\nplt.ylabel(r'$\\tau_m$ [ms] (Measured value)')\r\nplt.title(r'Measured $\\tau_m$ vs cell parameter $C_m$')\r\nplt.tight_layout()\r\nplt.savefig(plotname_tau)", "sub_path": "P3_NEURON/findCm_manual_somaonly_pas_loop_setsize.py", "file_name": "findCm_manual_somaonly_pas_loop_setsize.py", "file_ext": "py", "file_size_in_byte": 4987, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "numpy.pi", "line_number": 8, "usage_type": "attribute"}, {"api_name": "math.exp", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 101, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 103, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 104, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 108, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 109, "usage_type": "call"}, {"api_name": 
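The time-constant extraction in the record above first finds the sample where the trace drops below V1e = Vmin + (Vmax - Vmin)*exp(-1), then linearly interpolates between the two bracketing samples to get the crossing time relative to stimulus onset. A self-contained sketch of that computation; the function name is hypothetical, and it assumes a monotonic decay toward Vmin after the hyperpolarizing step, as in the traces above:

import math

def tau_from_trace(time, V, idelay):
    # 1/e level of the total deflection, measured from the steady-state minimum.
    Vmax, Vmin = max(V), min(V)
    V1e = Vmin + (Vmax - Vmin) * math.exp(-1)
    for i in range(1, len(V)):
        if V[i] < V1e:  # first sample below the 1/e level
            tbef, taft = time[i - 1], time[i]
            Vbef, Vaft = V[i - 1], V[i]
            a = (Vaft - Vbef) / (taft - tbef)  # local slope of the trace
            # Solve Vbef + a*(t - tbef) = V1e for t, then shift by the stimulus delay.
            return (V1e - Vbef + a * tbef) / a - idelay
    return None  # the trace never crossed the 1/e level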
"matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}]} +{"seq_id": "363632412", "text": "import fetch_usercodes\nimport token_type_analyzer\nimport config\nimport json\nimport os\n\ndef main():\n lectures = fetch_usercodes.fetch_lectures(config.COURSE['course_id'])\n\n for lecture in lectures:\n lecture_id = lecture['id']\n print('Lecture(%d): %s' % (lecture['id'], lecture['title']))\n for exercise in lecture['exercises']:\n exercise_id = exercise['id']\n print('* Exercise(%d): %s' % (exercise['id'], exercise['title']))\n user_token_types = {}\n json_filename = './data/user_token_types_%d.json' % exercise_id\n if os.path.exists(json_filename):\n continue\n for doc in fetch_usercodes.fetch_docs_iter(lecture_id, exercise_id):\n print('.', end=\"\", flush=True)\n user_id = doc['user_id']\n filename = doc['filename']\n if user_id not in user_token_types:\n user_token_types[user_id] = {}\n if exercise_id not in user_token_types[user_id]:\n user_token_types[user_id][exercise_id] = {}\n if filename not in user_token_types[user_id][exercise_id]:\n user_token_types[user_id][exercise_id][filename] = {}\n for ver in doc['versions_iter']:\n content = ver['content']\n token_type_analyzer.get_java_token_stats(content)\n user_token_types[user_id][exercise_id][filename][ver['version']] = {\n 'timestamp': ver['timestamp'],\n 'version': ver['version'],\n 'token_stats': token_type_analyzer.get_java_token_stats(content)\n }\n print('')\n with open(json_filename, 'w') as f:\n json.dump(user_token_types, f)\n \n\nif __name__ == '__main__':\n main()", "sub_path": "parse_token_types.py", "file_name": "parse_token_types.py", "file_ext": "py", "file_size_in_byte": 1876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "fetch_usercodes.fetch_lectures", "line_number": 8, "usage_type": "call"}, {"api_name": "config.COURSE", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "fetch_usercodes.fetch_docs_iter", "line_number": 20, "usage_type": "call"}, {"api_name": "token_type_analyzer.get_java_token_stats", "line_number": 32, "usage_type": "call"}, {"api_name": "token_type_analyzer.get_java_token_stats", "line_number": 36, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "58544342", "text": "#! 
python3\n# chase.py\n\n# Coding the Chase sequence of the game.\n\n# Create a map, preferably a maze, make three exits, it does not need to perfectly match the text game.\n\n# The player must chase and hit the runaway to catch them.\n\n# Make sure there are enough places to go so that the player has a chance to catch the runaway.\n\n# Create a mini AI for the runaway so that he isn’t just moving in relation to the player.\n\nimport pygame\nimport escape\n\npygame.init()\n\n# CONSTANTS --------\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 600\nSCREEN_SIZE = (SCREEN_WIDTH, SCREEN_HEIGHT)\nWINDOW_TITLE = \"Chase\"\nREFRESH_RATE = 60\nBGCOLOUR = (16, 16, 16)\nWHITE = (0xFF, 0xFF, 0xFF)\nBLACK = ( 0x0, 0x0, 0x0)\nRED = (0xFF, 0x0, 0x0)\nGREEN = ( 0x0, 0xFF, 0x0)\nBLUE = ( 0x0, 0x0, 0xFF)\n\nclass Player(pygame.sprite.Sprite):\n    def __init__(self, x, y):\n        super().__init__()\n\n        self.image = pygame.Surface([20, 20])\n        self.image.fill(RED)\n\n        self.rect = self.image.get_rect()\n        self.rect.x = x\n        self.rect.y = y\n        self.walls = None\n\n        self.vel_x = 0\n        self.vel_y = 0\n\n    \n\n    def update(self):\n        \n        self.rect.x += self.vel_x\n        wall_hit_list = pygame.sprite.spritecollide(\n            self, self.walls, False\n        )\n\n        for wall in wall_hit_list:\n            \n            if self.vel_x > 0:\n                self.rect.right = wall.rect.left\n            else:\n                self.rect.left = wall.rect.right\n\n        self.rect.y += self.vel_y\n\n        wall_hit_list = pygame.sprite.spritecollide(\n            self, self.walls, False\n        )\n\n        for wall in wall_hit_list:\n            if self.vel_y > 0:\n                self.rect.bottom = wall.rect.top\n            else:\n                self.rect.top = wall.rect.bottom\n\n    def change_vel(self, x, y):\n        self.vel_x += x\n        self.vel_y += y\n\nclass Wall(pygame.sprite.Sprite):\n    def __init__(self, x, y, width, height):\n        super().__init__()\n\n        self.image = pygame.Surface([width, height])\n        self.image.fill(BLUE)\n\n        self.rect = self.image.get_rect()\n        self.rect.x = x\n        self.rect.y = y\n\nclass Enemy(pygame.sprite.Sprite):\n    \n    # where the enemy code will be placed.\n    def __init__(self, x, y):\n        super().__init__()\n\n        self.image = pygame.Surface([20, 20])\n        self.image.fill(WHITE)\n        self.velocity = 3\n        self.rect = self.image.get_rect()\n\n        self.rect.x = x\n        self.rect.y = y\n\n        self.wall = None\n\n        self.direction = \"right\"\n\n        self.velx = 0\n        self.vely = 0\n\n    \n\n\n    # def update(self):\n    \n    # define the way the enemy moves in the world.\n    # set out a path system to give the enemy a list of choices \n    # so that it can select one of the paths to choose\n    def update(self):\n        self.rect.x += self.velx\n        wall_hit = pygame.sprite.spritecollide(\n            self, self.wall, False\n        )\n\n        for walls in wall_hit:\n\n            if self.velx > 0:\n                self.rect.right = walls.rect.left\n                self.direction = \"up\"\n            else:\n                self.rect.left = walls.rect.right\n                self.direction = \"right\"\n        \n        self.rect.y += self.vely\n        wall_hit = pygame.sprite.spritecollide(\n            self, self.wall, False\n        )\n\n        for walls in wall_hit:\n            if self.vely > 0:\n                self.rect.bottom = walls.rect.top\n                self.direction = \"left\"\n            else:\n                self.rect.top = walls.rect.bottom\n                self.direction = \"down\"\n\n        if self.direction == \"up\":\n            self.vely = -self.velocity\n            \n\n        elif self.direction == \"down\":\n            self.vely = self.velocity\n            \n\n        elif self.direction == \"right\":\n            self.velx = self.velocity\n\n\n        elif self.direction == \"left\":\n            self.velx = -self.velocity\n        \n    def change_velocity(self, x, y):\n        self.vely += y\n        self.velx += x\n    \n\n\n\ndef mains():\n\n    PLAYER_SPEED = 5\n    # LOCAL variables ----------\n    screen = pygame.display.set_mode(SCREEN_SIZE)\n    clock = pygame.time.Clock()\n    done = 
False\n pygame.display.set_caption(WINDOW_TITLE)\n\n map_sprite_list = pygame.sprite.Group()\n all_sprite_list = pygame.sprite.Group()\n enemy_list = pygame.sprite.Group()\n\n wall_top = Wall(10, SCREEN_HEIGHT-10, SCREEN_WIDTH-20, 10)\n wall_bottom = Wall(0, 0, 10, 600)\n wall_side = Wall(10, 0, 790, 10)\n wall_side2 = Wall(SCREEN_WIDTH-10, 0, 10, SCREEN_HEIGHT)\n\n \n \n\n map_sprite_list.add(wall_top, wall_bottom, wall_side, wall_side2)\n all_sprite_list.add(wall_top, wall_bottom, wall_side, wall_side2)\n\n player = Player(50, 50)\n\n player.walls = map_sprite_list\n all_sprite_list.add(player)\n\n enemy = Enemy(20, 20)\n \n enemy.wall = map_sprite_list\n enemy_list.add(enemy)\n all_sprite_list.add(enemy)\n\n # main loop\n while not done:\n # event handler\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_w:\n player.change_vel(0, -PLAYER_SPEED)\n \n elif event.key == pygame.K_s:\n player.change_vel(0, PLAYER_SPEED)\n elif event.key == pygame.K_a:\n player.change_vel(-PLAYER_SPEED, 0)\n elif event.key == pygame.K_d:\n player.change_vel(PLAYER_SPEED, 0)\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_w:\n player.change_vel(0, PLAYER_SPEED)\n elif event.key == pygame.K_s:\n player.change_vel(0, -PLAYER_SPEED)\n elif event.key == pygame.K_a:\n player.change_vel(PLAYER_SPEED, 0)\n elif event.key == pygame.K_d:\n player.change_vel(-PLAYER_SPEED, 0)\n \n # game logic -------------\n\n # get the player's position in relation to \n # \"left\" \"right\" \"up\" and \"down\" in relation to the enemy,\n # remove the direction the player occupies from the enemy's \n # movement list at that time.\n\n #for enemy in enemy_list:\n #for mapped_area in map_sprite_list:\n #if pygame.sprite.collide_rect(enemy, mapped_area):\n #if enemy.direction == \"right\": enemy.direction == \"up\"\n #elif enemy.direction == \"up\": enemy.direction == \"down\"\n #elif enemy.direction == \"down\": enemy.direction == \"left\"\n #else: enemy.direction == \"right\"\n\n all_sprite_list.update()\n\n # drawing --------------\n screen.fill(BGCOLOUR)\n all_sprite_list.draw(screen)\n pygame.display.flip()\n\n # clock tick -------------\n clock.tick(REFRESH_RATE)\n\n", "sub_path": "chase.py", "file_name": "chase.py", "file_ext": "py", "file_size_in_byte": 7030, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pygame.init", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 52, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 65, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 65, "usage_type": "attribute"}, {"api_name": "pygame.sprite", "line_number": 79, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 90, "usage_type": "attribute"}, {"api_name": "pygame.Surface", "line_number": 96, "usage_type": "call"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 121, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 121, "usage_type": "attribute"}, {"api_name": "pygame.sprite.spritecollide", "line_number": 135, 
"usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 173, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 174, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 176, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 176, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 178, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 178, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pygame.sprite.Group", "line_number": 180, "usage_type": "call"}, {"api_name": "pygame.sprite", "line_number": 180, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 207, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 208, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 210, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 211, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 216, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 218, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 220, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 221, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 227, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 250, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 250, "usage_type": "attribute"}]} +{"seq_id": "345393413", "text": "__author__ = 'root'\n\nimport argparse\nimport socket\n\nx = argparse.ArgumentParser(description=\"Here should be some kind of description\")\nx.add_argument('--name',type=str,help=\"This should be the website name e.g example.com\",required=True)\nx.add_argument('-o',type=str,help=\"This should show/print the ip addr of the entered website\",required=False)\n\n#var y as cmdargs\ny = x.parse_args()\ni_var= y.name\n\nipaddr = socket.gethostbyname(i_var);\nprint(ipaddr);\n", "sub_path": "knowwebipaddr.py", "file_name": "knowwebipaddr.py", "file_ext": "py", "file_size_in_byte": 454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 6, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "445742874", "text": "import json\nimport os\nimport random\nimport re\nimport string\nfrom collections import OrderedDict\n\nOBJECT_TYPES = (dict, list)\nINCLUDE_KEYS = ['...', '$ref']\nINCLUDE_VALUE_PATTERNS = [\n re.compile(r'^#/(.+)$'), # simple local definition\n re.compile(r'^include\\((.+)\\)$'), # include\n re.compile(r'^file:(.+)?#/(.+)$'), # remote definition inclusion\n re.compile(r'^file:(.+)$'), # remote file inclusion\n 
re.compile(r'^(.+)?#/(.+)$'), # remote definition inclusion without `file:` pattern\n]\nINCLUDE_INDEX_LOCAL = [0]\nINCLUDE_INDEX_DEFINITION = [2, 4]\nINCLUDE_TEXT_PATTERN = re.compile(r'^include_text\\((.+)\\)$')\n\n\nclass JSONInclude(object):\n def __init__(self):\n self._included_cache = None\n self._original_schemas = None\n\n def _random_string(self, length=9):\n return ''.join(\n random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(length)\n )\n\n @staticmethod\n def _read_file(filePath):\n with open(filePath) as f:\n return f.read()\n\n def _get_include_name(self, value, regex_list):\n if not isinstance(regex_list, list):\n # passing single regex only\n return self._get_include_name([value], regex_list)[0]\n else:\n # passing list of regex`s\n for idx, regex in enumerate(regex_list):\n if isinstance(value, str):\n rv = regex.search(value)\n if rv:\n return rv.groups(), idx\n return None, None\n\n def _lookup(self, dic, key, *keys):\n if keys:\n return self._lookup(dic.get(key, {}), *keys)\n return dic.get(key)\n\n def _make_unique(self, obj, key, original=None, replacement=None):\n \"\"\"\n Walk through the dict and add random string to the value at key\n and all other occurrences of the same value.\n \"\"\"\n if key in obj and isinstance(obj[key], str):\n original = obj[key]\n replacement = obj[key] + \"-\" + self._random_string()\n obj[key] = replacement\n for k, v in obj.items():\n if original and v == original:\n obj[k] = replacement\n if isinstance(v, dict):\n self._make_unique(v, key, original, replacement)\n return obj\n\n def _include_definition(self, include_name, schema):\n attr = include_name.split(\"/\")\n return self._lookup(schema, *attr)\n\n def _include_remote_file(self, dirpath, include_name):\n _f = os.path.join(dirpath, include_name)\n if include_name not in self._included_cache:\n remote_schema = self._parse_json_include(os.path.dirname(_f), os.path.basename(_f))\n self._cleanup_before_inclusion(remote_schema)\n return remote_schema\n else:\n return self._included_cache[include_name]\n\n def _cleanup_before_inclusion(self, data):\n if isinstance(data, list):\n for item in data:\n self._cleanup_before_inclusion(item)\n return\n elif isinstance(data, dict):\n data.pop('$schema', None) # remove $schema property before inclusion\n\n def _walk_through_to_include(self, o, dirpath):\n if isinstance(o, dict):\n is_include_exp = False\n make_unique_key = o.pop('makeUnique', None)\n # if a key match a INCLUDE_KEYS\n if any(map(lambda x: x in o, INCLUDE_KEYS)):\n include_key = [y for y in map(lambda x: x if x in o else None, INCLUDE_KEYS) if y][0] # get key that match\n include_info, include_idx = self._get_include_name(o[include_key], INCLUDE_VALUE_PATTERNS)\n if include_info:\n is_include_exp = True\n include_name = include_info[0]\n if include_idx in INCLUDE_INDEX_LOCAL:\n # include local definitions\n self._included_cache[include_name] = self._include_definition(\n include_name,\n self._original_schemas[-1]\n )\n elif include_idx in INCLUDE_INDEX_DEFINITION:\n # include remote definitions\n include_name = include_info[1]\n remote_file_schema = self._include_remote_file(dirpath, include_info[0])\n self._included_cache[include_name] = self._include_definition(include_name, remote_file_schema)\n else:\n # enable relative directory references: `../../`\n self._included_cache[include_name] = self._include_remote_file(dirpath, include_name)\n # remove \"key : include-pattern\" from dict\n\n _data = self._included_cache[include_name]\n 
o.pop(include_key)\n # add data under include_key if it is not a dictionary\n if not isinstance(_data, dict):\n _data = {include_key: _data}\n if make_unique_key:\n o.update(self._make_unique(_data, make_unique_key))\n else:\n o.update(_data)\n\n if isinstance(o, dict):\n # if a key match INCLUDE_TEXT_PATTERN\n include_text_keys = [key for key in o.keys() if isinstance(o[key], str) and INCLUDE_TEXT_PATTERN.search(o[key])]\n for key in include_text_keys:\n # pass the pattern in a list so the multi-regex branch of _get_include_name is used;\n # it returns (groups, idx), where groups[0] is the captured filename\n include_info, _ = self._get_include_name(o[key], [INCLUDE_TEXT_PATTERN])\n if include_info:\n _f = os.path.join(dirpath, include_info[0])\n o[key] = self._read_file(_f)\n\n if is_include_exp:\n # don't recurse\n return\n\n if isinstance(o, dict):\n for k, v in o.items():\n self._walk_through_to_include(v, dirpath)\n elif isinstance(o, list):\n for i in o:\n self._walk_through_to_include(i, dirpath)\n\n def _parse_json_include(self, dirpath, filename):\n filepath = os.path.join(dirpath, filename)\n json_str = self._read_file(filepath)\n d = self._resolve_extend_replace(json_str, filepath)\n\n self._original_schemas.append(d)\n self._walk_through_to_include(d, dirpath)\n self._original_schemas.pop()\n return d\n\n def build_json_include(self, dirpath, filename=None, indent=4):\n \"\"\"Parse a json file and build it by the include expression recursively.\n\n :param str dirpath: The directory path of source json files.\n :param str filename: The name of the source json file.\n :return: A json string with its include expression replaced by the indicated data.\n :rtype: str\n \"\"\"\n self._included_cache = {}\n self._original_schemas = []\n d = self._parse_json_include(dirpath, filename)\n return json.dumps(d, indent=indent, separators=(',', ': '))\n\n\n def _resolve_extend_replace(self, str, filepath):\n \"\"\"\n Resolve the content `$extend` and `$replace` keys:\n\n {\n \"$extend\": {\n \"name\": \"parent.json\"\n },\n \"$replace\": [\n {\n \"where\": {\n \"key\": \"units\",\n \"idx\": 4\n },\n \"with\": \"$this.units\"\n },\n\n :param str str: json string with file content\n :param str filepath: path to the file\n :rtype: dict\n \"\"\"\n obj = json.loads(str, object_pairs_hook=OrderedDict)\n if not isinstance(obj, dict):\n return obj\n extend = obj.get(\"$extend\", {})\n replace = obj.get(\"$replace\", {})\n filename = extend.get(\"name\", None)\n if filename:\n json_string = self._read_file(os.path.join(os.path.dirname(filepath), filename))\n json_data = json.loads(json_string, object_pairs_hook=OrderedDict)\n for entry in replace:\n key = entry[\"where\"][\"key\"]\n idx = entry[\"where\"].get(\"idx\", None)\n idx_cache = 0\n _with = entry[\"with\"]\n _replacement = obj.get(_with.replace(\"$this.\", \"\")) if _with and \"$this.\" in _with else _with\n _current_value = json_data[key]\n if (idx or idx == 0) and isinstance(_current_value, list):\n del _current_value[idx]\n if isinstance(_replacement, list):\n for _in, _el in enumerate(_replacement):\n _current_value.insert(idx + _in, _el)\n idx_cache += 1\n else:\n _current_value.insert(idx, _replacement)\n _replacement = _current_value\n json_data[key] = _replacement\n obj = json_data\n return obj\n\n\ndef build_str(dirpath, filename=None, indent=4):\n if filename is None:\n dirpath = os.path.abspath(os.path.join(os.getcwd(), dirpath))\n dirpath, filename = os.path.split(dirpath)\n return JSONInclude().build_json_include(dirpath, filename, indent=indent)\n\n\ndef build_json(*args, **kwargs):\n str_data = build_str(*args, **kwargs)\n return json.loads(str_data)", "sub_path": 
"json_include/json_include.py", "file_name": "json_include.py", "file_ext": "py", "file_size_in_byte": 9440, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "re.compile", "line_number": 11, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 12, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 13, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 14, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 15, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 19, "usage_type": "call"}, {"api_name": "random.SystemRandom", "line_number": 29, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 29, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path", "line_number": 78, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path", "line_number": 136, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 170, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 194, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 194, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 201, "usage_type": "call"}, {"api_name": "os.path", "line_number": 201, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 201, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 202, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 202, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 226, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 227, "usage_type": "call"}, {"api_name": "os.path", "line_number": 227, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 233, "usage_type": "call"}]} +{"seq_id": "436940271", "text": "import random\nimport numpy as np\nimport joommfutil.typesystem as ts\nimport discretisedfield.util as dfu\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n@ts.typesystem(p1=ts.RealVector(size=3),\n p2=ts.RealVector(size=3),\n cell=ts.PositiveRealVector(size=3),\n name=ts.ObjectName,\n l=ts.PositiveRealVector(size=3),\n pmin=ts.RealVector(size=3),\n pmax=ts.RealVector(size=3),\n n=ts.PositiveIntVector(size=3))\nclass Mesh(object):\n def __init__(self, p1, p2, cell, name=\"mesh\"):\n \"\"\"\n Creates a rectangular finite difference mesh.\n\n Args:\n p1 (tuple, list, np.ndarray): First mesh domain point\n p1 is of length 3 (xmin, ymin, zmax).\n p2 (tuple, list, np.ndarray): Second mesh domain point\n p2 is of length 
3 (xmax, ymax, zmax).\n cell (tuple, list, np.ndarray): Discretisation cell size\n cell is of length 3 and defines the discretisation steps in\n x, y, and z directions: (dx, dy, dz).\n name (Optional[str]): Mesh name\n\n Attributes:\n p1 (tuple): First mesh domain point\n\n p2 (tuple): Second mesh domain point\n\n cell (tuple): Discretisation cell size\n\n name (str): Mesh name\n\n pmin (tuple): Minimum mesh domain point\n\n pmax (tuple): Maximum mesh domain point\n\n l (tuple): length of domain x, y, and z edges (lx, ly, lz):\n\n lx = abs(p2[0] - p1[0])\n\n ly = abs(p2[1] - p1[1])\n\n lz = abs(p2[2] - p1[2])\n\n n (tuple): The number of cells in three dimensions (nx, ny, nz):\n\n nx = lx/dx\n\n ny = ly/dy\n\n nz = lz/dz\n\n \"\"\"\n self.p1 = tuple(p1)\n self.p2 = tuple(p2)\n self.cell = tuple(cell)\n self.name = name\n\n # Compute domain edge lengths.\n self.l = (abs(self.p2[0]-self.p1[0]),\n abs(self.p2[1]-self.p1[1]),\n abs(self.p2[2]-self.p1[2]))\n\n # Compute minimum and maximum mesh domain points.\n self.pmin = (min(self.p1[0], self.p2[0]),\n min(self.p1[1], self.p2[1]),\n min(self.p1[2], self.p2[2]))\n self.pmax = (max(self.p1[0], self.p2[0]),\n max(self.p1[1], self.p2[1]),\n max(self.p1[2], self.p2[2]))\n\n tol = 1e-12 # picometer tolerance\n # Check if the discretisation cell size is greater than the domain.\n for i in range(3):\n if self.cell[i] > self.l[i]:\n raise ValueError((\"Discretisation cell is greater than \"\n \"the domain dimension: cell[{}] > \"\n \"abs(p2[{}]-p1[{}]).\").format(i, i, i))\n\n # Check if the domain is not an aggregate of discretisation cell.\n for i in range(3):\n if tol < self.l[i] % self.cell[i] < self.cell[i] - tol:\n raise ValueError((\"Domain is not a multiple (aggregate) of \"\n \"the discretisation cell: \"\n \"abs(p2[{}]-p1[{}]) % \"\n \"cell[{}].\").format(i, i, i))\n\n # Compute the number of cells in all three dimensions.\n self.n = (int(round(self.l[0]/self.cell[0])),\n int(round(self.l[1]/self.cell[1])),\n int(round(self.l[2]/self.cell[2])))\n\n def __repr__(self):\n \"\"\"Mesh representation method.\n\n Returns:\n A mesh representation string.\n\n \"\"\"\n p1str = \"p1=({}, {}, {})\".format(self.p1[0], self.p1[1], self.p1[2])\n p2str = \"p2=({}, {}, {})\".format(self.p2[0], self.p2[1], self.p2[2])\n cellstr = \"cell=({}, {}, {})\".format(self.cell[0],\n self.cell[1],\n self.cell[2])\n namestr = \"name=\\\"{}\\\"\".format(self.name)\n\n return \"Mesh({}, {}, {}, {})\".format(p1str, p2str, cellstr, namestr)\n\n def _ipython_display_(self):\n \"\"\"Shows a matplotlib figure of sample range and discretisation.\"\"\"\n # TODO: plt.show() works only with nbagg\n fig = self.plot() # pragma: no cover\n plt.show() # pragma: no cover\n\n def centre(self):\n \"\"\"Compute and return the mesh centre point.\n\n Returns:\n A mesh centre point tuple of coordinates.\n\n \"\"\"\n return (self.pmin[0] + 0.5*self.l[0],\n self.pmin[1] + 0.5*self.l[1],\n self.pmin[2] + 0.5*self.l[2])\n\n def random_point(self):\n \"\"\"Generate a random mesh point.\n\n Returns:\n A random mesh point tuple of coordinates.\n\n \"\"\"\n return (self.pmin[0] + random.random()*self.l[0],\n self.pmin[1] + random.random()*self.l[1],\n self.pmin[2] + random.random()*self.l[2])\n\n def index2point(self, i):\n \"\"\"Convert the discretisation cell index to its centre point coordinate.\n\n The finite difference domain is discretised in x, y, and z directions\n in dx, dy, and dz steps, respectively. Accordingly, there are\n nx, ny, and nz discretisation steps. 
This method converts the cell\n index (ix, iy, iz) to the cell's centre point coordinate.\n\n This method raises ValueError if the index is out of range.\n\n Args:\n i (tuple): A length 3 tuple of integers (ix, iy, iz)\n\n Returns:\n A length 3 tuple of x, y, and z coodinates\n\n \"\"\"\n for j in range(3):\n if i[j] < 0 or i[j] > self.n[j] - 1:\n raise ValueError((\"Index i[{}]={} out of \"\n \"range.\").format(j, i[j]))\n\n return (self.pmin[0] + (i[0]+0.5)*self.cell[0],\n self.pmin[1] + (i[1]+0.5)*self.cell[1],\n self.pmin[2] + (i[2]+0.5)*self.cell[2])\n\n def point2index(self, p):\n \"\"\"Compute the index of a cell containing point p.\n\n This method is an inverse function of index2point method.\n (For details on index, please refer to the index2point method.)\n\n It raises ValueError if the point is outside the mesh.\n\n Args:\n p (tuple): A length 3 tuple of Real numbers (px, py, pz)\n\n Returns:\n A length 3 cell index tuple (ix, iy, iz).\n\n \"\"\"\n for j in range(3):\n if p[j] < self.pmin[j] or p[j] > self.pmax[j]:\n raise ValueError((\"Point coordinate p[{}]={} outside \"\n \"the mesh domain.\"). format(j, p[j]))\n\n i = []\n for j in range(3):\n ij = int(round((p[j]-self.pmin[j])/self.cell[j] - 0.5))\n\n # If rounded to the out-of-range mesh index.\n if ij < 0:\n ij = 0 # pragma: no cover\n elif ij > self.n[j] - 1:\n ij = self.n[j] - 1\n\n i.append(ij)\n\n return tuple(i)\n\n def cell_centre(self, p):\n \"\"\"Computes the centre of cell containing (or nearest) to point p.\n\n Args:\n p (tuple): A length 3 tuple of point coordinates\n\n Returns:\n A length 3 tuple of cell's centre coordinates\n\n \"\"\"\n return self.index2point(self.point2index(p))\n\n def plot(self):\n \"\"\"Creates a figure of a mesh range and discretisation cell.\"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111, projection=\"3d\")\n ax.set_aspect(\"equal\")\n\n cell_point = (self.pmin[0] + self.cell[0],\n self.pmin[1] + self.cell[1],\n self.pmin[2] + self.cell[2])\n\n dfu.plot_box(ax, self.pmin, self.pmax)\n dfu.plot_box(ax, self.pmin, cell_point, props=\"r-\", linewidth=1)\n\n ax.set(xlabel=r\"$x$\", ylabel=r\"$y$\", zlabel=r\"$z$\")\n\n return fig\n\n def cells(self):\n \"\"\"Generator iterating through all mesh cells and\n yielding mesh indices and centre coordinates.\"\"\"\n for k in range(self.n[2]):\n for j in range(self.n[1]):\n for i in range(self.n[0]):\n yield (i, j, k), self.index2point((i, j, k))\n\n def line_intersection(self, l, l0, n=100):\n \"\"\"Generator yielding mesh cell indices and their centre coordinates,\n along the line defined with l and l0 in n points.\"\"\"\n try:\n p1, p2 = dfu.box_line_intersection(self.pmin, self.pmax, l, l0)\n except TypeError:\n raise ValueError(\"Line does not intersect mesh in two points.\")\n\n p1, p2 = np.array(p1), np.array(p2)\n dl = (p2-p1) / (n-1)\n for i in range(n):\n point = p1 + i*dl\n yield np.linalg.norm(i*dl), tuple(point)\n\n def script(self):\n \"\"\"This method should be implemented by a specific\n micromagnetic calculator.\"\"\"\n raise NotImplementedError\n", "sub_path": "discretisedfield/mesh.py", "file_name": "mesh.py", "file_ext": "py", "file_size_in_byte": 8909, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "matplotlib.pyplot.show", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "random.random", "line_number": 141, "usage_type": "call"}, {"api_name": "random.random", 
"line_number": 142, "usage_type": "call"}, {"api_name": "random.random", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "discretisedfield.util.plot_box", "line_number": 227, "usage_type": "call"}, {"api_name": "discretisedfield.util", "line_number": 227, "usage_type": "name"}, {"api_name": "discretisedfield.util.plot_box", "line_number": 228, "usage_type": "call"}, {"api_name": "discretisedfield.util", "line_number": 228, "usage_type": "name"}, {"api_name": "discretisedfield.util.box_line_intersection", "line_number": 246, "usage_type": "call"}, {"api_name": "discretisedfield.util", "line_number": 246, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 250, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 254, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 254, "usage_type": "attribute"}, {"api_name": "joommfutil.typesystem.typesystem", "line_number": 9, "usage_type": "call"}, {"api_name": "joommfutil.typesystem", "line_number": 9, "usage_type": "name"}, {"api_name": "joommfutil.typesystem.RealVector", "line_number": 9, "usage_type": "call"}, {"api_name": "joommfutil.typesystem.RealVector", "line_number": 10, "usage_type": "call"}, {"api_name": "joommfutil.typesystem", "line_number": 10, "usage_type": "name"}, {"api_name": "joommfutil.typesystem.PositiveRealVector", "line_number": 11, "usage_type": "call"}, {"api_name": "joommfutil.typesystem", "line_number": 11, "usage_type": "name"}, {"api_name": "joommfutil.typesystem.ObjectName", "line_number": 12, "usage_type": "attribute"}, {"api_name": "joommfutil.typesystem", "line_number": 12, "usage_type": "name"}, {"api_name": "joommfutil.typesystem.PositiveRealVector", "line_number": 13, "usage_type": "call"}, {"api_name": "joommfutil.typesystem", "line_number": 13, "usage_type": "name"}, {"api_name": "joommfutil.typesystem.RealVector", "line_number": 14, "usage_type": "call"}, {"api_name": "joommfutil.typesystem", "line_number": 14, "usage_type": "name"}, {"api_name": "joommfutil.typesystem.RealVector", "line_number": 15, "usage_type": "call"}, {"api_name": "joommfutil.typesystem", "line_number": 15, "usage_type": "name"}, {"api_name": "joommfutil.typesystem.PositiveIntVector", "line_number": 16, "usage_type": "call"}, {"api_name": "joommfutil.typesystem", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "109626369", "text": "# coding:utf-8\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom numpy import *\r\nimport numpy as np\r\nfrom sklearn.feature_extraction import DictVectorizer\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.metrics import classification_report\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn import metrics\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\n\r\ntrain_file = pd.read_csv('rdata\\train_data_10_percent_corrected_classify.csv') # 读取已处理的训练集文件\r\nprint(\"训练集维度:\", train_file.shape)\r\ncol_num = train_file.shape[1]\r\ntrain_file.columns = [i+1 for i in range(col_num)] # 命名训练集文件的每列名称\r\n\r\nfeature_num = 41\r\n# x = kdd99[[i+1 for i in range(col_num-1)]] # 切分训练集的41列特征\r\nx = train_file[[i+1 for i in range(feature_num)]]\r\ny = train_file[[col_num]] # 
last column of the training set\r\n\r\nunlabeled_test_file = pd.read_csv(r'data\test_data_10_percent_corrected_classify_unlabeled.csv')\r\ncol_num1 = unlabeled_test_file.shape[1]\r\nunlabeled_test_file.columns = [k+1 for k in range(col_num1)]\r\nprint(\"Unlabeled test set dimensions:\", unlabeled_test_file.shape)\r\n\r\nlabeled_test_file = pd.read_csv(r'data\test_data_10_percent_corrected_classify_labeled.csv')\r\nlabeled_test_file.columns = [j+1 for j in range(labeled_test_file.shape[1])]\r\ncol_num2 = labeled_test_file.shape[1]\r\ny_test = labeled_test_file[[col_num2]]\r\nprint(\"Labeled test set dimensions:\", labeled_test_file.shape)\r\n\r\nx_train = x\r\nx_test = unlabeled_test_file\r\ny_train = y\r\n\r\nvec = DictVectorizer(sparse=False)\r\nx_train = vec.fit_transform(x_train.to_dict(orient='record'))\r\nx_test = vec.transform(x_test.to_dict(orient='record'))\r\nprint(vec.feature_names_, \"\\n\", x_train[:-1])\r\n\r\ndtc_accuracy_score = []\r\nnbc_accuracy_score = []\r\nrfc_accuracy_score = []\r\nlgr_accuracy_score = []\r\n\r\ndtc_auc = []\r\nnbc_auc = []\r\nrfc_auc = []\r\nlgr_auc = []\r\n\r\nfor i in range(3):\r\n print('Experiment ' + str(i + 1) + ':')\r\n\r\n # decision tree classifier\r\n dtc = DecisionTreeClassifier()\r\n dtc = dtc.fit(x_train, y_train)\r\n dtc_y_pre = dtc.predict(x_test)\r\n\r\n fpr1, tpr1, thresholds = metrics.roc_curve(y_test, dtc_y_pre)\r\n auc = metrics.auc(fpr1, tpr1)\r\n plt.subplot(2, 2, 1)\r\n plt.plot(fpr1, tpr1, c='r', lw=2, alpha=0.7, label=u'AUC=%.3f' % auc)\r\n plt.xlabel('False Positive Rate', fontsize=10)\r\n plt.ylabel('True Positive Rate', fontsize=10)\r\n plt.title('Decision Tree AUC', fontsize=10)\r\n plt.grid(b=True, ls=':')\r\n plt.legend(loc='lower right', fancybox=True, framealpha=0.8, fontsize=10)\r\n dtc_accuracy_score.append(accuracy_score(y_test, dtc_y_pre))\r\n dtc_auc.append(auc)\r\n\r\n # print(\"Decision tree accuracy:\", accuracy_score(y_pre, y_test))\r\n print(\"Decision tree:\", \"\\n\", classification_report(dtc_y_pre, y_test, target_names=[\"normal\", \"abnormal\"]))\r\n print(\"Decision tree AUC:\", roc_auc_score(y_test, dtc_y_pre))\r\n\r\n # naive Bayes classifier\r\n nbc = GaussianNB()\r\n nbc = nbc.fit(x_train, y)\r\n nbc_y_pre = nbc.predict(x_test)\r\n\r\n fpr2, tpr2, thresholds1 = metrics.roc_curve(y_test, nbc_y_pre)\r\n auc = metrics.auc(fpr2, tpr2)\r\n plt.subplot(2, 2, 2)\r\n plt.plot(fpr2, tpr2, c='r', lw=2, alpha=0.7, label=u'AUC=%.3f' % auc)\r\n plt.xlabel('False Positive Rate', fontsize=10)\r\n plt.ylabel('True Positive Rate', fontsize=10)\r\n plt.title('Naive Bayes AUC', fontsize=10)\r\n plt.grid(b=True, ls=':')\r\n plt.legend(loc='lower right', fancybox=True, framealpha=0.8, fontsize=10)\r\n nbc_accuracy_score.append(accuracy_score(y_test, nbc_y_pre))\r\n nbc_auc.append(auc)\r\n\r\n # print(\"Naive Bayes accuracy:\", accuracy_score(y_test, nbc_y_pre))\r\n print(\"Naive Bayes:\", \"\\n\", classification_report(nbc_y_pre, y_test, target_names=[\"normal\", \"abnormal\"]))\r\n print(\"Naive Bayes AUC:\", roc_auc_score(y_test, nbc_y_pre))\r\n\r\n # random forest\r\n rfc = RandomForestClassifier()\r\n rfc = rfc.fit(x_train, np.array(y).ravel())\r\n rfc_y_pre = rfc.predict(x_test)\r\n fpr3, tpr3, thresholds = metrics.roc_curve(y_test, rfc_y_pre)\r\n auc = metrics.auc(fpr3, tpr3)\r\n plt.subplot(2, 2, 3)\r\n plt.plot(fpr3, tpr3, c='r', lw=2, alpha=0.7, label=u'AUC=%.3f' % auc)\r\n plt.xlabel('False Positive Rate', fontsize=10)\r\n plt.ylabel('True Positive Rate', fontsize=10)\r\n plt.title('Random Forest AUC', fontsize=10)\r\n plt.grid(b=True, ls=':')\r\n plt.legend(loc='lower right', fancybox=True, framealpha=0.8, fontsize=10)\r\n rfc_accuracy_score.append(accuracy_score(y_test, 
rfc_y_pre))\r\n rfc_auc.append(auc)\r\n\r\n # print(\"Random forest accuracy:\", accuracy_score(y_test, rfc_y_pre))\r\n print(\"Random forest:\", \"\\n\", classification_report(rfc_y_pre, y_test, target_names=[\"normal\", \"abnormal\"]))\r\n print(\"Random forest AUC:\", roc_auc_score(y_test, rfc_y_pre))\r\n\r\n # logistic regression\r\n lgr = LogisticRegression()\r\n lgr = lgr.fit(x_train, y)\r\n lgr_y_pre = lgr.predict(x_test)\r\n fpr4, tpr4, thresholds = metrics.roc_curve(y_test, lgr_y_pre)\r\n auc = metrics.auc(fpr4, tpr4)\r\n plt.subplot(2, 2, 4)\r\n plt.plot(fpr4, tpr4, c='r', lw=2, alpha=0.7, label=u'AUC=%.3f' % auc)\r\n plt.xlabel('False Positive Rate', fontsize=10)\r\n plt.ylabel('True Positive Rate', fontsize=10)\r\n plt.title('Logistic Regression AUC', fontsize=10)\r\n plt.grid(b=True, ls=':')\r\n plt.legend(loc='lower right', fancybox=True, framealpha=0.8, fontsize=10)\r\n lgr_accuracy_score.append(accuracy_score(y_test, lgr_y_pre))\r\n lgr_auc.append(auc)\r\n\r\n # print(\"Logistic regression accuracy:\", accuracy_score(y_test, lgr_y_pre))\r\n print(\"Logistic regression:\", \"\\n\", classification_report(lgr_y_pre, y_test, target_names=[\"normal\", \"abnormal\"]))\r\n print(\"Logistic regression AUC:\", roc_auc_score(y_test, lgr_y_pre))\r\n\r\n\r\nprint(\"Decision tree accuracy:\", mean(dtc_accuracy_score))\r\nprint(\"Naive Bayes accuracy:\", mean(nbc_accuracy_score))\r\nprint(\"Random forest accuracy:\", mean(rfc_accuracy_score))\r\nprint(\"Logistic regression accuracy:\", mean(lgr_accuracy_score))\r\nprint('--------')\r\nprint(\"Decision tree AUC:\", mean(dtc_auc))\r\nprint(\"Naive Bayes AUC:\", mean(nbc_auc))\r\nprint(\"Random forest AUC:\", mean(rfc_auc))\r\nprint(\"Logistic regression AUC:\", mean(lgr_auc))\r\nplt.subplots_adjust(left=0.125, bottom=0.1, right=0.9, top=0.9, wspace=0.5, hspace=0.5)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "classification.py", "file_name": "classification.py", "file_ext": "py", "file_size_in_byte": 6511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 33, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.DictVectorizer", "line_number": 43, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 62, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 66, "usage_type": "name"}, {"api_name": "sklearn.metrics.auc", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 74, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 74, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 75, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 80, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 83, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 87, "usage_type": "name"}, {"api_name": "sklearn.metrics.auc", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 95, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 95, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 96, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 100, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 101, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 105, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 107, "usage_type": "name"}, {"api_name": "sklearn.metrics.auc", "line_number": 108, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 108, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.grid", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 116, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 121, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LogisticRegression", "line_number": 124, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 127, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 127, "usage_type": "name"}, {"api_name": "sklearn.metrics.auc", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 128, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 129, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 129, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 135, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 140, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_auc_score", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}]} +{"seq_id": "582344948", "text": "#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nimport os\n__author__ = 'adamkoziol'\nsetup(\n name=\"COWBAT\",\n version=\"0.5.0.15\",\n include_package_data=True,\n packages=find_packages(),\n scripts=[os.path.join('cowbat', 'assembly_pipeline.py'),\n os.path.join('cowbat', 'assembly_typing.py'),\n os.path.join('cowbat', 'validation', 'validate_cowbat.py')\n ],\n license='MIT',\n author='Adam Koziol',\n author_email='adam.koziol@canada.ca',\n description='CFIA OLC Workflow for Bacterial Assembly and Typing',\n url='https://github.com/OLC-Bioinformatics/COWBAT',\n long_description=open('README.md').read(),\n install_requires=['interop']\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 717, "program_lang": 
"python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "setuptools.setup", "line_number": 5, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}]} +{"seq_id": "410806462", "text": "'''helper class of methods for telegram bot to reduce duplicated calls'''\nfrom telegram import InlineKeyboardMarkup as iMarkup\nfrom telegram import Bot\n\n\nclass Bot_(object):\n '''Shortened telegram bot'''\n\n def __init__(self, bot_=Bot, update=None):\n self.bot_ = bot_\n self.update = update\n\n # query\n self.query = update.callback_query\n self.qmessage_id = self.query.message.message_id\n self.qchat_id = self.query.message.chat_id\n\n def edit_message_text_query(self, *,\n text=None,\n parse_mode=None,\n keyboard_base=None):\n\n reply_markup = keyboard_base\n\n if keyboard_base is not None:\n reply_markup = iMarkup(keyboard_base)\n\n self.bot_.edit_message_text(\n text=text,\n parse_mode=parse_mode,\n reply_markup=reply_markup,\n chat_id=self.qchat_id,\n message_id=self.qmessage_id\n )\n\n def send_photo_query_reply(self, photo_path, capion=None):\n self.bot_.send_photo(\n reply_to_message_id=self.qmessage_id,\n chat_id=self.qchat_id,\n photo=open(photo_path, 'rb'),\n capion=capion\n )\n\n def send_chat_action_query(self, action):\n self.bot_.send_chat_action(\n action=action,\n chat_id=self.qchat_id\n )\n", "sub_path": "HKO_bot/helpers/bot_.py", "file_name": "bot_.py", "file_ext": "py", "file_size_in_byte": 1422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "telegram.Bot", "line_number": 9, "usage_type": "name"}, {"api_name": "telegram.InlineKeyboardMarkup", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "72928029", "text": "import pygame\nimport os\n\n\nclass MapTile(pygame.image):\n def __init__(self, path, file_name, data):\n # Image dimensions will be static and handled by the map class\n\n self.load(os.path.join(path, file_name))\n self.top_left = data.topl\n self.bottom_right = data.bottomr\n self.center = data.center\n", "sub_path": "Prototyping/BaseStation/Map/MapTile.py", "file_name": "MapTile.py", "file_ext": "py", "file_size_in_byte": 332, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pygame.image", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "62246129", "text": "# Copyright 2020 The Kubric Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the 
License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport sklearn.utils\n\n\ndef mm3hash(name):\n \"\"\" Compute the uint32 hash that Blenders Cryptomatte uses.\n https://github.com/Psyop/Cryptomatte/blob/master/specification/cryptomatte_specification.pdf\n \"\"\"\n hash_32 = sklearn.utils.murmurhash3_32(name, positive=True)\n exp = hash_32 >> 23 & 255\n if (exp == 0) or (exp == 255):\n hash_32 ^= 1 << 23\n return hash_32\n\n\ndef random_rotation(rnd=np.random.RandomState()):\n \"\"\" Compute a random rotation as a quaternion that is uniform over orientations.\"\"\"\n\n z = 2\n while z > 1:\n x, y = rnd.rand(2)\n z = x*x + y*y\n\n w = 2\n while w > 1:\n u, v = rnd.rand(2)\n w = u*u + v*v\n\n s = np.sqrt((1-z) / w)\n return x, y, s*u, s*v\n", "sub_path": "kubric/assets/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1288, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sklearn.utils.utils.murmurhash3_32", "line_number": 23, "usage_type": "call"}, {"api_name": "sklearn.utils.utils", "line_number": 23, "usage_type": "attribute"}, {"api_name": "sklearn.utils", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "581205457", "text": "import socket\r\nimport base64\r\nimport time\r\nimport random\r\n\r\nimport conmysql\r\n# from MailService import conmysql\r\nfrom multiprocessing import Process\r\n\r\n\r\nclass UserServer(object):\r\n def __init__(self,):\r\n self.server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n self.port = -1\r\n\r\n def bind(self, port):\r\n self.server_socket.bind((\"\",port))\r\n self.port = int(port)\r\n\r\n def start(self):\r\n self.server_socket.listen(128)\r\n while True:\r\n # check service status\r\n print(self.port)\r\n\r\n client_socket, client_address = self.server_socket.accept()\r\n print(\"[%s,%s] user connected\" % client_address)\r\n # port = int(client_address[1])\r\n # ports 50000-59999 are server-to-server connections\r\n # if port >= 50000 and port <= 59999:\r\n handle_server_process = Process(target=self.smtpMailRecv, args=(client_socket,client_address,))\r\n handle_server_process.start()\r\n # # ports 40000-49999 are client SMTP connections\r\n # elif port >= 40000 and port <= 49999 :\r\n # handle_client_process = Process(target=self.handle_client_smtp, args=(client_socket,client_address,))\r\n # handle_client_process.start()\r\n # # ports 30000-39999 are client POP connections\r\n # elif port >= 30000 and port <= 39999 :\r\n # handle_client_pop = Process(target=self.handle_client_pop,args = (client_socket,client_address,))\r\n # handle_client_pop.start()\r\n # else:\r\n # client_socket.send(\"Your service is stopped!\".encode(\"utf-8\"))\r\n\r\n # client-to-server SMTP service\r\n def handle_client_smtp(self,client_socket,client_address):\r\n welcome_state = 0 # greeting state\r\n login_state = 0 # records user authentication state\r\n source_state = 0 # records whether the sender has been set\r\n subject_state = 0 # records whether the subject has been set\r\n des_state = 0 # records whether the recipient has been set\r\n mail_cont = \"\"# mail content\r\n mail_subject = \"\"# mail subject\r\n mail_source = \"\" # mail sender\r\n mail_des = \"\" # mail recipient\r\n\r\n while True:\r\n request_data = client_socket.recv(1024)\r\n str_data = request_data.decode(\"utf-8\")\r\n print(\"From:[%s,%s]\"%client_address,end=\" \")\r\n print(str_data)\r\n sstr = str_data.split(\" \")\r\n try:\r\n if sstr[0] == \"EHLO\":\r\n response = \"250-smtp.qq.com\\n\" \\\r\n \"250-PIPELINING\\n\" \\\r\n \"250-STARTTLS\\n\" \\\r\n \"250-AUTH LOGIN 
PLAIN\\n\" \\\n \"250-AUTH=LOGIN\\n\"\\\n \"250-MAILCOMPRESS\\n\"\\\n \"250 8BITMIME\"\n welcome_state = 1 #可以进行用户认证\n client_socket.send(response.encode(\"utf-8\"))\n elif sstr[0] == \"DATA\" and len(sstr) == 1 and des_state == 1 and source_state == 1 and subject_state == 1:\n response = \"354 Enter mail,end with '.' on a line by itself\"\n client_socket.send(response.encode(\"utf-8\"))\n while True:\n recv_data = client_socket.recv(1024)\n strr = recv_data.decode(\"utf-8\")\n print(strr)\n if strr == '.':\n break\n else:\n reply = \"$$\"\n client_socket.send(reply.encode(\"utf-8\"))\n mail_cont += strr\n mail_cont += \"$$\"\n\n # 拼接信的内容\n mail = {}\n mail['From'] = mail_source\n mail['To'] = mail_des\n mail['Subject'] = mail_subject\n mail['Cont'] = mail_cont\n mail_cont = \" \" #刷新mail_cout 避免连续发信内容叠加\n self.smtpMailDeliver(mail)\n\n response = \"250 ok\"\n client_socket.send(response.encode(\"utf-8\"))\n elif sstr[0] == \"QUIT\":\n # 关闭客户端连接\n break\n elif sstr[0] + sstr[1] == \"AUTHLOGIN\" and welcome_state == 1 and len(sstr) == 2:\n #用户名认证\n response = \"username=\"\n # response = base64.b64encode(str.encode(\"utf-8\"))\n client_socket.send(response.encode(\"utf-8\"))\n recv_data = client_socket.recv(1024)\n # username = base64.b64decode(recv_data).decode(\"utf-8\")\n username = recv_data.decode(\"utf-8\")\n print(\"From:[%s,%s]\" % client_address, end=\" \")\n print(username)\n\n #用户密码认证\n response = \"Password=\"\n # response = base64.b64encode(str.encode(\"utf-8\"))\n client_socket.send(response.encode(\"utf-8\"))\n recv_data = client_socket.recv(1024)\n # password = base64.b64decode(recv_data).decode(\"utf-8\")\n password = recv_data.decode(\"utf-8\")\n print(\"From:[%s,%s]\" % client_address, end=\" \")\n print(password)\n\n #服务校验\n state = conmysql.state_port(username)\n print(state)\n smtp_state = state[0]\n pop_state = state[1]\n\n #用户名密码数据库验证\n if conmysql.user_identified(username,password) == 1 and smtp_state=='1':\n response = \"235 auth successfully\"#认证成功\n login_state = 1\n else:\n if conmysql.user_identified(username,password) == 1:\n response = \"535 auth failed\" #认证失败\n else:\n response = \"536 smtp failed\" #smtp服务被禁用\n client_socket.send(response.encode(\"utf-8\"))\n\n elif sstr[0] + sstr[1] == \"MAILFROM:\" and len(sstr) == 3 and login_state ==1:\n mail_source = sstr[2]\n source_state = 1\n response = \"250 ok\"\n client_socket.send(response.encode(\"utf-8\"))\n elif sstr[0] + sstr[1] == \"RCPTTO:\" and len(sstr) == 3 and login_state ==1:\n mail_des = sstr[2]\n des_state = 1\n response = \"250 ok\"\n client_socket.send(response.encode(\"utf-8\"))\n elif sstr[0] == \"SUBJECT:\" and login_state == 1:\n sentence = sstr[1:]\n for i in sentence:\n mail_subject += i\n mail_subject += \" \"\n subject_state = 1\n response = \"250 ok\"\n client_socket.send(response.encode(\"utf-8\"))\n else:\n response = \"502 Error: command not implemented\"\n client_socket.send(response.encode(\"utf-8\"))\n except IndexError:\n response = \"502 Error: command not implemented\"\n client_socket.send(response.encode(\"utf-8\"))\n\n #关闭客户端连接\n client_socket.close()\n\n\n\n #客户端到服务器的POP服务\n def handle_client_pop(self,client_socket,client_address):\n login_state = 0 #记录用户登录状态\n user_name = \"\" #记录用户名\n pass_word = \"\" #记录密码\n while True:\n request_data = client_socket.recv(1024)\n str_data = request_data.decode(\"utf-8\")\n print(\"From:[%s,%s]\"%client_address,end = \" \")\n print(str_data)\n sstr = str_data.split(\" \")\n try:\n if sstr[0] == \"USER\" and len(sstr) == 2:\n user_name= 
sstr[1]\r\n response = \"+OK\"\r\n client_socket.send(response.encode(\"utf-8\"))\r\n elif sstr[0] == \"PASS\" and len(sstr) == 2:\r\n pass_word = sstr[1]\r\n # service check\r\n state = conmysql.state_port(user_name)\r\n print(state)\r\n smtp_state = state[0]\r\n pop_state = state[1]\r\n if conmysql.user_identified(user_name,pass_word) == 1 and pop_state == '1':\r\n # authentication succeeded\r\n response = \"+OK user successfully logged on\"\r\n login_state = 1\r\n else:\r\n if conmysql.user_identified(user_name,pass_word) != 1:\r\n # authentication failed\r\n response = \"-ERR user identify failed\"\r\n else :\r\n # POP disabled\r\n response = \"-ERR user pop failed\"\r\n client_socket.send(response.encode(\"utf-8\"))\r\n elif sstr[0] == \"LIST\" and len(sstr) == 1 and login_state == 1:\r\n info = conmysql.list_email(user_name+\"@qq.com\")\r\n response = \"\"\r\n for i in info:\r\n response += str(i[0])\r\n response += \" \"\r\n response += str(i[1])\r\n response += \"\\n\"\r\n response += \".\"\r\n client_socket.send(response.encode(\"utf-8\"))\r\n elif sstr[0] == \"RETR\" and len(sstr) == 2 and login_state == 1:\r\n li_id = int(sstr[1])\r\n cont = conmysql.cont_email(user_name+\"@qq.com\",li_id)\r\n if cont == -1:\r\n response = \"-ERR input email id doesn't exist\"\r\n else:\r\n response = cont\r\n client_socket.send(response.encode(\"utf-8\"))\r\n elif sstr[0] == \"DELE\" and len(sstr) == 2 and login_state == 1:\r\n del_id = int(sstr[1])\r\n conmysql.dele_mail(user_name+\"@qq.com\",del_id)\r\n response = \"+OK\"\r\n client_socket.send(response.encode(\"utf-8\"))\r\n elif sstr[0] == \"QUIT\" and len(sstr) == 1:\r\n response = \"+OK POP3 server signing off\"\r\n client_socket.send(response.encode(\"utf-8\"))\r\n break\r\n else:\r\n response = \"-ERR Unknown command\"\r\n client_socket.send(response.encode(\"utf-8\"))\r\n except IndexError:\r\n response = \"-ERR Unknown command\"\r\n client_socket.send(response.encode(\"utf-8\"))\r\n # close client\r\n client_socket.close()\r\n\r\n # server-to-server mail delivery\r\n def smtpMailDeliver(self,mail):\r\n print(mail)\r\n mail_des = mail['To']\r\n hostip = \"127.0.0.1\"\r\n port = conmysql.des_port(mail_des) # port number of the other server program\r\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n flag = 0\r\n while flag != 1:\r\n try:\r\n cli_port = random.randint(50000, 59999)\r\n client_socket.bind((\"\", cli_port))\r\n flag = 1\r\n print(cli_port)\r\n except socket.error:\r\n print(\"Port number already in use\")\r\n flag = 0\r\n i = 0\r\n while i != 1:\r\n try:\r\n client_socket.connect((hostip, port))\r\n i = 1\r\n except socket.error:\r\n print(\"Destination server program not started\")\r\n time.sleep(1800)\r\n client_socket.send(str(mail).encode(\"utf-8\"))\r\n client_socket.close()\r\n\r\n\r\n # server receives mail sent from another server\r\n def smtpMailRecv(self,client_socket,client_address):\r\n mail_data = client_socket.recv(1024)\r\n print(\"From:[%s,%s]\" % client_address, end=\" \")\r\n print(mail_data.decode(\"utf-8\"))\r\n mail = eval(mail_data.decode(\"utf-8\"))\r\n response = \"250 ok\"\r\n client_socket.send(response.encode(\"utf-8\"))\r\n # store the mail in the database\r\n conmysql.save_mail(mail)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n mailServer = UserServer()\r\n mailServer.bind(58002)\r\n mailServer.start()\r\n\r\n\r\n\r\n\r\n", "sub_path": "MailServer/UserServera.py", "file_name": "UserServera.py", "file_ext": "py", "file_size_in_byte": 12528, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "socket.socket", "line_number": 13, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 13, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 13, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 31, "usage_type": "call"}, {"api_name": "conmysql.state_port", 
"line_number": 124, "usage_type": "call"}, {"api_name": "conmysql.user_identified", "line_number": 130, "usage_type": "call"}, {"api_name": "conmysql.user_identified", "line_number": 134, "usage_type": "call"}, {"api_name": "conmysql.state_port", "line_number": 189, "usage_type": "call"}, {"api_name": "conmysql.user_identified", "line_number": 193, "usage_type": "call"}, {"api_name": "conmysql.user_identified", "line_number": 198, "usage_type": "call"}, {"api_name": "conmysql.list_email", "line_number": 206, "usage_type": "call"}, {"api_name": "conmysql.cont_email", "line_number": 217, "usage_type": "call"}, {"api_name": "conmysql.dele_mail", "line_number": 225, "usage_type": "call"}, {"api_name": "conmysql.des_port", "line_number": 246, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 247, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 247, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 247, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 251, "usage_type": "call"}, {"api_name": "socket.error", "line_number": 255, "usage_type": "attribute"}, {"api_name": "socket.error", "line_number": 263, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 265, "usage_type": "call"}, {"api_name": "conmysql.save_mail", "line_number": 279, "usage_type": "call"}]} +{"seq_id": "385317718", "text": "# coding: utf-8\n#!/usr/bin/python3\n\"\"\"\nAuthors Jane Liu and Meng Li\nClasses:\n LDATokenizer: Tokenizes the text\n LDAPreprocessor: Cleans and prepares text for preprocessing and LDA.\n LDA: Gets the corpus of documents and determines the words that appear with the highest frequency\n LDAtopics: Outputs the results of the LDA algorithm to Ltopics.txt\n\n\"\"\"\n\n# preprocessing:\nimport string\nimport gensim\nfrom gensim import corpora\nfrom gensim.models.ldamodel import LdaModel\nimport pprint\nimport nltk\nfrom nltk.stem import WordNetLemmatizer\n\n\nclass LDATokenizer:\n def __init__(self, text):\n self._text = text\n self._tokenized_doc = []\n\n def LDAtokenize(self):\n for item in self._text:\n data = item.split()\n for i in data:\n i = i.lower()\n self._tokenized_doc.append(i)\n\n return self._tokenized_doc\n\n\nclass LDAPreprocessor:\n def __init__(self, article):\n self._article = article\n self._cleantext = []\n self._temptext = []\n\n def LDApreprocess(self):\n\n # Tokenize the text file\n self._temptext = LDATokenizer(self._article)\n self._cleantext = self._temptext.LDAtokenize()\n\n # Remove stop words\n with open('src/stopwords.txt', 'r') as g:\n stopwords = g.read().splitlines()\n\n for word in list(self._cleantext):\n if word in stopwords:\n self._cleantext.remove(word)\n\n # Remove punctuation and empty strings\n self._cleantext = [''.join(c for c in s if c not in string.punctuation) for s in self._cleantext]\n self._cleantext = [s for s in self._cleantext if s]\n\n # Lemmatize the text\n lemma_text = []\n lemmatizer = WordNetLemmatizer()\n\n for word in list(self._cleantext):\n new_word = lemmatizer.lemmatize(word)\n lemma_text.append(new_word)\n\n return lemma_text\n\n\n # LDA function:\n\n\nclass LDA:\n def __init__(self, textlist):\n self._textlists = textlist\n\n def get_lda(self):\n dictionary = corpora.Dictionary(self._textlists)\n\n corpus = [dictionary.doc2bow(text) for text in self._textlists]\n\n ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=1, id2word=dictionary, passes=20)\n\n a = ldamodel.print_topics(num_words=20)\n\n (num, topic) = a[0]\n\n 
return topic\n\n\nclass LDAtopics:\n def __init__(self, topics):\n self._topics = topics\n\n def Ltopics(self):\n with open('Ltopics.txt', 'w') as f:\n for item in self._topics:\n f.write(item)\n\n\n", "sub_path": "hw2_py/venv/ldapreprocessor.py", "file_name": "ldapreprocessor.py", "file_ext": "py", "file_size_in_byte": 2613, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "string.punctuation", "line_number": 59, "usage_type": "attribute"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 64, "usage_type": "call"}, {"api_name": "gensim.corpora.Dictionary", "line_number": 81, "usage_type": "call"}, {"api_name": "gensim.corpora", "line_number": 81, "usage_type": "name"}, {"api_name": "gensim.models.ldamodel.LdaModel", "line_number": 85, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 85, "usage_type": "attribute"}]} +{"seq_id": "468222364", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport smbus\nimport time\n\nimport spidev as SPI\nimport SSD1306\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nimport numpy as np\n\n# Raspberry Pi pin configuration:\nRST = 19\n# Note the following are only used with SPI:\nDC = 16\nbus = 0\ndevice = 0\n\n# 128x64 display with hardware SPI:\ndisp = SSD1306.SSD1306(RST, DC, SPI.SpiDev(bus,device))\n\n# Initialize library.\ndisp.begin()\n\n# Clear display.\ndisp.clear()\ndisp.display()\n\n# Create blank image for drawing.\n# Make sure to create image with mode '1' for 1-bit color.\nwidth = disp.width\nheight = disp.height\nimage = Image.new('1', (width, height))\nlogo = Image.open('pku_logo1.bmp').convert('1')\n#logo = Image.open('pku_logo1.png').convert('1')\n\n# Get drawing object to draw on image.\ndraw = ImageDraw.Draw(image)\n\n# Draw a black filled box to clear the image.\n#draw.rectangle((0,0,width,height), outline=0, fill=0)\n\n# Draw some shapes.\n# First define some constants to allow easy resizing of shapes.\npadding = 1\ntop = padding\n#x = padding\nx = 80 \n# Load default font.\nfont = ImageFont.load_default()\n\n# Alternatively load a TTF font.\n# Some other nice fonts to try: http://www.dafont.com/bitmap.php\n#font = ImageFont.truetype('Minecraftia.ttf', 8)\n\ncurtime = '2018122'\n\n# Write two lines of text.\ndraw.text((x, top), 'cur time is ', font=font, fill=255)\ndraw.text((x, top+10), '-----------------',font=font, fill=255)\ndraw.text((x, top+20), curtime, font=font, fill=255)\ndraw.text((x, top+30), '-----------------',font=font, fill=255)\n\n# Display image.\ndisp.image(image)\ndisp.display()\n\n# dis time\naddress = 0x68\nregister = 0x00\n#sec min hour week day mout year\nNowTime = [0x00,0x00,0x18,0x03,0x16,0x01,0x18]\nw = [\"SUN\",\"Mon\",\"Tues\",\"Wed\",\"Thur\",\"Fri\",\"Sat\"];\n#/dev/i2c-1\nbus = smbus.SMBus(1)\ndef ds3231SetTime():\n bus.write_i2c_block_data(address,register,NowTime)\n\ndef ds3231ReadTime():\n return bus.read_i2c_block_data(address,register,7);\n\n\"\"\"\nweek = 0\nx0 = 0\ny0 = 0\nx1 = x0+1\ny1 = y0+1\nx_mid = x0 + (width-x)/2\na = 1\nprint(width,x0,x_mid)\nwhile 1 :\n draw.rectangle((0,0,width,height), outline=0, fill=0)\n# draw.bitmap((0,0),logo,fill=1)\n# draw.text((x, top+20), w[week], font=font, fill=255)\n# draw.line((x,top+20,x+20,top+40),fill=255,width=1)\n while x1 < disp.width:\n draw.line((x0,y0,x1,y1),fill=255,width=1)\n x0 = x1\n y0 = y1\n x1 = x1 + 1\n y1 = y1 + a*1\n\n if x1 < x_mid :\n a = 1\n else :\n a = -1\n\n disp.image(image)\n disp.display()\n 
break\n\ntime.sleep(1)\n\"\"\"\npi = 3.14\nx0 = np.linspace(0,pi*2,128)\nx1 = np.linspace(0,127,128)\ny0 = 30 - np.round(np.sin(x0)*30)\nprint(x1)\nprint(y0)\nprint(x1[0],x1[127])\ndraw.rectangle((0,0,width,height), outline=0, fill=0)\nfor i in range(126):\n# print(x1(i),y0(i),x1(i+1),y0(i+1))\n draw.line((x1[i],y0[i],x1[i+1],y0[i+1]),fill=255,width=1)\ndisp.image(image)\ndisp.display()\n", "sub_path": "lab_4/test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 2939, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "SSD1306.SSD1306", "line_number": 23, "usage_type": "call"}, {"api_name": "spidev.SpiDev", "line_number": 23, "usage_type": "call"}, {"api_name": "PIL.Image.new", "line_number": 36, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 36, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 37, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 37, "usage_type": "name"}, {"api_name": "PIL.ImageDraw.Draw", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.ImageDraw", "line_number": 41, "usage_type": "name"}, {"api_name": "PIL.ImageFont.load_default", "line_number": 53, "usage_type": "call"}, {"api_name": "PIL.ImageFont", "line_number": 53, "usage_type": "name"}, {"api_name": "smbus.SMBus", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "467165006", "text": "#This is a python script to inject an artificial transit signal.\nimport os\nimport pyfits\nimport matplotlib.pyplot as plt\nimport numpy as np\n#functions is another python file\nimport functions as f\n\nkplr_id = '006116605'\nkplr_file = 'kplr006116605-2009259160929_llc.fits'\n\njdadj, obsobject, lightdata = f.openfile(kplr_id, kplr_file)\ntime, flux, flux_err = f.fix_data(lightdata)\nflux, variance = f.rescale(flux, flux_err)\ntime -= np.median(time)\n\nperiod = 300.00\noffset = 20.0\ndepth = 0.008\nwidth = 0.09\n\nflux = f.raw_injection(period,offset,depth,width,time,flux)\n\noffset_interval = np.arange(0.00, 30.00, 0.01)\nchi2 = [f.sum_chi_squared(flux, f.box(period, o, depth, 0.09, time), variance) for o in offset_interval]\nbest_offset = offset_interval[np.argmin(chi2)]\n\nfig1 = plt.figure()\nsub1 = fig1.add_subplot(121)\nsub1.plot(time ,flux, color=\"black\", marker=\",\", linestyle = 'None')\nsub1.plot(time , f.box(period, best_offset, depth, 0.9, time), 'r')\nxlab = \"Time (days, Kepler Barycentric Julian date - %s)\"%jdadj\nsub1.set_xlabel(xlab)\nsub1.set_ylabel(\"Relative Brightness (electron flux)\")\nplottitle=\"Light Curve for %s\"%obsobject\nsub1.set_title(plottitle)\n\nsub2 = fig1.add_subplot(122)\nsub2.plot(offset_interval, chi2, 'b')\nsub2.ticklabel_format(style = 'sci')\nxlab2 = 'Offset (days)'\nsub2.set_xlabel(xlab2)\nylab = r'$\\chi^2$'\nsub2.set_ylabel(ylab)\ntitle = r'$\\chi^2 = \\sum_{i = 1}^N \\frac{(D_i - M_i)^2}{\\sigma^2_i}$'\nsub2.set_title(title)\n\n# period_interval = np.linspace(15.00, 22.00, 300)\n# offset_interval = np.linspace(0.0, 22.00, 600)\n\n# #Change to numpy arrays to optimize.\n# z = [[f.sum_chi_squared(flux, f.box(p,o,depth,width,time),variance) for o in offset_interval]\n# for p in period_interval]\n\n# z = np.asarray(z)\n# plt.imshow(z, cmap = 
'gray', extent = [offset_interval[0], offset_interval[-1], period_interval[0], period_interval[-1]], origin = 'lower', interpolation='nearest')\n# plt.colorbar()\n# plt.xlabel('Offset (days)')\n# plt.ylabel('Period (days)')\n# plt.show()\n\nplt.show()\n", "sub_path": "nonmainprograms/injection.py", "file_name": "injection.py", "file_ext": "py", "file_size_in_byte": 1998, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "functions.openfile", "line_number": 12, "usage_type": "call"}, {"api_name": "functions.fix_data", "line_number": 13, "usage_type": "call"}, {"api_name": "functions.rescale", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 15, "usage_type": "call"}, {"api_name": "functions.raw_injection", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 24, "usage_type": "call"}, {"api_name": "functions.sum_chi_squared", "line_number": 25, "usage_type": "call"}, {"api_name": "functions.box", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "functions.box", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "379393233", "text": "import pandas as pd\nimport datetime as dt\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport urllib.request as webInfo\nimport requests\nfrom bs4 import BeautifulSoup\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Section 2\ndef financial_table(ticker, end_diff_format, req_headers):\n financials = 'https://finance.yahoo.com/quote/' + ticker + '/financials?p=' + ticker\n with requests.Session() as s:\n url = financials\n r = s.get(url, headers=req_headers)\n soup_is = BeautifulSoup(r.content, 'lxml')\n\n all_titls = soup_is.find_all('div', class_='D(tbr)')\n if (len(all_titls) == 0):\n print(\"Data for {} is currently unavailable, plrease try again later!\".format(ticker))\n return pd.DataFrame()\n\n column_titles = []\n temp_list = []\n final = []\n index = 0\n for title in all_titls[0].find_all('div', class_='D(ib)'):\n column_titles.append(title.text)\n while index <= len(all_titls)-1:\n temp = all_titls[index].find_all('div', class_='D(tbc)')\n for line in temp:\n temp_list.append(line.text)\n final.append(temp_list)\n temp_list = []\n index+=1\n\n df = pd.DataFrame(final[1:])\n df.columns = column_titles\n df = df[:-5]\n df = df.T\n\n new_header = df. 
iloc[0]\n df = df[1:]\n df.columns = new_header\n\n df_1 = df.rename(columns = {'Breakdown': 'Year'}, inplace = False)\n df_1.index.name = '' # Remove the index name\n df_1.rename(index={'ttm': end_diff_format},inplace=True) #Rename ttm in index columns to end of the year\n df_1 = df_1.reset_index()\n df_1 = df_1.rename(columns = {'': 'Year'}, inplace = False)\n df_1[\"Year\"] = df_1[\"Year\"].apply(lambda x: dt.datetime.strptime(str(x), '%m/%d/%Y'))\n df_1[\"Year\"] = df_1[\"Year\"].apply(lambda x: x.year)\n return df_1\n\n# Section 3\ndef sector_industry(ticker, req_headers):\n sector_var = \"\"\n industry_var = \"\"\n\n # Sector\n sector = 'https://finance.yahoo.com/quote/' + ticker + '/profile?p=' + ticker\n\n with requests.Session() as s:\n url = sector\n r = s.get(url, headers=req_headers)\n soup_is = BeautifulSoup(r.content, 'html.parser')\n\n chooser = 0\n for g in soup_is.find_all('p', attrs={'class': 'D(ib) Va(t)'}):\n for spans in g.find_all('span'):\n if (chooser == 0):\n chooser += 1\n continue;\n elif (chooser == 1):\n chooser += 1\n sector_var = spans.text\n continue;\n elif (chooser == 2):\n chooser += 1\n continue;\n elif (chooser == 3):\n chooser += 1\n industry_var = spans.text\n continue;\n return [sector_var, industry_var]\n\n# Section 4\ndef roe_roa(ticker, req_headers):\n ## ROE\n key_stats = 'https://finance.yahoo.com/quote/' + ticker + '/key-statistics?p=' + ticker\n roa = \"\"\n roe = \"\"\n\n with requests.Session() as s:\n url = key_stats\n r = s.get(url, headers=req_headers)\n soup_is = BeautifulSoup(r.content, 'html.parser')\n\n counter = 0\n finding = soup_is.find('div', attrs={'class': 'Mb(10px) Pend(20px) smartphone_Pend(0px)'})\n if (finding is None): return ['0%','0%']\n for each in finding.find_all('td', attrs={'class': \"Fw(500) Ta(end) Pstart(10px) Miw(60px)\"}):\n if (counter == 0):\n counter+=1\n continue\n elif (counter == 1):\n counter+=1\n continue\n elif (counter == 2):\n counter+=1\n continue\n elif (counter == 3):\n counter+=1\n continue\n elif (counter == 4):\n counter+=1\n roa = each.text\n continue\n elif (counter == 5):\n counter+=1\n roe = each.text\n continue\n else:\n break\n return [roa, roe]\n\n# Section 5.1\ndef four_year_increasing(df, title, globalResultsDict):\n df_small = df[[\"Year\", title]][::-1]\n df_small[title] = df_small[title].apply(lambda x: float(x.replace(',', '')))\n\n plt.bar(df_small[\"Year\"], df_small[title])\n\n m, b = np.polyfit(df_small[\"Year\"], df_small[title], 1)\n plt.plot(df_small[\"Year\"], m * df_small[\"Year\"] + b, 'black')\n\n globalResultsDict[title] = \"Sell\" # sell signal\n if m > 0:\n globalResultsDict[title] = \"Buy\"\n\n plt.title(title)\n plt.xlabel(\"Years\")\n plt.savefig(\"{}.png\".format(title), transparent=True)\n plt.close()\n\n# Section 5.2\ndef four_year_increasing_noTTM(df, title, globalResultsDict):\n df_small = df[[\"Year\", title]][1:5]\n df_small = df_small[::-1]\n df_small[title] = df_small[title].apply(lambda x: float(x.replace(',', '')))\n\n plt.bar(df_small[\"Year\"], df_small[title])\n\n m, b = np.polyfit(df_small[\"Year\"], df_small[title], 1)\n plt.plot(df_small[\"Year\"], m * df_small[\"Year\"] + b, 'black')\n\n globalResultsDict[title] = \"Sell\" # sell signal\n if m > 0:\n globalResultsDict[title] = \"Buy\"\n\n plt.title(title)\n plt.xlabel(\"Years\")\n plt.savefig(\"{}.png\".format(title), transparent=True)\n plt.close()\n\n# Section 5.3\ndef four_year_decreasing(df, title, globalResultsDict):\n df_small = df[[\"Year\", title]][::-1]\n df_small[title] = 
df_small[title].apply(lambda x: float(x.replace(',', '')))\n\n plt.bar(df_small[\"Year\"], df_small[title])\n\n m, b = np.polyfit(df_small[\"Year\"], df_small[title], 1)\n plt.plot(df_small[\"Year\"], m * df_small[\"Year\"] + b, 'black')\n\n globalResultsDict[title] = \"Sell\" # sell signal\n if m < 0:\n globalResultsDict[title] = \"Buy\"\n\n plt.title(title)\n plt.xlabel(\"Years\")\n plt.savefig(\"{}.png\".format(title), transparent=True)\n plt.close()\n\n# Section 6\n# Getting competitors\ndef competitor_func(ticker, req_headers):\n competitors = 'https://csimarket.com/stocks/competitionNO3.php?code={}'.format(ticker)\n with requests.Session() as s:\n url = competitors\n r = s.get(url, headers=req_headers)\n soup_is = BeautifulSoup(r.content, 'html.parser')\n\n competitor_list = []\n for v_1 in soup_is.find_all('table', attrs={'class': 'osnovna_tablica_bez_gifa'}):\n for v_2 in v_1.find_all('tr', attrs={'onmouseover': \"this.className='bgplv'\"}):\n for v_3 in v_2.find_all('td', attrs={'class': \"plavat svjetlirub dae al\"}):\n competitor_list.append(v_3.text)\n\n #COMPARISONS\n watchList_comps = competitor_list[0:5]\n watchList_comps.append(ticker)\n\n compare_list = ['Trailing P/E', 'Forward P/E', 'PEG Ratio (5 yr expected)', 'Price/Sales (ttm)', 'Price/Book (mrq)', 'Enterprise Value/Revenue', 'Enterprise Value/EBITDA', 'Return on Assets(%)', 'Return on Equity(%)']\n competitor_df = pd.DataFrame(compare_list)\n\n for i in watchList_comps:\n comparisons = 'https://finance.yahoo.com/quote/' + i + '/key-statistics?p=' + i\n with requests.Session() as s:\n url = comparisons\n try:\n r = s.get(url, headers=req_headers)\n except:\n time.sleep(10)\n r = s.get(url, headers=req_headers)\n\n soup_is = BeautifulSoup(r.content, 'html.parser')\n data_list = []\n ent_skipper = 0\n for g in soup_is.find_all('tr', attrs={'class': 'Bxz(bb) H(36px) BdB Bdbc($seperatorColor) fi-row Bgc($hoverBgColor):h'}):\n if (len(g) == 0): break\n for forward_pe in g.find_all('td', attrs={'class': \"Fw(500) Ta(end) Pstart(10px) Miw(60px)\"}):\n data_stock = forward_pe.text\n if(ent_skipper == 0):\n ent_skipper+=1\n break\n if(data_stock == \"N/A\"):\n data_stock = '0';\n data_list.append(float(data_stock.replace(',','')))\n roa_roe_list = roe_roa(i, req_headers)\n data_list.append(float(roa_roe_list[0].replace('N/A','0').replace('%','')))\n data_list.append(float(roa_roe_list[1].replace('N/A','0').replace('%','')))\n\n if (len(competitor_df) != len(data_list)):\n continue;\n\n competitor_df[i] = data_list\n\n competitor_df[\"Peer Average\"] = competitor_df.mean(axis=1).apply(lambda x: round(x, 2))\n competitor_df[\"Peer Median\"] = competitor_df.median(axis=1).apply(lambda x: round(x, 2))\n return competitor_df\n\n# Section 7\ndef news_df_create(ticker, req_headers):\n news = 'https://www.marketwatch.com/investing/stock/' + ticker\n with requests.Session() as s:\n url = news\n r = s.get(url, headers=req_headers)\n soup_is = BeautifulSoup(r.content, 'html.parser')\n\n news_list = []\n for roe in soup_is.find_all('div', attrs={'class': 'collection__elements j-scrollElement'}):\n for ro_1 in roe.find_all('div'):\n for a in ro_1.find_all('a', attrs={'class': 'figure__image'}):\n\n img = a.find('img', alt=True)\n if (img is not None and img['alt'] == 'Read full story'):\n news_list.append(a['href'])\n\n # Headlines\n headlines = []\n relevant_h = news_list[0:5];\n for article in relevant_h:\n with requests.Session() as s:\n url = article\n r = s.get(url, headers=req_headers)\n soup_is = BeautifulSoup(r.content, 
'html.parser')\n\n for headline in soup_is.find_all('h1', attrs={'class': 'article__headline'}):\n headlines.append(headline.text)\n\n # DF\n news_df = pd.DataFrame()\n news_df[\"Headline\"] = headlines\n news_df[\"Article Link\"] = relevant_h\n news_df[\"Positivity Score\"] = np.array(1)\n\n news_df[\"Headline\"] = news_df[\"Headline\"].apply(lambda x: x.replace('\\n', \"\"))\n news_df[\"Article Link\"] = news_df[\"Article Link\"].apply(\n lambda x: \"{}\".format(x, x))\n\n return news_df\n\ndef insider_df_creation(ticker, req_headers):\n insiders = 'http://openinsider.com/screener?s={}&o=&pl=&ph=&ll=&lh=&fd=730&fdr=&td=0&tdr=&fdlyl=&fdlyh=&daysago=&xp=1&xs=1&vl=&vh=&ocl=&och=&sic1=-1&sicl=100&sich=9999&grp=0&nfl=&nfh=&nil=&nih=&nol=&noh=&v2l=&v2h=&oc2l=&oc2h=&sortcol=0&cnt=100&page=1'.format(\n ticker)\n with requests.Session() as s:\n url = insiders\n try:\n r = s.get(url, headers=req_headers)\n except:\n time.sleep(5)\n r = s.get(url, headers=req_headers)\n\n soup_is = BeautifulSoup(r.content, 'html.parser')\n insider_columns = ['Filing Date', 'Trade Date', 'Ticker', 'Insider Name', 'Title', 'Trade Type', 'Price', 'Qty',\n 'Owned', 'Own', 'Value']\n\n insider_df = pd.DataFrame(columns=insider_columns)\n skip = 0\n for trades in soup_is.find_all('table', attrs={'class': 'tinytable'}):\n for trs in trades.find_all('tr'):\n curr_list = []\n for tds in trs.find_all('td'):\n if (len(tds.text) > 1):\n curr_list.append(tds.text)\n if (len(curr_list) == len(insider_columns)):\n insider_df.loc[len(insider_df)] = curr_list\n\n indices_to_remove = []\n for idx, i in insider_df.iterrows():\n if (str(i['Ticker']).replace(' ', '') != ticker):\n indices_to_remove.append(idx)\n\n insider_df = insider_df.drop(indices_to_remove)\n insider_df[\"Trade Date\"] = insider_df[\"Trade Date\"].apply(lambda x: dt.datetime.strptime(x, '%Y-%m-%d'))\n insider_df_curr_year = insider_df[insider_df[\"Trade Date\"].apply(lambda x: x.year == dt.datetime.now().year)]\n if(insider_df_curr_year.empty):\n insider_df_curr_year = insider_df\n insider_df_curr_year['Qty'] = insider_df_curr_year['Qty'].apply(lambda x: round(float(x.replace(',', '')), 2))\n insider_df_curr_year.loc[insider_df_curr_year['Qty'] <= 0, 'Type'] = 'Sale'\n insider_df_curr_year.loc[insider_df_curr_year['Qty'] > 0, 'Type'] = 'Buy'\n insider_df_curr_year_small = insider_df_curr_year[\n [\"Trade Date\", \"Insider Name\", \"Title\", \"Type\", \"Price\", \"Qty\", \"Owned\", \"Own\", \"Value\"]]\n insider_df_curr_year_small = insider_df_curr_year_small[0:10]\n return insider_df_curr_year_small\n\ndef etf_exposure_create(ticker, req_headers):\n etf_exposure = 'https://etfdb.com/stock/{}/'.format(ticker)\n with requests.Session() as s:\n url = etf_exposure\n r = s.get(url, headers=req_headers)\n soup_is = BeautifulSoup(r.content, 'html.parser')\n\n ticker_list = []\n etf_name_list = []\n category_list = []\n expense_ratio_list = []\n weightage_list = []\n for etf_finder in soup_is.find_all('table', attrs={\n 'class': 'table mm-mobile-table table-module2 table-default table-striped table-hover table-pagination'}):\n for tbody in etf_finder.find_all('tbody'):\n for tr in tbody.find_all('tr'):\n if (tr.find(\"td\", attrs={'data-th': 'Ticker'}).a is not None):\n ticker = tr.find(\"td\", attrs={'data-th': 'Ticker'}).a.text\n else:\n ticker = tr.find(\"td\", attrs={'data-th': 'Ticker'}).text\n if (tr.find(\"td\", attrs={'data-th': 'ETF'}).a is not None):\n etf_name = tr.find(\"td\", attrs={'data-th': 'ETF'}).a.text\n else:\n etf_name = tr.find(\"td\", 
attrs={'data-th': 'ETF'}).text\n if (tr.find(\"td\", attrs={'data-th': 'ETFdb.com Category'}).a is not None):\n category = tr.find(\"td\", attrs={'data-th': 'ETFdb.com Category'}).a.text\n else:\n category = tr.find(\"td\", attrs={'data-th': 'ETFdb.com Category'}).text\n if (tr.find(\"td\", attrs={'data-th': 'Expense Ratio'}).a is not None):\n expense_ratio = tr.find(\"td\", attrs={'data-th': 'Expense Ratio'}).a.text\n else:\n expense_ratio = tr.find(\"td\", attrs={'data-th': 'Expense Ratio'}).text\n if (tr.find(\"td\", attrs={'data-th': 'Weighting'}).a is not None):\n weightage = tr.find(\"td\", attrs={'data-th': 'Weighting'}).a.text\n else:\n weightage = tr.find(\"td\", attrs={'data-th': 'Weighting'}).text\n ticker_list.append(ticker)\n etf_name_list.append(etf_name)\n category_list.append(category)\n expense_ratio_list.append(expense_ratio)\n weightage_list.append(weightage)\n\n ETF_exposure_df = pd.DataFrame()\n ETF_exposure_df['Ticker'] = ticker_list\n ETF_exposure_df['ETF Name'] = etf_name_list\n ETF_exposure_df['Category'] = category_list\n ETF_exposure_df['Expense Ratio'] = expense_ratio_list\n ETF_exposure_df['Weightage'] = weightage_list\n\n ETF_exposure_df_10 = ETF_exposure_df[0:10]\n return ETF_exposure_df_10\n\ndef analytics_helper(avg, med, switcher_avg, switcher_med):\n if (switcher_avg == \"greater\" and switcher_med == \"greater\" and avg > 100 or med > 100): return \"Buy (But too high - kinda risky, kinda sus)\"\n if (switcher_avg == \"greater\" and switcher_med == \"greater\"): return \"Buy\"\n if (switcher_avg == \"lesser\" and switcher_med == \"greater\" and avg < 2 and med > 5): return \"Buy\"\n if (switcher_avg == \"greater\" and switcher_med == \"lesser\" and avg > 5 and med < 2): return \"Buy\"\n return \"Sell\"\n\ndef competitor_analysis(df, ticker):\n new_df = df[[ticker, \"Peer Average\", \"Peer Median\"]]\n information_list = ['Trailing P/E', 'Forward P/E', 'PEG Ratio (5 yr expected)', 'Price/Sales (ttm)', 'Price/Book (mrq)','Enterprise Value/Revenue', 'Enterprise Value/EBITDA', 'Return on Assets(%)', 'Return on Equity(%)']\n results_list = []\n\n switcher_avg = \"\"\n switcher_med = \"\"\n for i in range(len(information_list)):\n new = new_df.at[i,ticker]\n old_avg = new_df.at[i,\"Peer Average\"]\n old_median = new_df.at[i, \"Peer Median\"]\n avg_per = (new - old_avg)/(old_avg) * 100\n median_per = (new - old_median) / (old_median) * 100\n if (avg_per < 0):\n switcher_avg = \"lesser\"\n elif (avg_per >= 0):\n switcher_avg = \"greater\"\n if (median_per < 0):\n switcher_med = \"lesser\"\n elif (median_per >= 0):\n switcher_med = \"greater\"\n results_list.append(\"{} {} is {}% {} than industry average, and {}% {} than industry median --> {}\".format(ticker, information_list[i], round(abs(avg_per),2), switcher_avg, round(abs(median_per),2), switcher_med, analytics_helper(abs(avg_per), abs(median_per), switcher_avg, switcher_med)))\n\n return results_list\n", "sub_path": "MHF_fundamentals.py", "file_name": "MHF_fundamentals.py", "file_ext": "py", "file_size_in_byte": 16532, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "warnings.filterwarnings", "line_number": 10, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 18, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, 
{"api_name": "datetime.datetime.strptime", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 53, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 65, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 68, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 96, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 136, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 163, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 163, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 165, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 165, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 166, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "numpy.polyfit", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 182, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 182, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, 
"usage_type": "name"}, {"api_name": "requests.Session", "line_number": 191, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 194, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 207, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 211, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 216, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 219, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 248, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 251, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 266, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 269, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 278, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 289, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 294, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 297, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 301, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 318, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 318, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 319, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 319, "usage_type": "attribute"}, {"api_name": "requests.Session", "line_number": 332, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 335, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 372, "usage_type": "call"}]} +{"seq_id": "25905161", "text": "import json\nimport logging\nimport os\nimport re\nimport time\n\nfrom future.backports.urllib.parse import urlparse, unquote_plus, quote_plus\n\nfrom oic.federation.file_system import FileSystem\nfrom oic.oauth2.base import PBase\n\nfrom oic.federation.bundle import JWKSBundle\n\nfrom oic.utils.keyio import KeyJar\nfrom oic.federation import ClientMetadataStatement\nfrom oic.federation.operator import Operator\n\n__author__ = 'roland'\n\nlogger = logging.getLogger(__name__)\n\n\nclass FederationEntity(Operator):\n def __init__(self, srv, jwks_file=None, iss='', keyjar=None,\n signed_metadata_statements_dir='.', fo_bundle=None,\n ms_cls=ClientMetadataStatement):\n\n if jwks_file:\n keyjar = self.read_jwks_file(jwks_file)\n\n Operator.__init__(self, iss=iss, keyjar=keyjar, httpcli=srv)\n\n # FO keys\n self.fo_bundle = fo_bundle\n\n # Signed metadata statements\n self.signed_metadata_statements = FileSystem(\n signed_metadata_statements_dir,\n key_conv={'to': quote_plus, 'from': unquote_plus})\n self.signed_metadata_statements.sync()\n\n self.ms_cls = ms_cls\n\n def read_jwks_file(self, jwks_file):\n _jwks = open(jwks_file, 'r').read()\n _kj = KeyJar()\n _kj.import_jwks(json.loads(_jwks), '')\n return _kj\n\n def pick_by_priority(self, req, priority=None):\n if not priority:\n return req.values()[0] # Just return any\n\n for iss in priority:\n try:\n return req[iss]\n except KeyError:\n pass\n return None\n\n def pick_signed_metadata_statements(self, pattern):\n \"\"\"\n Pick signed metadata statements based on ISS pattern matching\n :param pattern: A regular expression to match the iss against\n :return: list of signed metadata statements\n \"\"\"\n comp_pat = re.compile(pattern)\n res = []\n for iss, 
vals in self.signed_metadata_statements.items():\n            if comp_pat.search(iss):\n                res.extend(vals)\n        return res\n\n    def get_metadata_statement(self, json_ms):\n        \"\"\"\n        Unpack and evaluate a compound metadata statement\n        :param json_ms: The metadata statement as a JSON document\n        :return: A dictionary with metadata statements per FO\n        \"\"\"\n        _cms = self.unpack_metadata_statement(json_ms=json_ms,\n                                              cls=self.ms_cls)\n        ms_per_fo = self.evaluate_metadata_statement(_cms)\n\n        return ms_per_fo\n", "sub_path": "src/oic/federation/entity.py", "file_name": "entity.py", "file_ext": "py", "file_size_in_byte": 2565, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "oic.federation.operator.Operator", "line_number": 23, "usage_type": "name"}, {"api_name": "oic.federation.ClientMetadataStatement", "line_number": 26, "usage_type": "name"}, {"api_name": "oic.federation.operator.Operator.__init__", "line_number": 31, "usage_type": "call"}, {"api_name": "oic.federation.operator.Operator", "line_number": 31, "usage_type": "name"}, {"api_name": "oic.federation.file_system.FileSystem", "line_number": 37, "usage_type": "call"}, {"api_name": "future.backports.urllib.parse.quote_plus", "line_number": 39, "usage_type": "name"}, {"api_name": "future.backports.urllib.parse.unquote_plus", "line_number": 39, "usage_type": "name"}, {"api_name": "oic.utils.keyio.KeyJar", "line_number": 46, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 47, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "122979101", "text": "from dotenv import load_dotenv\nload_dotenv()\n\nimport argparse\nimport os\nimport requests\nfrom urllib.parse import urlparse\n\nTOKEN= os.getenv('BITLY_TOKEN')\n\nURLS= {\n    'base_url': 'https://api-ssl.bitly.com/v4/',\n    'short_url': 'bitlinks/',\n    'count_clicks': '/clicks/summary'\n}\nHEADER= {'Authorization': TOKEN}\n\ndef shorten_link(url):\n    payload= {'long_url': url}\n    response= requests.post(\n        f\"{URLS['base_url']}{URLS['short_url']}\",\n        headers= HEADER,\n        json= payload\n    )\n    response.raise_for_status()\n    bitlink= response.json()\n    return bitlink['id']\n\ndef get_arguments():\n    parser= argparse.ArgumentParser('the program will print a short link')\n    parser.add_argument('link', help='link')\n    args= parser.parse_args()\n    return args.link\n\ndef summary_clicks(bitly_link):\n    payload= {'units': -1}\n    response= requests.get(\n        f\"{URLS['base_url']}{URLS['short_url']}{bitly_link}{URLS['count_clicks']}\",\n        params= payload,\n        headers= HEADER\n    )\n    response.raise_for_status()\n    bitlink= response.json()\n    return bitlink['total_clicks']\n\ndef check_link(url,bitly_link):\n    if not check_bitly(bitly_link):\n        try:\n            bitlink= shorten_link(url)\n            print('short link: ',bitlink)\n            print('number of clicks: ', summary_clicks(bitlink))\n        except requests.exceptions.HTTPError:\n            print('invalid link')\n            exit()\n    else:\n        try:\n            bitlink= summary_clicks(bitly_link)\n            print('number of clicks: ',bitlink)\n        except requests.exceptions.HTTPError:\n            print('invalid link')\n            exit()\n\ndef check_bitly(url):\n    response= requests.get(\n        f\"{URLS['base_url']}{URLS['short_url']}{url}\",\n        headers= HEADER\n    )\n    return response.ok\n\ndef main():\n    # url= input('enter a link:\n')\n    url= get_arguments()\n    parsed_url= urlparse(url)\n    bitly_link= parsed_url.netloc+ parsed_url.path \n    check_link(url, 
bitly_link)\n\nif __name__ == '__main__': \n main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2229, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 2, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 9, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 20, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 37, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 52, "usage_type": "attribute"}, {"api_name": "requests.exceptions", "line_number": 59, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 64, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "604828098", "text": "from ..lib.csvUtilities import csvResponse\nfrom ..modules.fund_module import fund\nfrom ..modules.portfolio_module import portfolio\nfrom ..exposure.categorization.categorization import CategoryName\nfrom .tables.exposureReportTables import CategoryExposureTable, ManagerCategoryExposureTable, StrategyCategoryExposureTable\n###################################################################################\nclass ExposureReport(fund,csvResponse):\n \n categoryNames = [CategoryName.Strategy,CategoryName.RCGGeoBucket, CategoryName.RCGCustomAssetClass, CategoryName.RCGCustomInstrument, CategoryName.Sector]\n def __init__(self,fund_id,snapshot_date):\n\n self.fund_id = fund_id\n self.snapshot_date = snapshot_date\n self.categoryObjects = []\n\n self.dataSets = [] ### Series of Data Tables\n self.report_data = [] ### Array Format - Used by csvUtilities to Create CSV Response\n self.arrayName = 'report_data' ## For CSV Response\n \n self.reportName = None\n self.numSpaceRows = 3\n self.portfolioNames = {}\n self.portfolioStrategies = {}\n \n fund.__init__(self,self.fund_id,self.snapshot_date)\n self.filename = str(self.fund_name.split(' ')[0])+'.csv'\n ### Get Portfolio Name for Report CSV\n self.reportName = self.fund_name\n csvResponse.__init__(self)\n\n ### Applies Spacing\n def space(self):\n for i in range(self.numSpaceRows):\n self.report_data.append([])\n return\n\n ### Generates Data Tables for Each Category\n def generate(self):\n \n self.run()\n self.report_data.append([self.fund_name,' ',self.snapshot_date.strftime(\"%Y-%m-%d\")])\n \n managerCategory = self.categories[CategoryName.Manager.name]\n managerTable = ManagerCategoryExposureTable(self,managerCategory)\n managerTable.generate() \n\n self.dataSets.append(managerTable.data)\n self.report_data.extend(managerTable.data)\n self.space()\n\n ### Generate Data Table for Each Category\n for categoryName in ExposureReport.categoryNames:\n ### Find Categorization Object from Portfolio\n category = self.categories[categoryName.name]\n \n dataTable = CategoryExposureTable(self,category)\n if categoryName == CategoryName.Strategy:\n dataTable = StrategyCategoryExposureTable(self,category)\n dataTable.generate()\n \n self.dataSets.append(dataTable.data)\n self.report_data.extend(dataTable.data)\n self.space()\n return\n\n#########################################################################\nclass ManagerExposureReport(portfolio,csvResponse):\n \n categoryNames = [CategoryName.RCGGeoBucket, CategoryName.RCGCustomAssetClass, CategoryName.RCGCustomInstrument, 
CategoryName.Sector]\n def __init__(self,portfolio_id,snapshot_date):\n\n self.portfolio_id = portfolio_id\n self.snapshot_date = snapshot_date\n self.categoryObjects = []\n\n self.dataSets = [] ### Series of Data Tables\n self.report_data = [] ### Array Format - Used by csvUtilities to Create CSV Response\n self.arrayName = 'report_data' ## For CSV Response\n \n self.reportName = None\n self.numSpaceRows = 3\n\n portfolio.__init__(self,self.portfolio_id,self.snapshot_date)\n self.filename = str(self.portfolio_name.split(' ')[0])+'.csv'\n \n ### Get Portfolio Name for Report CSV\n self.reportName = self.portfolio_name\n csvResponse.__init__(self)\n return\n\n ### Applies Spacing\n def space(self):\n for i in range(self.numSpaceRows):\n self.report_data.append([])\n return\n\n ### Generates Data Tables for Each Category\n def generate(self):\n\n self.run()\n ### Give Dataset Top Row\n self.report_data.append([self.portfolio_name,' ',self.snapshot_date.strftime(\"%Y-%m-%d\")])\n\n ### Generate Set of Category Object for Each Category Name\n for categoryName in ManagerExposureReport.categoryNames:\n category = self.categories[categoryName.name]\n dataTable = CategoryExposureTable(self,category)\n dataTable.generate()\n \n self.dataSets.append(dataTable.data)\n self.report_data.extend(dataTable.data)\n self.space()\n\n return\n", "sub_path": "app/reporting/exposureReport.py", "file_name": "exposureReport.py", "file_ext": "py", "file_size_in_byte": 4439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "modules.fund_module.fund", "line_number": 7, "usage_type": "name"}, {"api_name": "lib.csvUtilities.csvResponse", "line_number": 7, "usage_type": "name"}, {"api_name": "exposure.categorization.categorization.CategoryName.Strategy", "line_number": 9, "usage_type": "attribute"}, {"api_name": "exposure.categorization.categorization.CategoryName", "line_number": 9, "usage_type": "name"}, {"api_name": "exposure.categorization.categorization.CategoryName.RCGGeoBucket", "line_number": 9, "usage_type": "attribute"}, {"api_name": "exposure.categorization.categorization.CategoryName.RCGCustomAssetClass", "line_number": 9, "usage_type": "attribute"}, {"api_name": "exposure.categorization.categorization.CategoryName.RCGCustomInstrument", "line_number": 9, "usage_type": "attribute"}, {"api_name": "exposure.categorization.categorization.CategoryName.Sector", "line_number": 9, "usage_type": "attribute"}, {"api_name": "modules.fund_module.fund.__init__", "line_number": 25, "usage_type": "call"}, {"api_name": "modules.fund_module.fund", "line_number": 25, "usage_type": "name"}, {"api_name": "lib.csvUtilities.csvResponse.__init__", "line_number": 29, "usage_type": "call"}, {"api_name": "lib.csvUtilities.csvResponse", "line_number": 29, "usage_type": "name"}, {"api_name": "exposure.categorization.categorization.CategoryName.Manager", "line_number": 43, "usage_type": "attribute"}, {"api_name": "exposure.categorization.categorization.CategoryName", "line_number": 43, "usage_type": "name"}, {"api_name": "tables.exposureReportTables.ManagerCategoryExposureTable", "line_number": 44, "usage_type": "call"}, {"api_name": "tables.exposureReportTables.CategoryExposureTable", "line_number": 56, "usage_type": "call"}, {"api_name": "exposure.categorization.categorization.CategoryName.Strategy", "line_number": 57, "usage_type": "attribute"}, {"api_name": "exposure.categorization.categorization.CategoryName", "line_number": 57, "usage_type": "name"}, 
{"api_name": "tables.exposureReportTables.StrategyCategoryExposureTable", "line_number": 58, "usage_type": "call"}, {"api_name": "modules.portfolio_module.portfolio", "line_number": 67, "usage_type": "name"}, {"api_name": "lib.csvUtilities.csvResponse", "line_number": 67, "usage_type": "name"}, {"api_name": "exposure.categorization.categorization.CategoryName.RCGGeoBucket", "line_number": 69, "usage_type": "attribute"}, {"api_name": "exposure.categorization.categorization.CategoryName", "line_number": 69, "usage_type": "name"}, {"api_name": "exposure.categorization.categorization.CategoryName.RCGCustomAssetClass", "line_number": 69, "usage_type": "attribute"}, {"api_name": "exposure.categorization.categorization.CategoryName.RCGCustomInstrument", "line_number": 69, "usage_type": "attribute"}, {"api_name": "exposure.categorization.categorization.CategoryName.Sector", "line_number": 69, "usage_type": "attribute"}, {"api_name": "modules.portfolio_module.portfolio.__init__", "line_number": 83, "usage_type": "call"}, {"api_name": "modules.portfolio_module.portfolio", "line_number": 83, "usage_type": "name"}, {"api_name": "lib.csvUtilities.csvResponse.__init__", "line_number": 88, "usage_type": "call"}, {"api_name": "lib.csvUtilities.csvResponse", "line_number": 88, "usage_type": "name"}, {"api_name": "tables.exposureReportTables.CategoryExposureTable", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "402620787", "text": "#!/usr/bin/env python\n\"\"\"Implement client side components.\n\nClient components are managed, versioned modules which can be loaded at runtime.\n\"\"\"\nimport importlib\nimport logging\nimport os\nimport site\nimport StringIO\nimport zipfile\n\nfrom grr.client import actions\nfrom grr.lib import config_lib\nfrom grr.lib import utils\nfrom grr.lib.rdfvalues import client as rdf_client\nfrom grr.lib.rdfvalues import crypto as rdf_crypto\n\n\nclass LoadComponent(actions.ActionPlugin):\n \"\"\"Launches an external client action through a component.\"\"\"\n in_rdfvalue = rdf_client.LoadComponent\n out_rdfvalue = rdf_client.LoadComponent\n\n def LoadComponent(self, summary):\n \"\"\"Import all the required modules as specified in the request.\"\"\"\n for mod_name in summary.modules:\n logging.debug(\"Will import %s\", mod_name)\n importlib.import_module(mod_name)\n\n def Run(self, request):\n \"\"\"Load the component requested.\n\n The component defines a set of python imports which should be imported into\n the running program. The purpose of this client action is to ensure that the\n imports are available and of the correct version. We ensure this by:\n\n 1) Attempt to import the relevant modules.\n\n 2) If that fails checks for the presence of a component installed at the\n require path. Attempt to import the modules again.\n\n 3) If no component is installed, we fetch and install the component from the\n server. 
We then attempt to use it.\n\n If all imports succeed we return a success status, otherwise we raise an\n exception.\n\n Args:\n request: The LoadComponent request.\n\n Raises:\n RuntimeError: If the component is invalid.\n \"\"\"\n summary = request.summary\n # Just try to load the required modules.\n try:\n self.LoadComponent(summary)\n # If we succeed we just report this component is done.\n self.SendReply(request)\n return\n except ImportError:\n pass\n\n # Try to add an existing component path.\n component_path = utils.JoinPath(\n config_lib.CONFIG.Get(\"Client.component_path\"),\n summary.name, summary.version)\n\n # Add the component path to the site packages:\n site.addsitedir(component_path)\n\n try:\n self.LoadComponent(summary)\n logging.info(\"Component %s already present.\", summary.name)\n self.SendReply(request)\n return\n\n except ImportError:\n pass\n\n # Could not import component - will have to fetch it.\n logging.info(\"Unable to import component %s.\", summary.name)\n\n # Derive the name of the component that we need depending on the current\n # architecture. The client build system should have burned its environment\n # into the client config file. This is the best choice because it will\n # choose the same component that was built together with the client\n # itself (on the same build environment).\n build_environment = config_lib.CONFIG.Get(\"Client.build_environment\")\n if not build_environment:\n # Failing this we try to get something similar to the running system.\n build_environment = rdf_client.Uname.FromCurrentSystem().signature()\n\n url = \"%s/%s\" % (summary.url, build_environment)\n logging.info(\"Fetching component from %s\", url)\n crypted_data = self.grr_worker.http_manager.OpenServerEndpoint(url).data\n\n # Decrypt and check signature. 
The cipher is created when the component is\n # uploaded and contains the key to decrypt it.\n signed_blob = rdf_crypto.SignedBlob(summary.cipher.Decrypt(crypted_data))\n\n # Ensure the blob is signed with the correct key.\n signed_blob.Verify(config_lib.CONFIG[\n \"Client.executable_signing_public_key\"])\n\n component = rdf_client.ClientComponent(signed_blob.data)\n\n # Make sure its the component we actually want.\n if (component.summary.name != summary.name or\n component.summary.version != summary.version):\n raise RuntimeError(\"Downloaded component is not the correct version\")\n\n # Make intermediate directories.\n try:\n os.makedirs(component_path)\n except (OSError, IOError):\n pass\n\n # Unzip the component into the path.\n logging.info(\"Installing component to %s\", component_path)\n component_zip = zipfile.ZipFile(StringIO.StringIO(component.raw_data))\n component_zip.extractall(component_path)\n\n # Add the component to the site packages:\n site.addsitedir(component_path)\n\n # If this does not work now, we just fail.\n self.LoadComponent(summary)\n\n # If we succeed we just report this component is done.\n self.SendReply(request)\n", "sub_path": "client/client_actions/components.py", "file_name": "components.py", "file_ext": "py", "file_size_in_byte": 4543, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "grr.client.actions.ActionPlugin", "line_number": 20, "usage_type": "attribute"}, {"api_name": "grr.client.actions", "line_number": 20, "usage_type": "name"}, {"api_name": "grr.lib.rdfvalues.client.LoadComponent", "line_number": 22, "usage_type": "attribute"}, {"api_name": "grr.lib.rdfvalues.client", "line_number": 22, "usage_type": "name"}, {"api_name": "grr.lib.rdfvalues.client.LoadComponent", "line_number": 23, "usage_type": "attribute"}, {"api_name": "grr.lib.rdfvalues.client", "line_number": 23, "usage_type": "name"}, {"api_name": "logging.debug", "line_number": 28, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 29, "usage_type": "call"}, {"api_name": "grr.lib.utils.JoinPath", "line_number": 66, "usage_type": "call"}, {"api_name": "grr.lib.utils", "line_number": 66, "usage_type": "name"}, {"api_name": "grr.lib.config_lib.CONFIG.Get", "line_number": 67, "usage_type": "call"}, {"api_name": "grr.lib.config_lib.CONFIG", "line_number": 67, "usage_type": "attribute"}, {"api_name": "grr.lib.config_lib", "line_number": 67, "usage_type": "name"}, {"api_name": "site.addsitedir", "line_number": 71, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 75, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 83, "usage_type": "call"}, {"api_name": "grr.lib.config_lib.CONFIG.Get", "line_number": 90, "usage_type": "call"}, {"api_name": "grr.lib.config_lib.CONFIG", "line_number": 90, "usage_type": "attribute"}, {"api_name": "grr.lib.config_lib", "line_number": 90, "usage_type": "name"}, {"api_name": "grr.lib.rdfvalues.client.Uname.FromCurrentSystem", "line_number": 93, "usage_type": "call"}, {"api_name": "grr.lib.rdfvalues.client.Uname", "line_number": 93, "usage_type": "attribute"}, {"api_name": "grr.lib.rdfvalues.client", "line_number": 93, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 96, "usage_type": "call"}, {"api_name": "grr.lib.rdfvalues.crypto.SignedBlob", "line_number": 101, "usage_type": "call"}, {"api_name": "grr.lib.rdfvalues.crypto", "line_number": 101, "usage_type": "name"}, {"api_name": "grr.lib.config_lib.CONFIG", 
"line_number": 104, "usage_type": "attribute"}, {"api_name": "grr.lib.config_lib", "line_number": 104, "usage_type": "name"}, {"api_name": "grr.lib.rdfvalues.client.ClientComponent", "line_number": 107, "usage_type": "call"}, {"api_name": "grr.lib.rdfvalues.client", "line_number": 107, "usage_type": "name"}, {"api_name": "os.makedirs", "line_number": 116, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 121, "usage_type": "call"}, {"api_name": "zipfile.ZipFile", "line_number": 122, "usage_type": "call"}, {"api_name": "StringIO.StringIO", "line_number": 122, "usage_type": "call"}, {"api_name": "site.addsitedir", "line_number": 126, "usage_type": "call"}]} +{"seq_id": "63669401", "text": "from yolo import YOLO\nfrom timeit import default_timer as timer\nfrom yolo3.utils import resize_image, resize_image_2\nfrom PIL import Image\nimport numpy as np\nimport keras.backend as K\nimport glob\nimport os\nimport os.path as osp\nfrom tqdm import tqdm, trange\nimport cv2\nimport h5py\nimport sys\nimport pickle\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n\ndef intersection_area(boxes1, boxes2, mode='outer_product', border_pixels='half'):\n \"\"\"\n Computes the intersection areas of two sets of axis-aligned 2D rectangular boxes.\n\n Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively. They must be in corner format.\n\n In 'outer_product' mode, returns an `(m,n)` matrix with the intersection areas for all possible combinations of the\n boxes in `boxes1` and `boxes2`.\n In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation of the `mode` argument\n for details.\n\n Arguments:\n boxes1 (np.array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the\n corner format or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes.\n If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes2`.\n boxes2 (np.array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the\n corner format or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.\n If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes1`.\n mode (str, optional): Can be one of 'outer_product' and 'element-wise'.\n In 'outer_product' mode, returns an `(m,n)` matrix with the intersection areas for all possible combinations\n of the `m` boxes in `boxes1` with the `n` boxes in `boxes2`.\n In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2` must be\n broadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of length\n `m` where the i-th position contains the intersection area of `boxes1[i]` with `boxes2[i]`.\n border_pixels (str, optional): How to treat the border pixels of the bounding boxes.\n Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong\n to the boxes. 
If 'exclude', the border pixels do not belong to the boxes.\n            If 'half', then one of each of the two horizontal and vertical borders belongs\n            to the boxes, but not the other.\n\n    Returns:\n        A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values with\n        the intersection areas of the boxes in `boxes1` and `boxes2`.\n    \"\"\"\n\n    # Make sure the boxes have the right shapes.\n    if boxes1.ndim > 2:\n        raise ValueError(\"boxes1 must have rank either 1 or 2, but has rank {}.\".format(boxes1.ndim))\n    if boxes2.ndim > 2:\n        raise ValueError(\"boxes2 must have rank either 1 or 2, but has rank {}.\".format(boxes2.ndim))\n\n    if boxes1.ndim == 1:\n        boxes1 = np.expand_dims(boxes1, axis=0)\n    if boxes2.ndim == 1:\n        boxes2 = np.expand_dims(boxes2, axis=0)\n\n    if not (boxes1.shape[1] == boxes2.shape[1] == 4):\n        raise ValueError(\"All boxes must consist of 4 coordinates, \"\n                         \"but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.\"\n                         .format(boxes1.shape[1], boxes2.shape[1]))\n    if mode not in {'outer_product', 'element-wise'}:\n        raise ValueError(\"`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.\".format(mode))\n\n    # The number of boxes in `boxes1`\n    m = boxes1.shape[0]\n    # The number of boxes in `boxes2`\n    n = boxes2.shape[0]\n\n    if border_pixels == 'half':\n        d = 0\n    # If border pixels are supposed to belong to the bounding boxes,\n    # we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.\n    elif border_pixels == 'include':\n        d = 1\n    # If border pixels are not supposed to belong to the bounding boxes,\n    # we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.\n    elif border_pixels == 'exclude':\n        d = -1\n    else:\n        raise ValueError('`border_pixels` must be one of half, include and exclude')\n\n    # Compute the intersection areas.\n    if mode == 'outer_product':\n        # For all possible box combinations, get the greater 0 and 1 values.\n        # This is a tensor of shape (m,n,2).\n        # np.expand_dims first makes the boxes 3-D: boxes1 becomes (m, 1, 2) and boxes2 becomes (1, n, 2).\n        # np.tile then brings boxes1 and boxes2 to the same shape (m, n, 2),\n        # and np.maximum compares them element-wise.\n        min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:, [0, 1]], axis=1), reps=(1, n, 1)),\n                            np.tile(np.expand_dims(boxes2[:, [0, 1]], axis=0), reps=(m, 1, 1)))\n\n        # For all possible box combinations, get the smaller 2 and 3 values.\n        # This is a tensor of shape (m,n,2).\n        max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:, [2, 3]], axis=1), reps=(1, n, 1)),\n                            np.tile(np.expand_dims(boxes2[:, [2, 3]], axis=0), reps=(m, 1, 1)))\n\n        # Compute the side lengths of the intersection rectangles.\n        side_lengths = np.maximum(0, max_xy - min_xy + d)\n        # side_lengths[:, :, 0] is the width and side_lengths[:, :, 1] is the height; their product is the area.\n        return side_lengths[:, :, 0] * side_lengths[:, :, 1]\n\n    elif mode == 'element-wise':\n        # Suppose boxes1[:, [0, 1]] has shape (1, 2) and boxes2[:, [0, 1]] has shape (n, 2):\n        # for the maximum/minimum ops, boxes1 is broadcast to (n, 2) and compared element by element,\n        # so the result also has shape (n, 2).\n        min_xy = np.maximum(boxes1[:, [0, 1]], boxes2[:, [0, 1]])\n        max_xy = np.minimum(boxes1[:, [2, 3]], boxes2[:, [2, 3]])\n\n        # Compute the side lengths of the intersection rectangles.\n        side_lengths = np.maximum(0, max_xy - min_xy + d)\n        return side_lengths[:, 0] * side_lengths[:, 1]\n\n\ndef iou(boxes1, boxes2, mode='outer_product', border_pixels='half'):\n    \"\"\"\n    Computes the intersection-over-union similarity (also known as Jaccard similarity) of two sets of axis-aligned 2D\n    
rectangular boxes.\n\n Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.\n\n In 'outer_product' mode, returns an `(m,n)` matrix with the IoUs for all possible combinations of the boxes in\n `boxes1` and `boxes2`.\n In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation of the `mode` argument\n for details.\n\n Arguments:\n boxes1 (np.array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the\n corner format or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes.\n If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes2`.\n boxes2 (np.array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the\n corner format or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.\n If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes1`.\n mode (str, optional): Can be one of 'outer_product' and 'element-wise'.\n In 'outer_product' mode, returns an `(m,n)` matrix with the IoU overlaps for all possible combinations of\n the `m` boxes in `boxes1` with the `n` boxes in `boxes2`.\n In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2` must be\n broadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of\n length `m` where the i-th position contains the IoU overlap of `boxes1[i]` with `boxes2[i]`.\n border_pixels (str, optional): How to treat the border pixels of the bounding boxes.\n Can be 'include', 'exclude', or 'half'.\n If 'include', the border pixels belong to the boxes.\n If 'exclude', the border pixels do not belong to the boxes.\n If 'half', then one of each of the two horizontal and vertical borders belong to the boxes, but not the\n other.\n\n Returns:\n A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values in [0,1],\n the Jaccard similarity of the boxes in `boxes1` and `boxes2`.\n 0 means there is no overlap between two given boxes,\n 1 means their coordinates are identical.\n \"\"\"\n\n #########################################################################################\n # Check for arguments' validation\n #########################################################################################\n # Make sure the boxes have the right shapes.\n if boxes1.ndim > 2:\n raise ValueError(\"boxes1 must have rank either 1 or 2, but has rank {}.\".format(boxes1.ndim))\n if boxes2.ndim > 2:\n raise ValueError(\"boxes2 must have rank either 1 or 2, but has rank {}.\".format(boxes2.ndim))\n\n if boxes1.ndim == 1:\n boxes1 = np.expand_dims(boxes1, axis=0)\n if boxes2.ndim == 1:\n boxes2 = np.expand_dims(boxes2, axis=0)\n\n if not (boxes1.shape[1] == boxes2.shape[1] == 4):\n raise ValueError(\"All boxes must consist of 4 coordinates, \"\n \"but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.\".format(\n boxes1.shape[1], boxes2.shape[1]))\n if mode not in {'outer_product', 'element-wise'}:\n raise ValueError(\"`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.\".format(mode))\n\n #########################################################################################\n # Compute the IoU\n #########################################################################################\n\n # Compute the intersection areas.\n intersection_areas = intersection_area(boxes1, boxes2, mode=mode)\n\n # The number of boxes in 
`boxes1`\n    m = boxes1.shape[0]\n    # The number of boxes in `boxes2`\n    n = boxes2.shape[0]\n\n    # Compute the union areas.\n    # Set the correct coordinate indices for the respective formats.\n    if border_pixels == 'half':\n        d = 0\n    # If border pixels are supposed to belong to the bounding boxes,\n    # we have to add one pixel to any difference `x_max - x_min` or `y_max - y_min`.\n    elif border_pixels == 'include':\n        d = 1\n    # If border pixels are not supposed to belong to the bounding boxes,\n    # we have to subtract one pixel from any difference `x_max - x_min` or `y_max - y_min`.\n    elif border_pixels == 'exclude':\n        d = -1\n    else:\n        raise ValueError('`border_pixels` must be one of half, include and exclude')\n\n    if mode == 'outer_product':\n        # Each row holds the same number n times: the area of one box in boxes1; there are m such rows.\n        boxes1_areas = np.tile(\n            np.expand_dims((boxes1[:, 2] - boxes1[:, 0] + d) * (boxes1[:, 3] - boxes1[:, 1] + d), axis=1),\n            reps=(1, n))\n        # Each row holds n different numbers: the areas of all boxes in boxes2; all m rows are identical.\n        boxes2_areas = np.tile(\n            np.expand_dims((boxes2[:, 2] - boxes2[:, 0] + d) * (boxes2[:, 3] - boxes2[:, 1] + d), axis=0),\n            reps=(m, 1))\n    # mode == 'element-wise'\n    else:\n        # Suppose boxes1 has shape (1, 4); then boxes1_areas has shape (1,),\n        # and if boxes2 has shape (n, 4), boxes2_areas has shape (n,).\n        # Adding the two below broadcasts once, so the sum also has shape (n,).\n        boxes1_areas = (boxes1[:, 2] - boxes1[:, 0] + d) * (boxes1[:, 3] - boxes1[:, 1] + d)\n        boxes2_areas = (boxes2[:, 2] - boxes2[:, 0] + d) * (boxes2[:, 3] - boxes2[:, 1] + d)\n\n    # boxes1_areas + boxes2_areas has shape (m,n), (m,) or (n,):\n    # if (m,n), each row holds the sums of one boxes1 area with all boxes2 areas;\n    # if (m,), each element is the sum of one boxes1 area and the boxes2 area;\n    # if (n,), each element is the sum of one boxes2 area and the boxes1 area.\n    union_areas = boxes1_areas + boxes2_areas - intersection_areas\n\n    return intersection_areas / union_areas\n\n\ndef get_predictions_per_class(yolo,\n                              model_input_size,\n                              image_paths,\n                              num_classes=20,\n                              ):\n    \"\"\"\n\n    Args:\n        yolo: yolo model\n        model_input_size (tuple): (w, h)\n        image_paths (tuple/list):\n        num_classes (int):\n\n    Returns:\n\n    \"\"\"\n    num_images = len(image_paths)\n    batch_size = 16\n    # Element 0 represents the background class.\n    predictions_per_class = [list() for _ in range(num_classes + 1)]\n\n    for i in range(0, num_images, batch_size):\n        if i + batch_size > num_images:\n            batch_image_paths = image_paths[i:]\n        else:\n            batch_image_paths = image_paths[i:i + batch_size]\n        batch_images_data = []\n        batch_image_shapes = []\n        for image_path in batch_image_paths:\n            image = Image.open(image_path)\n            boxed_image = resize_image(image, model_input_size)\n            image_data = np.array(boxed_image, dtype='float32')\n            # (h, w, 3)\n            # print(image_data.shape)\n            image_data /= 255.\n            batch_images_data.append(image_data)\n            batch_image_shapes.append((image.size[1], image.size[0]))\n        batch_images_data = np.array(batch_images_data)\n        batch_image_shapes = np.array(batch_image_shapes)\n        batch_pred_boxes, batch_pred_scores, batch_pred_classes = yolo.sess.run(\n            [yolo.boxes, yolo.scores, yolo.classes],\n            feed_dict={\n                yolo.yolo_model.input: batch_images_data,\n                # yolo.image_shape: np.stack([[416, 416]] * len(batch_image_paths)),\n                yolo.image_shape: batch_image_shapes,\n                K.learning_phase(): 0\n            })\n        for j, batch_item_pred_boxes in enumerate(batch_pred_boxes):\n            # Strip the padded part of the output.\n            batch_item_pred_classes = batch_pred_classes[j]\n            batch_item_pred_scores = batch_pred_scores[j]\n            batch_item_pred_boxes = batch_item_pred_boxes[batch_item_pred_scores > 0.0]\n            batch_item_pred_classes = batch_item_pred_classes[batch_item_pred_scores > 0.0]\n            
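# NOTE: the session output appears to be zero-padded to a fixed number of boxes per image, so entries with a score of 0.0 are treated as padding and dropped.\n            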
\n\ndef get_predictions_per_class(yolo,\n                              model_input_size,\n                              image_paths,\n                              num_classes=20,\n                              ):\n    \"\"\"\n\n    Args:\n        yolo: yolo model\n        model_input_size (tuple): (w, h)\n        image_paths (tuple/list):\n        num_classes (int):\n\n    Returns:\n\n    \"\"\"\n    num_images = len(image_paths)\n    batch_size = 16\n    # The 0-th element represents the background class.\n    predictions_per_class = [list() for _ in range(num_classes + 1)]\n\n    for i in range(0, num_images, batch_size):\n        if i + batch_size > num_images:\n            batch_image_paths = image_paths[i:]\n        else:\n            batch_image_paths = image_paths[i:i + batch_size]\n        batch_images_data = []\n        batch_image_shapes = []\n        for image_path in batch_image_paths:\n            image = Image.open(image_path)\n            boxed_image = resize_image(image, model_input_size)\n            image_data = np.array(boxed_image, dtype='float32')\n            # (h, w, 3)\n            # print(image_data.shape)\n            image_data /= 255.\n            batch_images_data.append(image_data)\n            batch_image_shapes.append((image.size[1], image.size[0]))\n        batch_images_data = np.array(batch_images_data)\n        batch_image_shapes = np.array(batch_image_shapes)\n        batch_pred_boxes, batch_pred_scores, batch_pred_classes = yolo.sess.run(\n            [yolo.boxes, yolo.scores, yolo.classes],\n            feed_dict={\n                yolo.yolo_model.input: batch_images_data,\n                # yolo.image_shape: np.stack([[416, 416]] * len(batch_image_paths)),\n                yolo.image_shape: batch_image_shapes,\n                K.learning_phase(): 0\n            })\n        for j, batch_item_pred_boxes in enumerate(batch_pred_boxes):\n            # Strip the padded entries; keep only detections with a positive score.\n            batch_item_pred_classes = batch_pred_classes[j]\n            batch_item_pred_scores = batch_pred_scores[j]\n            batch_item_pred_boxes = batch_item_pred_boxes[batch_item_pred_scores > 0.0]\n            batch_item_pred_classes = batch_item_pred_classes[batch_item_pred_scores > 0.0]\n            batch_item_pred_scores = batch_item_pred_scores[batch_item_pred_scores > 0.0]\n            image_id = osp.split(batch_image_paths[j])[-1][:-4]\n            # print('Found {} boxes for image {}'.format(len(pred_boxes), image_id))\n            image = cv2.imread(batch_image_paths[j])\n            for k, pred_box in enumerate(batch_item_pred_boxes):\n                class_id = int(batch_item_pred_classes[k])\n                score = batch_item_pred_scores[k]\n                ymin = max(int(round(pred_box[0])), 0)\n                xmin = max(int(round(pred_box[1])), 0)\n                ymax = min(int(round(pred_box[2])), batch_image_shapes[j][0])\n                xmax = min(int(round(pred_box[3])), batch_image_shapes[j][1])\n                prediction = (image_id, score, xmin, ymin, xmax, ymax)\n                # The 0-th element represents the background class.\n                predictions_per_class[int(class_id) + 1].append(prediction)\n                cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)\n            cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n            cv2.imshow('image', image)\n            cv2.waitKey(0)\n    return predictions_per_class\n\n\ndef get_hdf5_data(hdf5_dataset_path,\n                  num_classes=20,\n                  verbose=True,\n                  ret=True):\n    \"\"\"\n    Counts the number of ground truth boxes for each class across the dataset.\n\n    Loads self.data_generator.labels; each element is an np.array holding the coordinates and class_id of all\n    gt_boxes on one image. It iterates over these arrays and counts the gt_boxes of every class by their class_id.\n    It returns an array of length num_classes+1 in which each element is the number of gt_boxes of one class.\n\n    Arguments:\n        hdf5_dataset_path (str):\n        num_classes (int):\n        verbose (bool, optional): If `True`, will print out the progress during runtime.\n        ret (bool, optional): If `True`, returns the list of counts.\n\n    Returns:\n        None by default. Optionally, a list containing a count of the number of ground truth boxes for each class\n        across the entire dataset.\n    \"\"\"\n    hdf5_dataset = h5py.File(hdf5_dataset_path, 'r')\n    labels = []\n    labels_dataset = hdf5_dataset['labels']\n    label_shapes_dataset = hdf5_dataset['label_shapes']\n    image_ids = []\n    image_ids_dataset = hdf5_dataset['image_ids']\n    image_shapes = []\n    image_shapes_dataset = hdf5_dataset['image_shapes']\n    dataset_size = len(labels_dataset)\n    if verbose:\n        tr = trange(dataset_size, desc='Loading data', file=sys.stdout)\n    else:\n        tr = range(dataset_size)\n    for i in tr:\n        labels.append(labels_dataset[i].reshape(label_shapes_dataset[i]))\n        image_ids.append(image_ids_dataset[i])\n        image_shapes.append(image_shapes_dataset[i])\n    # Holds how many gt_boxes each class has; the first element is for the background class.\n    num_gt_per_class = np.zeros(shape=num_classes + 1, dtype=np.int)\n\n    if verbose:\n        tr = trange(len(labels), file=sys.stdout)\n        tr.set_description('Computing the number of positive ground truth boxes per class.')\n    else:\n        tr = range(len(labels))\n\n    # Iterate over the ground truth for all images in the dataset.\n    for i in tr:\n        boxes = labels[i]\n        # Iterate over all ground truth boxes for the current image.\n        for j in range(boxes.shape[0]):\n            class_id = boxes[j, 0]\n            num_gt_per_class[class_id] += 1\n\n    if verbose:\n        print('dataset_size={}'.format(dataset_size))\n        print('num_gt_per_class={}'.format(num_gt_per_class))\n\n    if ret:\n        return dataset_size, labels, image_ids, num_gt_per_class, image_shapes\n
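\n\n# ---------------------------------------------------------------------------------\n# Added illustration (not part of the original module): `_demo_count_gt` is a hypothetical,\n# never-called helper that mirrors the counting loop of `get_hdf5_data` on in-memory toy\n# labels instead of an HDF5 file. Each label row is `(class_id, xmin, ymin, xmax, ymax)`.\n# ---------------------------------------------------------------------------------\ndef _demo_count_gt(num_classes=2):\n    labels = [np.array([[1, 0, 0, 10, 10], [2, 5, 5, 20, 20]]),  # image 0: class 1 and class 2\n              np.array([[2, 1, 1, 8, 8]])]                       # image 1: class 2\n    num_gt_per_class = np.zeros(shape=num_classes + 1, dtype=np.int)\n    for boxes in labels:\n        for j in range(boxes.shape[0]):\n            num_gt_per_class[boxes[j, 0]] += 1\n    print(num_gt_per_class)  # expected: [0 1 2], index 0 being the background class\n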
\n\ndef match_predictions(predictions_per_class,\n                      dataset_size,\n                      labels,\n                      image_ids,\n                      num_gt_per_class,\n                      num_classes=20,\n                      matching_iou_threshold=0.5,\n                      border_pixels='half',\n                      sorting_algorithm='quicksort',\n                      verbose=True,\n                      ret=True):\n    \"\"\"\n    Matches predictions to ground truth boxes.\n\n    Note that `get_predictions_per_class()` must be called before calling this method.\n    1. Iterate over the predictions of every class.\n    2. Sort the predictions by confidence.\n    3. Walk through the predictions in order of descending confidence.\n    4. Using the prediction's image_id, look up in data_generator.labels all gt_boxes of the matching class_id on that image.\n    5. If there are no such gt_boxes, the prediction is a false positive.\n    6. Otherwise, compute the iou between the prediction and the gt_boxes.\n    7. Take the largest iou and the corresponding gt_box_index.\n    8. If that iou > threshold:\n    9. Check whether this gt_box has already been matched.\n    10. If it has not, the prediction --> true positive.\n    11. If it has, the prediction --> false positive.\n    12. Otherwise, the prediction --> false positive.\n    Arguments:\n        predictions_per_class (list):\n        dataset_size (int):\n        labels (list):\n        image_ids (list):\n        num_gt_per_class (np.array):\n        num_classes (int):\n        matching_iou_threshold (float, optional): A prediction will be considered a true positive if it has a\n            Jaccard overlap of at least `matching_iou_threshold` with any ground truth bounding box of the same\n            class.\n        border_pixels (str, optional): How to treat the border pixels of the bounding boxes. Can be 'include',\n            'exclude', or 'half'.\n            If 'include', the border pixels belong to the boxes.\n            If 'exclude', the border pixels do not belong to the boxes.\n            If 'half', then one of each of the two horizontal and vertical borders belongs to the boxes, but not the\n            other.\n        sorting_algorithm (str, optional): Which sorting algorithm the matching algorithm should use. This argument\n            accepts any valid sorting algorithm for Numpy's `argsort()` function. You will usually want to choose\n            between 'quicksort' (fastest and most memory efficient, but not stable) and 'mergesort' (slightly slower\n            and less memory efficient, but stable). The official Matlab evaluation algorithm uses a stable sorting\n            algorithm, so this algorithm is only guaranteed to behave identically if you choose 'mergesort' as the\n            sorting algorithm, but it will almost always behave identically even if you choose 'quicksort'\n            (but no guarantees).\n        verbose (bool, optional): If `True`, will print out the progress during runtime.\n        ret (bool, optional): If `True`, returns the true and false positives.\n\n    Returns:\n        None by default. Optionally, four nested lists containing the true positives, false positives, cumulative\n        true positives, and cumulative false positives for each class.\n    \"\"\"\n\n    if labels is None:\n        raise ValueError(\"Matching predictions to ground truth boxes not possible, no ground truth given.\")\n\n    if num_gt_per_class is None:\n        raise ValueError(\"There are no ground truth numbers. \"\n                         \"You must run `get_num_gt_per_class()` before calling this method.\")\n    if predictions_per_class is None:\n        raise ValueError(\"There are no prediction results. \"\n                         \"You must run `get_predictions_per_class()` before calling this method.\")\n\n    # Convert the ground truth to a more efficient format for what we need to do, which is access ground truth by\n    # image ID repeatedly.\n    ground_truth = {}\n    for i in range(dataset_size):\n        image_id = image_ids[i]\n        label = labels[i]\n        ground_truth[image_id] = label\n\n    # The true positives for each class, sorted by descending confidence. 
The first [] is for the background class.\n    # Every other element has the same length as the predictions list of its class.\n    true_positives = [[]]\n    # The false positives for each class, sorted by descending confidence. The first [] is for the background class.\n    false_positives = [[]]\n    cumulative_true_positives = [[]]\n    cumulative_false_positives = [[]]\n\n    # Iterate over all non-background classes.\n    for class_id in range(1, num_classes + 1):\n        predictions = predictions_per_class[class_id]\n        # Store the matching results in these lists:\n        # 1 for every prediction that is a true positive, 0 otherwise\n        true_pos = np.zeros(len(predictions), dtype=np.int)\n        # 1 for every prediction that is a false positive, 0 otherwise\n        false_pos = np.zeros(len(predictions), dtype=np.int)\n\n        # In case there are no predictions at all for this class, we're done here.\n        if len(predictions) == 0:\n            print(\"No predictions for class {}/{}\".format(class_id, num_classes))\n            true_positives.append(true_pos)\n            false_positives.append(false_pos)\n            # Cumulative sums of the true positives\n            cumulative_true_pos = np.cumsum(true_pos)\n            # Cumulative sums of the false positives\n            cumulative_false_pos = np.cumsum(false_pos)\n            cumulative_true_positives.append(cumulative_true_pos)\n            cumulative_false_positives.append(cumulative_false_pos)\n            continue\n\n        # Convert the predictions list for this class into a structured array so that we can sort it by confidence.\n        # Get the number of characters needed to store the image ID strings in the structured array.\n        # Keep a few characters buffer in case some image IDs are longer than others.\n        num_chars_of_image_id = len(str(predictions[0][0])) + 6\n        # Create the data type for the structured array.\n        # See https://docs.scipy.org/doc/numpy/reference/generated/numpy.dtype.html\n        # 'U' is a unicode string whose length is the number after it; 'f4' is a 4-byte float.\n        preds_data_type = np.dtype([('image_id', 'U{}'.format(num_chars_of_image_id)),\n                                    ('confidence', 'f4'),\n                                    ('xmin', 'f4'),\n                                    ('ymin', 'f4'),\n                                    ('xmax', 'f4'),\n                                    ('ymax', 'f4')])\n        # Create the structured array\n        predictions = np.array(predictions, dtype=preds_data_type)\n        # Sort the detections by decreasing confidence.\n        descending_indices = np.argsort(-predictions['confidence'], kind=sorting_algorithm)\n        predictions_sorted = predictions[descending_indices]\n
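\n        # Added illustration (comment only, not in the original): given two toy predictions\n        #     [('img0', 0.3, ...), ('img0', 0.9, ...)]\n        # the structured array makes `predictions['confidence']` -> [0.3, 0.9], so\n        # `np.argsort(-predictions['confidence'])` is [1, 0] and `predictions_sorted`\n        # starts with the 0.9 detection.\n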
\n        if verbose:\n            tr = trange(len(predictions), file=sys.stdout)\n            tr.set_description(\n                \"Matching predictions to ground truth, class {}/{}.\".format(class_id, num_classes))\n        else:\n            tr = range(len(predictions))\n\n        # Keep track of which ground truth boxes were already matched to a detection.\n        # The key is an image_id; the value is an np.array of length this_num_gt_boxes in which each element\n        # says whether that gt_box has already been matched to a prediction.\n        gt_matched = {}\n        # Iterate over all predictions.\n        for i in tr:\n            prediction = predictions_sorted[i]\n            image_id = prediction['image_id']\n            # Convert the structured array element to a regular array.\n            pred_box = np.asarray(list(prediction[['xmin', 'ymin', 'xmax', 'ymax']]))\n\n            # Get the relevant ground truth boxes for this prediction,\n            # i.e. all ground truth boxes that match the prediction's image ID and class ID.\n\n            # The ground truth could either be a tuple with `(ground_truth_boxes, eval_neutral_boxes)`\n            # or only `ground_truth_boxes`.\n            # Find all gt boxes on the image this prediction belongs to.\n            gt = ground_truth[image_id]\n            # Keep only the gt boxes that belong to the current class_id.\n            class_mask = gt[:, 0] == class_id\n            gt = gt[class_mask]\n            if gt.size == 0:\n                # If the image doesn't contain any objects of this class, the prediction becomes a false positive.\n                false_pos[i] = 1\n                continue\n\n            # Compute the IoU of this prediction with all ground truth boxes of the same class.\n            overlaps = iou(boxes1=gt[:, 1:],\n                           boxes2=pred_box,\n                           mode='element-wise',\n                           border_pixels=border_pixels)\n\n            # For each detection, match the ground truth box with the highest overlap.\n            # It's possible that the same ground truth box will be matched to multiple detections.\n            # Find the gt_box with the largest iou against the current prediction, and that overlap.\n            gt_match_index = np.argmax(overlaps)\n            gt_match_overlap = overlaps[gt_match_index]\n\n            # If the largest overlap is below the threshold, the prediction is a false positive.\n            if gt_match_overlap < matching_iou_threshold:\n                # False positive, IoU threshold violated:\n                # Those predictions whose matched overlap is below the threshold become false positives.\n                false_pos[i] = 1\n            else:\n                if image_id not in gt_matched:\n                    # True positive:\n                    # If the matched ground truth box for this prediction hasn't been matched to a different\n                    # prediction already, we have a true positive.\n                    true_pos[i] = 1\n                    gt_matched[image_id] = np.zeros(shape=(gt.shape[0]), dtype=np.bool)\n                    gt_matched[image_id][gt_match_index] = True\n                # This gt_box has not been matched yet.\n                elif not gt_matched[image_id][gt_match_index]:\n                    # True positive:\n                    # If the matched ground truth box for this prediction hasn't been matched to a different\n                    # prediction already, we have a true positive.\n                    true_pos[i] = 1\n                    gt_matched[image_id][gt_match_index] = True\n                else:\n                    # False positive, duplicate detection:\n                    # If the matched ground truth box for this prediction has already been matched to a\n                    # different prediction previously, it is a duplicate detection for an already detected\n                    # object, which counts as a false positive.\n                    false_pos[i] = 1\n\n        true_positives.append(true_pos)\n        false_positives.append(false_pos)\n        # Cumulative sums of the true positives\n        cumulative_true_pos = np.cumsum(true_pos)\n        # Cumulative sums of the false positives\n        cumulative_false_pos = np.cumsum(false_pos)\n        cumulative_true_positives.append(cumulative_true_pos)\n        cumulative_false_positives.append(cumulative_false_pos)\n\n    if ret:\n        return true_positives, false_positives, cumulative_true_positives, cumulative_false_positives\n
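\n\n# ---------------------------------------------------------------------------------\n# Added worked example (not part of the original module): `_demo_match` is a hypothetical,\n# never-called helper showing the greedy matching on one image with a single class-1 gt\n# box and two overlapping detections. The higher-confidence detection claims the gt box\n# (true positive); the second one is a duplicate and becomes a false positive.\n# ---------------------------------------------------------------------------------\ndef _demo_match():\n    labels = [np.array([[1, 10, 10, 50, 50]])]       # one gt box of class 1 on image 'img0'\n    predictions = [[],                               # index 0: background\n                   [('img0', 0.9, 12, 12, 48, 48),   # IoU ~0.81 -> true positive\n                    ('img0', 0.8, 11, 11, 49, 49)]]  # IoU ~0.90, gt already matched -> false positive\n    tp, fp, ctp, cfp = match_predictions(predictions, dataset_size=1, labels=labels,\n                                         image_ids=['img0'], num_gt_per_class=np.array([0, 1]),\n                                         num_classes=1, verbose=False)\n    print(tp[1], fp[1])    # expected: [1 0] [0 1]\n    print(ctp[1], cfp[1])  # expected: [1 1] [0 1]\n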
\n\ndef compute_precision_recall(cumulative_true_positives,\n                             cumulative_false_positives,\n                             num_gt_per_class,\n                             num_classes=20,\n                             verbose=True,\n                             ret=True):\n    \"\"\"\n    Computes the precisions and recalls for all classes.\n\n    Note that `match_predictions()` must be called before calling this method.\n\n    Computes cumulative_precision and cumulative_recall from self.cumulative_true_positive and\n    self.cumulative_false_positive.\n    Arguments:\n        cumulative_true_positives (list):\n        cumulative_false_positives (list):\n        num_gt_per_class (np.array):\n        num_classes (int):\n        verbose (bool, optional): If `True`, will print out the progress during runtime.\n        ret (bool, optional): If `True`, returns the precisions and recalls.\n\n    Returns:\n        None by default.\n        Optionally, two nested lists containing the cumulative precisions and recalls for each class.\n    \"\"\"\n\n    if (cumulative_true_positives is None) or (cumulative_false_positives is None):\n        raise ValueError(\"True and false positives not available.\"\n                         \" You must run `match_predictions()` before you call this method.\")\n\n    if num_gt_per_class is None:\n        raise ValueError(\"Number of ground truth boxes per class not available. \"\n                         \"You must run `get_num_gt_per_class()` before you call this method.\")\n\n    cumulative_precisions = [[]]\n    cumulative_recalls = [[]]\n\n    # Iterate over all classes.\n    for class_id in range(1, num_classes + 1):\n        if verbose:\n            print(\"Computing precisions and recalls, class {}/{}\".format(class_id, num_classes))\n        tp = cumulative_true_positives[class_id]\n        fp = cumulative_false_positives[class_id]\n        # 1D array with shape `(num_predictions,)`\n        cumulative_precision = np.where(tp + fp > 0, tp / (tp + fp), 0)\n        # 1D array with shape `(num_predictions,)`\n        # Note that num_gt_per_class already has the neutral boxes removed.\n        cumulative_recall = tp / num_gt_per_class[class_id]\n        cumulative_precisions.append(cumulative_precision)\n        cumulative_recalls.append(cumulative_recall)\n\n    if ret:\n        return cumulative_precisions, cumulative_recalls\n
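\n\n# ---------------------------------------------------------------------------------\n# Added worked example (not part of the original module): `_demo_precision_recall` is a\n# hypothetical, never-called helper. For one class with 3 ground truth boxes and the\n# confidence-sorted cumulative counts ctp = [1, 2, 2] and cfp = [0, 0, 1], the k-th\n# precision is ctp[k] / (ctp[k] + cfp[k]) and the k-th recall is ctp[k] / 3.\n# ---------------------------------------------------------------------------------\ndef _demo_precision_recall():\n    ctp = [[], np.array([1, 2, 2])]\n    cfp = [[], np.array([0, 0, 1])]\n    precisions, recalls = compute_precision_recall(ctp, cfp, num_gt_per_class=np.array([0, 3]),\n                                                   num_classes=1, verbose=False)\n    print(precisions[1])  # expected: [1.0, 1.0, ~0.667]\n    print(recalls[1])     # expected: [~0.333, ~0.667, ~0.667]\n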
\n\ndef compute_average_precisions(cumulative_precisions,\n                               cumulative_recalls,\n                               num_classes=20,\n                               mode='sample',\n                               num_recall_points=11,\n                               verbose=True,\n                               ret=True):\n    \"\"\"\n    Computes the average precision for each class.\n\n    Can compute the Pascal-VOC-style average precision in both the pre-2010 (k-point sampling) and post-2010\n    (integration) algorithm versions.\n\n    Note that `compute_precision_recall()` must be called before calling this method.\n\n    Arguments:\n        cumulative_precisions (list):\n        cumulative_recalls (list):\n        num_classes (int):\n        mode (str, optional): Can be either 'sample' or 'integrate'.\n            In the case of 'sample', the average precision will be computed according to the Pascal VOC formula that\n            was used up until VOC 2009, where the precision will be sampled for `num_recall_points` recall values.\n            In the case of 'integrate', the average precision will be computed according to the Pascal VOC formula\n            that was used from VOC 2010 onward, where the average precision will be computed by numerically\n            integrating over the whole precision-recall curve instead of sampling individual points from it.\n            'integrate' mode is basically just the limit case of 'sample' mode as the number of sample points\n            increases. For details, see the references below.\n        num_recall_points (int, optional): Only relevant if mode is 'sample'. The number of points to sample from\n            the precision-recall-curve to compute the average precisions. In other words, this is the number of\n            equidistant recall values for which the resulting precision will be computed. 11 points is the value\n            used in the official Pascal VOC pre-2010 detection evaluation algorithm.\n        verbose (bool, optional): If `True`, will print out the progress during runtime.\n        ret (bool, optional): If `True`, returns the average precisions.\n\n    Returns:\n        None by default. Optionally, a list containing average precision for each class.\n\n    References:\n        http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/devkit_doc.html#sec:ap\n    \"\"\"\n\n    if (cumulative_precisions is None) or (cumulative_recalls is None):\n        raise ValueError(\"Precisions and recalls not available. \"\n                         \"You must run `compute_precision_recall()` before you call this method.\")\n\n    if mode not in {'sample', 'integrate'}:\n        raise ValueError(\"`mode` can be either 'sample' or 'integrate', but received '{}'\".format(mode))\n\n    average_precisions = [0.0]\n\n    # Iterate over all classes.\n    for class_id in range(1, num_classes + 1):\n        if verbose:\n            print(\"Computing average precision, class {}/{}\".format(class_id, num_classes))\n\n        cumulative_precision = cumulative_precisions[class_id]\n        cumulative_recall = cumulative_recalls[class_id]\n        average_precision = 0.0\n\n        # See https://github.com/rafaelpadilla/Object-Detection-Metrics\n        if mode == 'sample':\n            for t in np.linspace(start=0, stop=1, num=num_recall_points, endpoint=True):\n                # All precisions whose recall is >= t.\n                cum_prec_recall_greater_t = cumulative_precision[cumulative_recall >= t]\n                if cum_prec_recall_greater_t.size == 0:\n                    precision = 0.0\n                else:\n                    # The largest precision is taken as the precision at this recall point.\n                    precision = np.amax(cum_prec_recall_greater_t)\n                average_precision += precision\n            average_precision /= num_recall_points\n        elif mode == 'integrate':\n            # We will compute the precision at all unique recall values.\n            # Each element of unique_recall_indices is the index in cumulative_recall of the corresponding\n            # element of unique_recalls.\n            unique_recalls, unique_recall_indices, unique_recall_counts = np.unique(cumulative_recall,\n                                                                                   return_index=True,\n                                                                                   return_counts=True)\n            # Store the maximal precision for each recall value and the absolute difference between any two unique\n            # recall values in the lists below. The products of these two numbers constitute the rectangular areas\n            # whose sum will be our numerical integral.\n            maximal_precisions = np.zeros_like(unique_recalls)\n            # adam: set the maximal precision of the last recall value.\n            maximal_precisions[-1] = np.amax(cumulative_precision[unique_recall_indices[-1]:])\n            recall_deltas = np.zeros_like(unique_recalls)\n            # adam: set the first recall_delta, which is the distance from the first recall down to 0.\n            recall_deltas[0] = unique_recalls[0]\n
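            # Added numeric walk-through (comment only, not in the original): suppose\n            #     cumulative_recall    = [0.5, 0.5, 1.0]\n            #     cumulative_precision = [1.0, 0.5, 0.67]\n            # Then unique_recalls = [0.5, 1.0] with unique_recall_indices = [0, 2],\n            # maximal_precisions[-1] = max(precision[2:]) = 0.67, recall_deltas[0] = 0.5.\n            # The loop below sets maximal_precisions[0] = max(max(precision[0:2]), 0.67) = 1.0\n            # and recall_deltas[1] = 1.0 - 0.5 = 0.5, so AP = 1.0*0.5 + 0.67*0.5 = 0.835.\n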
            # Iterate over all unique recall values in reverse order. This saves a lot of computation:\n            # For each unique recall value `r`, we want to get the maximal precision value obtained\n            # for any recall value `r* >= r`. Once we know the maximal precision for the last `k` recall\n            # values after a given iteration, then in the next iteration, in order to compute the maximal\n            # precisions for the last `l > k` recall values, we only need to compute the maximal precision\n            # for `l - k` recall values and then take the maximum between that and the previously computed\n            # maximum instead of computing the maximum over all `l` values.\n            # We skip the very last recall value, since the precision between the last recall value and\n            # recall 1.0 is defined to be zero.\n            # Starting from the second-to-last unique_recall, compute the rectangular area it forms with\n            # the maximal precision of the last unique_recall.\n            for i in range(len(unique_recalls) - 2, -1, -1):\n                # The index of the current recall in cumulative_recall.\n                begin = unique_recall_indices[i]\n                # The index of the next recall in cumulative_recall.\n                end = unique_recall_indices[i + 1]\n                # When computing the maximal precisions, use the maximum of the previous iteration to\n                # avoid unnecessary repeated computation over the same precision values.\n                # The maximal precisions are the heights of the rectangle areas of our integral under the\n                # precision-recall curve.\n                # np.amax(cumulative_precision[begin:end]) is the largest precision of the current recall;\n                # maximal_precisions[i + 1] is the largest precision of all later recalls.\n                # The maximum of the two is the height of the rectangle between this recall and the next.\n                maximal_precisions[i] = np.maximum(np.amax(cumulative_precision[begin:end]),\n                                                   maximal_precisions[i + 1])\n                # The differences between two adjacent recall values are the widths of our rectangle areas.\n                # adam\n                recall_deltas[i + 1] = unique_recalls[i + 1] - unique_recalls[i]\n            average_precision = np.sum(maximal_precisions * recall_deltas)\n        average_precisions.append(average_precision)\n\n    if ret:\n        return average_precisions\n\n\ndef compute_mean_average_precision(average_precisions,\n                                   ret=True):\n    \"\"\"\n    Computes the mean average precision over all classes.\n\n    Note that `compute_average_precisions()` must be called before calling this method.\n\n    Arguments:\n        average_precisions (list):\n        ret (bool, optional): If `True`, returns the mean average precision.\n\n    Returns:\n        A float, the mean average precision, by default. 
Optionally, None.\n \"\"\"\n\n if average_precisions is None:\n raise ValueError(\"Average precisions not available.\"\n \"You must run `compute_average_precisions()` before you call this method.\")\n\n # The first element is for the background class, so skip it.\n mean_average_precision = np.average(average_precisions[1:])\n\n if ret:\n return mean_average_precision\n\n\nif __name__ == '__main__':\n DATASET_DIR = '/home/adam/.keras/datasets/VOCdevkit'\n train_hdf5_path = osp.join(DATASET_DIR, '07+12_trainval.h5')\n val_hdf5_path = osp.join(DATASET_DIR, '07_test.h5')\n model_input_size_ = (416, 416)\n image_paths_ = glob.glob('/home/adam/.keras/datasets/VOCdevkit/test/VOC2007/JPEGImages/*.jpg')\n yolo_ = YOLO(model_path='logs/2019-04-09/052-20.628-22.252.h5',\n anchors_path='model_data/resized_voc_anchors.txt',\n classes_path='model_data/voc_classes.txt',\n model_image_size=model_input_size_,\n )\n predictions_pickle_path = 'predictions.pickle'\n if osp.exists(predictions_pickle_path):\n predictions_per_class_ = pickle.load(open(predictions_pickle_path, 'rb'))\n else:\n predictions_per_class_ = get_predictions_per_class(yolo_,\n model_input_size_,\n image_paths_,\n num_classes=20,\n )\n pickle.dump(predictions_per_class_, open(predictions_pickle_path, 'wb'))\n dataset_size_, labels_, image_ids_, num_gt_per_class_, *_ = get_hdf5_data(val_hdf5_path,\n num_classes=20,\n verbose=True)\n _, _, cumulative_true_positives_, cumulative_false_positives_ = match_predictions(predictions_per_class_,\n dataset_size_,\n labels_,\n image_ids_,\n num_gt_per_class_)\n cumulative_precisions_, cumulative_recalls_ = compute_precision_recall(cumulative_true_positives_,\n cumulative_false_positives_,\n num_gt_per_class_,\n )\n average_precisions_ = compute_average_precisions(cumulative_precisions_, cumulative_recalls_)\n mean_average_precision_ = compute_mean_average_precision(average_precisions_)\n for i_ in range(1, len(average_precisions_)):\n print(\"{:<14}{:<6}{}\".format(yolo_.class_names[i_ - 1], 'AP', round(average_precisions_[i_], 3)))\n print()\n print(\"{:<14}{:<6}{}\".format('', 'mAP', round(mean_average_precision_, 3)))\n", "sub_path": "yolo_evaluate.py", "file_name": "yolo_evaluate.py", "file_ext": "py", "file_size_in_byte": 43391, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.environ", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.expand_dims", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 114, "usage_type": "call"}, 
{"api_name": "numpy.maximum", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 211, "usage_type": "call"}, {"api_name": "numpy.tile", "line_number": 214, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 215, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 263, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 263, "usage_type": "name"}, {"api_name": "yolo3.utils.resize_image", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 272, "usage_type": "call"}, {"api_name": "yolo.sess.run", "line_number": 273, "usage_type": "call"}, {"api_name": "yolo.sess", "line_number": 273, "usage_type": "attribute"}, {"api_name": "yolo.boxes", "line_number": 274, "usage_type": "attribute"}, {"api_name": "yolo.scores", "line_number": 274, "usage_type": "attribute"}, {"api_name": "yolo.classes", "line_number": 274, "usage_type": "attribute"}, {"api_name": "yolo.yolo_model", "line_number": 276, "usage_type": "attribute"}, {"api_name": "yolo.image_shape", "line_number": 278, "usage_type": "attribute"}, {"api_name": "keras.backend.learning_phase", "line_number": 279, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 279, "usage_type": "name"}, {"api_name": "os.path.split", "line_number": 288, "usage_type": "call"}, {"api_name": "os.path", "line_number": 288, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 290, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 301, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 302, "usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 302, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 303, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 304, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 329, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 339, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 339, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 347, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 347, "usage_type": "attribute"}, {"api_name": "tqdm.trange", "line_number": 350, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 350, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 461, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 461, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 463, "usage_type": "attribute"}, {"api_name": "numpy.cumsum", "line_number": 471, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 473, "usage_type": "call"}, {"api_name": "numpy.dtype", "line_number": 485, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 492, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 494, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 498, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 498, "usage_type": "attribute"}, {"api_name": 
"numpy.asarray", "line_number": 512, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 538, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 552, "usage_type": "call"}, {"api_name": "numpy.bool", "line_number": 552, "usage_type": "attribute"}, {"api_name": "numpy.cumsum", "line_number": 571, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 573, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 624, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 696, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 703, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 709, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 715, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 717, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 718, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 743, "usage_type": "call"}, {"api_name": "numpy.amax", "line_number": 743, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 748, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 775, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 783, "usage_type": "call"}, {"api_name": "os.path", "line_number": 783, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 784, "usage_type": "call"}, {"api_name": "os.path", "line_number": 784, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 786, "usage_type": "call"}, {"api_name": "yolo.YOLO", "line_number": 787, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 793, "usage_type": "call"}, {"api_name": "os.path", "line_number": 793, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 794, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 801, "usage_type": "call"}]} +{"seq_id": "479554271", "text": "from app import *\nfrom flask import render_template, request, redirect, url_for\nfrom flask_security import login_required, roles_required\nimport flask_login\nfrom flask import json\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n\treturn redirect(\"/main\")\n\n\n@app.route(\"/sign_up\", methods=['GET', 'POST'])\ndef sign_up():\n\tif request.method == 'POST':\n\t\tname = request.form['name']\n\t\tsurename = request.form['lastname']\n\t\tmail = request.form['login']\n\t\tpassword = request.form['password']\n\t\tuser_datastore.create_user(email=mail, password=password, name=name, surename=surename)\n\t\tdb.session.commit()\n\t\treturn redirect('/main')\n\treturn render_template('sign_up.html')\n\n\n@app.route(\"/main\", methods=['GET', 'POST'])\ndef main():\n\tif request.method == 'GET':\n\t\tlogged_in = flask_login.current_user.is_authenticated\n\t\troles = []\n\t\tif (logged_in):\n\t\t\troles = User.query.filter_by(id=flask_login.current_user.id).first().roles\n\t\treturn render_template('home.html',\n\t\t\t\t\t\t\t\ttags=[tag.name for tag in Tag.query.all()],\n\t\t\t\t\t\t\t\tadmin=1 if len(roles) else 0,\n\t\t\t\t\t\t\t\tlogged_in=logged_in\n\t\t\t\t\t\t\t\t)\n\n\n@app.route(\"/profile\", methods=[\"GET\", \"POST\"])\n@login_required\ndef profile():\n\tif request.method == 'GET':\n\t\tuser = User.query.filter_by(id=flask_login.current_user.id).first()\n\t\tgames = [[game.id, game.name, (game.pitch).split(\"\\n\"), game.game_type[0].name, list(map(lambda x : x.name, game.tags))] for game in user.games]\n\t\tmarked_games = [[game.id, game.name, 
(game.pitch).split(\"\\n\"), game.game_type[0].name, list(map(lambda x : x.name, game.tags))] for game in user.games_stared]\n\t\tempty = 0\n\t\tif len(games) == 0:\n\t\t\tempty = 1\n\n\t\tmarks_empty = 0\n\t\tif len(marked_games) == 0:\n\t\t\tmarks_empty = 1\n\t\troles = User.query.filter_by(id=flask_login.current_user.id).first().roles\n\t\treturn render_template(\"profile.html\", user_id=user.id, \n\t\t\tgames=games, \n\t\t\tname=user.name, \n\t\t\tsurename=user.surename, \n\t\t\tmail=user.email,\n\t\t\tempty=empty,\n\t\t\tadmin= 1 if len(roles) else 0,\n\t\t\tmarked_games=marked_games,\n\t\t\tmarks_empty=marks_empty)\n\n@app.route(\"/edit_profile\", methods=[\"GET\", \"POST\"])\n@login_required\ndef edit_profile():\n\tif request.method == 'POST':\n\t\tname = request.form['name']\n\t\tsurename = request.form['lastname']\n\t\tmail = request.form['login']\n\t\ttry:\n\t\t\trows = User.query.filter_by(id=int(flask_login.current_user.id)).update({\n\t\t\t\t\t\t'name':name,\n\t\t\t\t\t\t'surename':surename,\n\t\t\t\t\t\t'email':mail\n\t\t\t\t\t})\n\t\t\tdb.session.commit()\n\t\texcept:\n\t\t\treturn render_template(\"error.html\", error_text=\"Something went wrong, please try again\")\n\t\treturn redirect('/profile')\n\telif request.method == 'GET':\n\t\tuser = User.query.filter_by(id=flask_login.current_user.id).first()\n\t\tgames = [[game.id, game.name, game.pitch] for game in user.games]\n\t\treturn render_template(\"edit_profile.html\", user_id=user.id, \n\t\t\tname=user.name, \n\t\t\tsurename=user.surename, \n\t\t\tmail=user.email)\n\n\n\n@app.route(\"/create_game\", methods=[\"GET\", \"POST\"])\n@login_required\ndef create_game():\n\tif request.method == 'POST':\n\t\tgame_name = request.form[\"game_name\"]\n\t\tpitch = request.form[\"pitch\"]\n\n\t\ttags_ids = request.form.getlist(\"tags\")\n\t\tprint(tags_ids)\n\t\ttags = []\n\t\tfor id_ in tags_ids:\n\t\t\ttags.append(Tag.query.filter_by(id=int(id_)).first())\n\n\t\tgame_type_id = request.form[\"game_type\"]\n\t\tgame_type = Ggtype.query.filter_by(id=game_type_id).first()\n\t\tgame_difficultly = request.form[\"game_dif\"]\n\t\tdiscription = request.form[\"discription\"]\n\t\trules = request.form[\"rules\"]\n\t\tmin_duration = request.form[\"min_duration\"]\n\t\tmax_duration = request.form[\"max_duration\"]\n\t\tsetup_difficultly = request.form[\"setup_difficulty\"]\n\t\tmin_judges = request.form[\"min_judges\"]\n\t\tmin_teams = request.form[\"min_teams\"]\n\t\tmax_teams = request.form[\"max_teams\"]\n\t\tmin_participants = request.form[\"min_participants\"]\n\t\tmax_participants = request.form[\"max_participants\"]\n\t\tauthors = [User.query.filter_by(id=flask_login.current_user.id).first()]\n\t\ttry:\n\t\t\tgame = Game(name=game_name,\n\t\t\t\t\t\tpitch=pitch,\n\n\t\t\t\t\t\tgame_type=[game_type],\n\t\t\t\t\t\ttags=tags,\n\t\t\t\t\t\tdiscription=discription,\n\n\t\t\t\t\t\tgame_difficultly=game_difficultly,\n\t\t\t\t\t\trules=rules,\n\n\t\t\t\t\t\trequirements=\"\",\n\t\t\t\t\t\tmin_judges=min_judges,\n\n\t\t\t\t\t\tmin_duration=min_duration,\n\t\t\t\t\t\tmax_duration=max_duration,\n\n\t\t\t\t\t\tmin_participants=min_participants,\n\t\t\t\t\t\tmax_participants=max_participants,\n\n\t\t\t\t\t\tmin_teams=min_teams,\n\t\t\t\t\t\tmax_teams=max_teams,\n\n\t\t\t\t\t\tsetup_difficultly=setup_difficultly,\n\n\t\t\t\t\t\tauthors=authors\n\t\t\t\t)\n\t\t\tdb.session.add(game)\n\t\t\tdb.session.commit()\n\t\t\treturn render_template(\"result.html\", text=\"The game was added successfully!\")\n\t\texcept:\n\t\t\treturn render_template(\"error.html\", 
error_text=\"Something went wrong, please try again\")\n\t\treturn redirect(\"/main\")\n\telif request.method == 'GET':\n\t\ttags = [[tag.id, tag.name] for tag in Tag.query.all()]\n\t\ttypes = [[type_.id, type_.name] for type_ in Ggtype.query.all()]\n\t\tmain_author = User.query.filter_by(id=flask_login.current_user.id).first()\n\t\troles = User.query.filter_by(id=flask_login.current_user.id).first().roles\n\t\treturn render_template(\"create_game.html\", tags=tags, \n\t\t\t\t\t\t\t\t\t\t\t\t\ttypes=types, \n\t\t\t\t\t\t\t\t\t\t\t\t\tmain_author=\"{0} {1}\".format(main_author.name, main_author.surename),\n\t\t\t\t\t\t\t\t\t\t\t\t\tadmin=1 if len(roles) else 0)\n\n\n@app.route(\"/game/<int:game_id>\", methods=['GET', 'POST'])\ndef game(game_id):\n\tgame = Game.query.filter_by(id=game_id).first()\n\tif game is None:\n\t\treturn render_template(\"error.html\", error_text=\"No game with this id exists\")\n\tlogged_in = flask_login.current_user.is_authenticated\n\troot = 0\n\tauthors_ids = [author.id for author in game.authors]\n\tif logged_in and flask_login.current_user.id in authors_ids:\n\t\troot = 1\n\ttags = [tag.name for tag in game.tags]\n\tstar_type = \"star_border\"\n\tcurrent_user = User.query.filter_by(id=1).first()\n\troles = []\n\tauthors = \", \".join([str(author.name) + \" \" + str(author.surename) for author in game.authors])\n\tif (logged_in):\n\t\tcurrent_user = User.query.filter_by(id=flask_login.current_user.id).first()\n\t\troles = User.query.filter_by(id=flask_login.current_user.id).first().roles\n\t\tstar_type = \"star\" if User.query.filter_by(id=flask_login.current_user.id).first() in game.stars else \"star_border\"\n\treturn render_template(\"game_page.html\", logged_in=logged_in,\n\t\t\t\t\t\t\t\t\t\t\tgame_id=game.id,\n\t\t\t\t\t\t\t\t\t\t\tgame_name=game.name,\n\t\t\t\t\t\t\t\t\t\t\trequirements=game.requirements,\n\t\t\t\t\t\t\t\t\t\t\tdiscription=(game.discription).split(\"\\n\"), \n\t\t\t\t\t\t\t\t\t\t\trules=(game.rules).split(\"\\n\"), \n\t\t\t\t\t\t\t\t\t\t\tgame_type=(game.game_type)[0].name,\n\t\t\t\t\t\t\t\t\t\t\tmin_duration=game.min_duration,\n\t\t\t\t\t\t\t\t\t\t\tmax_duration=game.max_duration,\n\t\t\t\t\t\t\t\t\t\t\tsetup_difficultly=game.setup_difficultly,\n\t\t\t\t\t\t\t\t\t\t\tmin_judges=game.min_judges,\n\t\t\t\t\t\t\t\t\t\t\tmin_teams=game.min_teams,\n\t\t\t\t\t\t\t\t\t\t\tmax_teams=game.max_teams,\n\t\t\t\t\t\t\t\t\t\t\tmin_participants=game.min_participants,\n\t\t\t\t\t\t\t\t\t\t\tmax_participants=game.max_participants,\n\t\t\t\t\t\t\t\t\t\t\ttags=tags,\n\t\t\t\t\t\t\t\t\t\t\tauthors=authors,\n\t\t\t\t\t\t\t\t\t\t\tcurrent_user_name=(current_user.name + \" \" + current_user.surename),\n\t\t\t\t\t\t\t\t\t\t\troot=root,\n\t\t\t\t\t\t\t\t\t\t\tuser_id=1,\n\t\t\t\t\t\t\t\t\t\t\tadmin=1 if len(roles) else 0,\n\t\t\t\t\t\t\t\t\t\t\tstar_type=star_type,\n\t\t\t\t\t\t\t\t\t\t\tpitch=(game.pitch).split(\"\\n\"),\n\t\t\t\t\t\t\t\t\t\t\tcomment={\"id\": \"{{comment.id}}\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t \"name\": \"{{comment.name}}\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t \"date\": \"{{comment.date}}\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t \"content\": \"{{comment.content}}\"}\n\t\t\t\t\t\t\t\t\t\t\t)\n\n\n@app.route(\"/edit_game/<int:game_id>\", methods=['GET', 'POST'])\n@login_required\ndef edit_game(game_id):\n\tgame = Game.query.filter_by(id=game_id).first()\n\tif game is None:\n\t\treturn render_template(\"error.html\", error_text=\"No game with this id exists\")\n\tauthors_ids = [author.id for author in game.authors]\n\tif flask_login.current_user.id not in authors_ids:\n\t\treturn 
render_template(\"error.html\", error_text=\"You do not have permission to edit this game\")\n\n\tif request.method == 'POST':\n\t\tgame_name = request.form[\"game_name\"]\n\t\tpitch = request.form[\"pitch\"]\n\t\tgame_type = request.form[\"game_type\"]\n\t\tgame_difficultly = request.form[\"game_dif\"]\n\t\tdiscription = request.form[\"discription\"]\n\t\trules = request.form[\"rules\"]\n\t\tmin_duration = request.form[\"min_duration\"]\n\t\tmax_duration = request.form[\"max_duration\"]\n\t\tsetup_difficultly = int(request.form[\"setup_difficulty\"])\n\t\tmin_judges = request.form[\"min_judges\"]\n\t\tmin_teams = request.form[\"min_teams\"]\n\t\tmax_teams = request.form[\"max_teams\"]\n\t\tmin_participants = request.form[\"min_participants\"]\n\t\tmax_participants = request.form[\"max_participants\"]\n\n\t\ttags_ids = request.form.getlist(\"tags\")\n\t\ttags = []\n\t\tfor id_ in tags_ids:\n\t\t\ttags.append(Tag.query.filter_by(id=int(id_)).first())\n\n\t\ttry:\n\t\t\trows = Game.query.filter_by(id=int(game_id)).update({\n\t\t\t\t\t'name':game_name,\n\t\t\t\t\t'pitch':pitch,\n\n\t\t\t\t\t'discription':discription,\n\n\t\t\t\t\t'game_difficultly':game_difficultly,\n\t\t\t\t\t'rules':rules,\n\n\t\t\t\t\t'requirements':\"\",\n\t\t\t\t\t'min_judges':min_judges,\n\n\t\t\t\t\t'min_duration':min_duration,\n\t\t\t\t\t'max_duration':max_duration,\n\n\t\t\t\t\t'min_participants':min_participants,\n\t\t\t\t\t'max_participants':max_participants,\n\n\t\t\t\t\t'min_teams':min_teams,\n\t\t\t\t\t'max_teams':max_teams,\n\n\t\t\t\t\t'setup_difficultly':setup_difficultly,\n\t\t\t\t})\n\t\t\tGame.query.filter_by(id=int(game_id)).first().game_type = [Ggtype.query.filter_by(id=int(game_type)).first()]\n\t\t\tGame.query.filter_by(id=int(game_id)).first().tags = tags\n\t\t\tdb.session.commit()\n\t\t\treturn render_template(\"result.html\", text=\"The game was edited successfully!\")\n\t\texcept:\n\t\t\treturn render_template(\"error.html\", error_text=\"Something went wrong, please try again\")\n\telif request.method == 'GET':\n\t\tgame = Game.query.filter_by(id=game_id).first()\n\t\tif game is None:\n\t\t\treturn render_template(\"error.html\", error_text=\"No game with this id exists\")\n\t\ttags = []\n\t\ttypes = []\n\t\tfor tag in Tag.query.all():\n\t\t\tif tag in game.tags:\n\t\t\t\ttags.append([tag.id, tag.name, \"checked\"])\n\t\t\telse:\n\t\t\t\ttags.append([tag.id, tag.name, \"\"])\n\t\tfor type_ in Ggtype.query.all():\n\t\t\tif type_ in game.game_type:\n\t\t\t\ttypes.append([type_.id, type_.name, \"selected\"])\n\t\t\telse:\n\t\t\t\ttypes.append([type_.id, type_.name, \"\"])\n\t\tlevels = [[i, \"selected\" if i == game.game_difficultly else 0] for i in range(1, 6)]\n\t\troles = User.query.filter_by(id=flask_login.current_user.id).first().roles\n\t\treturn render_template(\"edit_game.html\", game_id=game.id,\n\t\t\t\t\t\t\t\t\t\t\t\tpitch=game.pitch,\n\t\t\t\t\t\t\t\t\t\t\t\tgame_name=game.name,\n\t\t\t\t\t\t\t\t\t\t\t\tlevels=levels,\n\t\t\t\t\t\t\t\t\t\t\t\trequirements=game.requirements,\n\t\t\t\t\t\t\t\t\t\t\t\tdiscription=game.discription, \n\t\t\t\t\t\t\t\t\t\t\t\trules=game.rules, 
\n\t\t\t\t\t\t\t\t\t\t\t\tgame_type=game.game_type,\n\t\t\t\t\t\t\t\t\t\t\t\tmin_duration=game.min_duration,\n\t\t\t\t\t\t\t\t\t\t\t\tmax_duration=game.max_duration,\n\t\t\t\t\t\t\t\t\t\t\t\tsetup_difficultly=game.setup_difficultly,\n\t\t\t\t\t\t\t\t\t\t\t\tmin_judges=game.min_judges,\n\t\t\t\t\t\t\t\t\t\t\t\tmin_teams=game.min_teams,\n\t\t\t\t\t\t\t\t\t\t\t\tmax_teams=game.max_teams,\n\t\t\t\t\t\t\t\t\t\t\t\tmin_participants=game.min_participants,\n\t\t\t\t\t\t\t\t\t\t\t\tmax_participants=game.max_participants,\n\t\t\t\t\t\t\t\t\t\t\t\ttags=tags,\n\t\t\t\t\t\t\t\t\t\t\t\ttypes=types,\n\t\t\t\t\t\t\t\t\t\t\t\tadmin=1 if len(roles) else 0\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\n@app.route(\"/add_author/<int:game_id>\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_author(game_id):\n\tgame = Game.query.filter_by(id=game_id).first()\n\tif (User.query.filter_by(id=flask_login.current_user.id).first()) not in game.authors:\n\t\treturn render_template(\"error.html\", error_text=\"You are not an author of this game\")\n\tif request.method == 'GET':\n\t\tusers = User.query.all()\n\t\tadd_users = []\n\t\tfor user in users:\n\t\t\tif user not in game.authors:\n\t\t\t\tadd_users.append([user.id, \"{0} {1}\".format(user.name, user.surename)])\n\t\treturn render_template(\"add_author.html\", add_users=add_users, game_id=game_id, empty=0)\n\telif request.method == 'POST':\n\t\tkey_word = request.form[\"search\"]\n\t\tusers = User.query.all()\n\t\tadd_users = []\n\t\tfor user in users:\n\t\t\tif (user not in game.authors) and (key_word in user.name or key_word in user.surename):\n\t\t\t\tadd_users.append([user.id, \"{0} {1}\".format(user.name, user.surename)])\n\t\tempty = 0\n\t\tif len(add_users) == 0:\n\t\t\tempty = 1\n\t\troles = User.query.filter_by(id=flask_login.current_user.id).first().roles\n\t\treturn render_template(\"add_author.html\", add_users=add_users, \n\t\t\t\t\t\t\t\t\t\t\t\tgame_id=game_id, \n\t\t\t\t\t\t\t\t\t\t\t\tempty=empty,\n\t\t\t\t\t\t\t\t\t\t\t\tadmin=1 if len(roles) else 0\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\n\n\n#ajax checkers \n@app.route(\"/check_login/<login>\", methods=['GET', 'POST'])\ndef check_exist(login):\n\tfirst_user = User.query.filter_by(email=login).first()\n\tif first_user is None:\n\t\treturn \"1\"\n\treturn \"0\"\n\n@app.route(\"/add_auhor_to_game/<int:game_id>/<int:author_id>\", methods=[\"GET\", \"POST\"])\n@login_required\ndef add_author_to_game(game_id, author_id):\n\tgame = Game.query.filter_by(id=game_id).first()\n\tif (User.query.filter_by(id=flask_login.current_user.id).first()) not in game.authors:\n\t\treturn \"0\"\n\tnew_author = User.query.filter_by(id=author_id).first()\n\ttry:\n\t\tgame.authors.append(new_author)\n\t\tdb.session.commit()\n\texcept:\n\t\treturn \"0\"\n\treturn \"1\"\n\n\n@app.route(\"/update_star/<int:game_id>/<star_type>\", methods=[\"GET\", \"POST\"])\n@login_required\ndef update_star(game_id, star_type):\n\ttry:\n\t\tuser = User.query.filter_by(id=flask_login.current_user.id).first()\n\t\tgame = Game.query.filter_by(id=game_id).first()\n\t\tif star_type == \"star_border\":\n\t\t\tgame.stars.append(user)\n\t\telse:\n\t\t\tgame.stars.remove(user)\n\t\tdb.session.commit()\n\texcept:\n\t\treturn \"0\"\n\treturn \"1\"\n\n@app.route(\"/comment/<int:game_id>\", methods=[\"GET\", \"POST\"])\n@login_required\ndef comment(game_id):\n\ttry:\n\t\tuser = User.query.filter_by(id=flask_login.current_user.id).first()\n\t\tgame = Game.query.filter_by(id=game_id).first()\n\t\tdata = request.get_json()\n\t\tdate = data[\"date\"]\n\t\ttext = data[\"content\"]\n\t\tcomment = Comment(text=text, date=date)\n\t\tcomment.author = 
[user]\n\t\tcomment.games = [game]\n\t\tdb.session.commit()\n\t\treturn str(comment.id)\n\texcept:\n\t\treturn \"-1\"\n\n@app.route(\"/get_comments/<int:game_id>\", methods=[\"GET\", \"POST\"])\ndef get_comments(game_id):\n\tpre_comments = (Game.query.filter_by(id=game_id).first()).comments\n\tcomments = []\n\tfor comment in pre_comments:\n\t\tname = comment.author[0].name + \" \" + comment.author[0].surename\n\t\ttext = comment.text\n\t\tdate = comment.date\n\t\tcomments.append({\"id\":comment.id, \"name\": name, \"content\": text, \"date\": date})\n\tcomments.reverse()\n\treturn json.dumps(comments)\n\n\n@app.route(\"/get_games\", methods=[\"GET\", \"POST\"])\ndef get_games():\n\tgames = []\n\tif request.method == \"POST\":\n\t\ttags = request.form[\"tags\"].split(\", \")\n\t\ttext = request.form[\"text\"]\n\t\tprint(tags, text)\n\t\tgames_pre = Game.query.all()\n\t\tfor game in games_pre:\n\t\t\tflg = True\n\t\t\tfor tag in tags:\n\t\t\t\tif (tag != \"\" and tag not in list(map(lambda x: x.name, game.tags))):\n\t\t\t\t\tflg = False\n\t\t\tif (flg and (text == \"\" or (text in game.name or text in game.pitch or text in game.discription))):\n\t\t\t\tgames.append(game)\n\n\telif request.method == \"GET\":\n\t\tgames = Game.query.all()\n\n\tfor i in range(len(games)):\n\t\tgames[i] = {\n\t\t\t'id': str(games[i].id),\n\n\t\t\t'name': games[i].name,\n\t\t\t'pitch': games[i].pitch.split(\"\\n\"),\n\n\t\t\t'game_type': (games[i].game_type)[0].name,\n\t\t\t'tags': [tag.name for tag in games[i].tags],\n\t\t}\n\treturn json.dumps(games)\n\n\n\n#on start\n@app.before_first_request\ndef restrict_admin_url():\n    endpoint = 'admin.index'\n    url = url_for(endpoint)\n    admin_index = app.view_functions.pop(endpoint)\n    @app.route(url, endpoint=endpoint)\n    @roles_required('admin')\n    def secure_admin_index():\n        return admin_index()\n\n\n\n\n", "sub_path": "view.py", "file_name": "view.py", "file_ext": "py", "file_size_in_byte": 15473, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "flask.redirect", "line_number": 10, "usage_type": "call"}, {"api_name": "app.route", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 23, "usage_type": "call"}, {"api_name": "app.route", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 28, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 28, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 29, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.render_template", 
"line_number": 33, "usage_type": "call"}, {"api_name": "app.route", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 44, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "app.route", "line_number": 40, "usage_type": "call"}, {"api_name": "flask_security.login_required", "line_number": 41, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 80, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 82, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 82, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 85, "usage_type": "call"}, {"api_name": "app.route", "line_number": 65, "usage_type": "call"}, {"api_name": "flask_security.login_required", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 96, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 96, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 97, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 97, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 99, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 109, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 111, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 112, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 115, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 115, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 116, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 117, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 117, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 118, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 148, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 151, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 152, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 152, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 155, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 156, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 157, "usage_type": "call"}, {"api_name": "app.route", "line_number": 92, "usage_type": "call"}, {"api_name": "flask_security.login_required", "line_number": 93, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 167, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 168, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 171, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 179, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 180, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 182, "usage_type": "call"}, {"api_name": "app.route", "line_number": 163, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 217, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 219, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 220, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 222, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 222, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 223, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 223, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 224, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 224, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 225, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 225, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 226, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 226, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 227, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 227, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 228, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 228, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 229, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 229, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 230, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 230, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 231, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 231, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 232, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 232, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 233, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 233, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 234, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 234, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 235, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 235, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 236, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 236, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 238, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 238, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 238, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 270, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 272, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 273, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 273, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 276, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 290, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 291, "usage_type": "call"}, {"api_name": "app.route", "line_number": 212, "usage_type": "call"}, {"api_name": "flask_security.login_required", "line_number": 213, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 316, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 317, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 318, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 318, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 324, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 325, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 325, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 326, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 326, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 335, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 336, "usage_type": "call"}, {"api_name": "app.route", 
"line_number": 312, "usage_type": "call"}, {"api_name": "flask_security.login_required", "line_number": 313, "usage_type": "name"}, {"api_name": "app.route", "line_number": 345, "usage_type": "call"}, {"api_name": "flask_login.current_user", "line_number": 356, "usage_type": "attribute"}, {"api_name": "app.route", "line_number": 352, "usage_type": "call"}, {"api_name": "flask_security.login_required", "line_number": 353, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 371, "usage_type": "attribute"}, {"api_name": "app.route", "line_number": 367, "usage_type": "call"}, {"api_name": "flask_security.login_required", "line_number": 368, "usage_type": "name"}, {"api_name": "flask_login.current_user", "line_number": 386, "usage_type": "attribute"}, {"api_name": "flask.request.get_json", "line_number": 388, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 388, "usage_type": "name"}, {"api_name": "app.route", "line_number": 382, "usage_type": "call"}, {"api_name": "flask_security.login_required", "line_number": 383, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 409, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 409, "usage_type": "name"}, {"api_name": "app.route", "line_number": 399, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 415, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 415, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 416, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 416, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 417, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 417, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 428, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 428, "usage_type": "name"}, {"api_name": "flask.json.dumps", "line_number": 441, "usage_type": "call"}, {"api_name": "flask.json", "line_number": 441, "usage_type": "name"}, {"api_name": "app.route", "line_number": 412, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 449, "usage_type": "call"}, {"api_name": "app.view_functions.pop", "line_number": 450, "usage_type": "call"}, {"api_name": "app.view_functions", "line_number": 450, "usage_type": "attribute"}, {"api_name": "app.route", "line_number": 451, "usage_type": "call"}, {"api_name": "flask_security.roles_required", "line_number": 452, "usage_type": "call"}, {"api_name": "app.before_first_request", "line_number": 446, "usage_type": "attribute"}]} +{"seq_id": "339235153", "text": "import twitter\nimport operator\nimport collections\nimport json\nimport re\nimport time\n\n\ndef twitter_api_auth():\n\t\"\"\"twitter API authorization\"\"\"\n\t\t\t\t\t\t\t\n\tCONSUMER_KEY = 'SDNSSHvVQp2HbLQfuI1Sog'\n\tCONSUMER_SECRET = 'QXXmWAERTKaXzObje2i9R6LnwJQKZkd40AxVynJardc'\n\tOAUTH_TOKEN = '220097299-ywALLeRzKURKpvWmYrdm93sEq45l66npOd4uktdC'\n\tOAUTH_TOKEN_SECRET = 'FWI8AApTveTYXXYppVJpG3X77ut1IZzGgIKWXlf1KSTQp'\n\t\n\t\n\t\"\"\"\n\t\n\tCONSUMER_KEY = '7HLo45hP2DXobddgg7AA'\n\tCONSUMER_SECRET = 'caaXIcdg1KnBsohreiMM1CHzznwQ83Zsq47Zgx6wovc'\n\tOAUTH_TOKEN = '2249383801-OcmRIKkMVsoF0GGG9A5YsCbGTrSiRzDqRgTsPBi'\n\tOAUTH_TOKEN_SECRET = '5qLZxPPUZ3LMt0GMnIQZKmibyzh8iPhSaWG49zEMgp3I2'\n\t\"\"\"\t\t\t\n\t\n\tauth = twitter.oauth.OAuth( OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET )\n\ttwitter_api = twitter.Twitter( auth = auth )\n\t\n\treturn 
twitter_api\n\n\t\n\n\n\n\ndef twitter_search(twitter_api, q, max_results, geocode = '', since='', until=''):\n\tsearch_results = twitter_api.search.tweets(q=q, count=100, geocode= geocode, since = since, until = until)\n\tstatuses = search_results['statuses']\n \n\t#Enforce a reasonable limit\n\t#max_results = min(1000, max_results)\n\tmax_iteration = max_results // 100\n \n \n\tfor _ in range(max_iteration): # 10*100 = 1000\n\t\tif 'next_results' in search_results['search_metadata']:\n\t\t\tnext_results = search_results['search_metadata']['next_results']\n\t\t\t#print('---------------next result: ' + next_results)\n\t\t\tpart1 = next_results[1:].split(\"%20\")[0]\n\t\t\tpart2 = next_results[1:].split(\"%20\")[1]\n\t\t\tpart3 = next_results[1:].split(\"%20\")[2]\n\t\t\tpart1_list = part1.split(\"&\")\n\t\t\tpart2_list = part2.split(\"&\")\n\t\t\tpart3_list = part3.split(\"&\")\n\t\t\t#print(part1_list)\n\t\t\t#print(part2_list)\n\t\t\t#print(part3_list)\n\t\t\tkwlist = part1_list + part2_list + part3_list\n\t\t\t#print(kwlist)\n\t\t\tkwargs = []\n\t\t\tfor kv in kwlist:\n\t\t\t\tif '%3A' in kv:\n\t\t\t\t\tkwargs.append( kv.split('%3A'))\n\t\t\t\telif '%2C' in kv:\n\t\t\t\t\tkv = kv.replace('%2C',',')\n\t\t\t\t\tkwargs.append(kv.split('='))\n\t\t\t\telse:\n\t\t\t\t\tkwargs.append( kv.split('='))\n \n\t\t\t\n\t\t\tkwargs = dict(kwargs)\n\t\t\t#print(kwargs)\n\t\t\t#time.sleep(10)\n\t\t\tsearch_results = twitter_api.search.tweets(**kwargs)\n\t\t\tstatuses += search_results['statuses']\n\t\t\tif len(statuses) >= max_results:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tbreak\n\t\n\t#print(\"------------------------\"+ str(len(statuses)))\n\treturn statuses\n\n\n\n\n\ndef query_for_getting_freq_kw(twitter_api, query, file1, file2, tweets_id_dict, geocode = '', until = ''):\n\t\"\"\"use twitter_api to search tweets and select the best words for an accurate tweets query\"\"\"\n\n\t#twitter_api = twitter_api_auth()\n\tmax_result = 100\n\t#query = ['#exercise','#workout','#gym','exercise','workout','gym']\n\t#tweets_id_dict = {}\n\tretweet_dict = {}\n\tretweet_count_threshold = 5\n\tresult_volume = 0\n\t\n\t#f1 = open(\"status_text.txt\",\"a\")\n\t#f2 = open(\"rt_status_text.txt\",\"a\")\n\t\n\tfor elem in query:\n\t\tsearch_results = twitter_api.search.tweets(q = elem, count = max_result, geocode = geocode, until = until)\n\t\tstatuses = search_results['statuses']\n\t\t#result_volume = result_volume + len(statuses)\n\t\t#for i in range(len(statuses)):\n\t\tfor status in statuses:\n\t\t\tif status['user']['lang'] == 'en':\n\t\t\t\t#print_tweets(status)\n\t\t\t\ttweet_text = status['text']\n\t\t\t\t\t\n\t\t\t\t#if the current status has been retweeted many times, it may be an ad, a recipe, etc.\n\t\t\t\t#then, this will affect the frequent words. 
So we will not include it.\n\t\t\t\tif status['retweet_count'] > retweet_count_threshold:\n\t\t\t\t\tif 'retweeted_status' in status:\n\t\t\t\t\t\tif status['retweeted_status']['id'] not in retweet_dict:\n\t\t\t\t\t\t\tretweet_dict[ status['retweeted_status']['id'] ] = tweet_text\n\t\t\t\t\t\t\ttweet_text = re.sub('\\n',' ',tweet_text)\n\t\t\t\t\t\t\tfile2.write(tweet_text + '\\n')\n\t\t\t\t\telse:\n\t\t\t\t\t\tif status['id'] not in retweet_dict:\n\t\t\t\t\t\t\tretweet_dict[ status['id'] ] = tweet_text\n\n\t\t\t\t#\"\"\" we need to analyze retweet users\"\"\"\n\n\n\t\t\t\telse:\n\t\t\t\t\t# if there are some urls in the current tweet, remove the urls\n\t\t\t\t\tif status['entities']['urls'] != []:\n\t\t\t\t\t\t#print('--------------------------------------------------')\n\t\t\t\t\t\t#print_tweets(search_results['statuses'][i])\n\t\t\t\t\t\tfor j in range(len(status['entities']['urls'])-1,-1,-1):\n\t\t\t\t\t\t\turl_index1 = status['entities']['urls'][j]['indices'][0]\n\t\t\t\t\t\t\turl_index2 = status['entities']['urls'][j]['indices'][1]\n\t\t\t\t\t\t\ttweet_text = tweet_text.replace(tweet_text[url_index1:url_index2],'')\n\t\t\t\t\telif 'indices' in status['entities']:\n\t\t\t\t\t\turl_index1 = status['entities']['indices'][0]\n\t\t\t\t\t\turl_index2 = status['entities']['indices'][1]\n\t\t\t\t\t\ttweet_text = tweet_text.replace(tweet_text[url_index1:url_index2],'')\n\t\t\t\t\telif 'media' in status['entities']:\n\t\t\t\t\t\tif 'indices' in status['entities']['media'][0]:\n\t\t\t\t\t\t\turl_index1 = status['entities']['media'][0]['indices'][0]\n\t\t\t\t\t\t\turl_index2 = status['entities']['media'][0]['indices'][1]\n\t\t\t\t\t\t\ttweet_text = tweet_text.replace(tweet_text[url_index1:url_index2],'')\n\n\n\t\t\t\t\tif status['id'] not in tweets_id_dict:\n\t\t\t\t\t\ttweets_id_dict[ status['id'] ] = tweet_text\n\t\t\t\t\t\ttweet_text = re.sub('\\n',' ',tweet_text)\n\t\t\t\t\t\t#print_tweets(tweet_text)\n\t\t\t\t\t\tfile1.write(tweet_text + '\\n')\n\t\t\t\t\t\tresult_volume = result_volume + 1\n\t#print(\"total result volume: ---------------------------------------\" + str(result_volume))\n\t#file1.close()\n\t#file2.close()\n\treturn result_volume\n\n\n\n\n\ndef keywords_query(query, file1, file2, tweets_id_dict, geocode = '', since = '', until = ''):\n\n\ttwitter_api = twitter_api_auth()\n\tmax_result = 100\n\tretweet_dict = {}\n\tretweet_count_threshold = 50\n\tresult_volume = 0\n\n\tfor elem in query:\n\t\tsearch_results = twitter_api.search.tweets(q = elem, count = max_result, geocode = geocode, until = until, since = since)\n\t\tstatuses = search_results['statuses']\n\t\t#result_volume = result_volume + len(statuses)\n\t\t#for i in range(len(statuses)):\n\t\tfor status in statuses:\n\t\t\tif status['user']['lang'] == 'en':\n\t\t\t\t#print_tweets(status)\n\t\t\t\ttweet_text = status['text']\n\t\t\t\t\t\n\t\t\t\t#if the current status has been retweeted many times, it may be an ad, a recipe, etc.\n\t\t\t\t#then, this will affect the frequent words. 
So we will not include it.\n\t\t\t\tif status['retweet_count'] > retweet_count_threshold:\n\t\t\t\t\tif 'retweeted_status' in status:\n\t\t\t\t\t\tif status['retweeted_status']['id'] not in retweet_dict:\n\t\t\t\t\t\t\tretweet_dict[ status['retweeted_status']['id'] ] = tweet_text\n\t\t\t\t\t\t\ttweet_text = re.sub('\\n',' ',tweet_text)\n\t\t\t\t\t\t\tfile2.write(tweet_text + '\\n')\n\t\t\t\t\telse:\n\t\t\t\t\t\tif status['id'] not in retweet_dict:\n\t\t\t\t\t\t\tretweet_dict[ status['id'] ] = tweet_text\n\n\t\t\t\telse:\t\n\t\t\t\t\t\n\t\t\t\t\tif status['id'] not in tweets_id_dict:\n\t\t\t\t\t\ttweets_id_dict[ status['id'] ] = tweet_text\n\t\t\t\t\t\ttweet_text = re.sub('\\n',' ',tweet_text)\n\t\t\t\t\t\t#print_tweets(tweet_text)\n\t\t\t\t\t\tfile1.write(tweet_text + '\\n')\n\t\t\t\t\t\tresult_volume = result_volume + 1\n\n\treturn result_volume\n\n\n\n\ndef print_tweets(tweets):\n\t\"\"\"raw data is JSON, we need to print it with indentation for easy reading\"\"\"\n\tprint( json.dumps(tweets, indent = 3) )\n\n\n\n\ndef get_top_freq_words(file_obj, top_number):\n\ttext = file_obj.read()\n\ttext = text.lower()\n\ttext = re.sub('@[a-zA-Z0-9|_]*','@ ',text)\n\t#text = re.sub(r'[\\.\\?\\;\\,\\!\\\"]* ([a-zA-Z0-9])', r'\\1 \\1',text)\n\t#text = re.sub(r'([a-zA-Z0-9])[\\.\\?\\;\\,\\!\\\"]*$', r'\\1', text)\n\ttext = re.sub(\"%[0-9|.]*\", '%XX', text)\n\ttext = re.sub(\"\\$[0-9|.]*\", '$XX', text)\n\n\n\tfor ch in '!\"%&()*+,-./:;<=>?[\\\\]^_`{|}~':\n\t\ttext = text.replace(ch,' ')\n\twords = text.split()\n\tcounts = collections.Counter(words)\n\ttop_freq_words = counts.most_common()[: top_number]\n\t#print(top_freq_words)\n\tfile_obj.close()\n\treturn top_freq_words\n\n\n\n\n\n\n\t\n\t\t\n\n\n\n\n\n\n\n", "sub_path": "twitter/tt.py", "file_name": "tt.py", "file_ext": "py", "file_size_in_byte": 7601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "twitter.oauth.OAuth", "line_number": 26, "usage_type": "call"}, {"api_name": "twitter.oauth", "line_number": 26, "usage_type": "attribute"}, {"api_name": "twitter.Twitter", "line_number": 27, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 118, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 149, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 186, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 196, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 208, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 216, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 219, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 220, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 226, "usage_type": "call"}]} +{"seq_id": "384811378", "text": "import cv2\r\n\ndef paizhao():\n    cap = cv2.VideoCapture(0)\n    ret, frame = cap.read()\n    # display the image\n    # cv2.imshow(\"capture\", frame)\n    cv2.imwrite(\"D:/Gitee/blog/sousuo/paizhao.jpg\", frame)\n    cap.release()\n    cv2.destroyAllWindows()\npaizhao()\n\n", "sub_path": "AA全部文件/Linux文件程序/Graduation_project/share/不要的文件备份/Windows/人脸搜索/笔记存储/opencv.py", "file_name": "opencv.py", "file_ext": "py", "file_size_in_byte": 255, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "cv2.VideoCapture", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 10, "usage_type": 
"call"}]} +{"seq_id": "352847714", "text": "#!/usr/bin/python3\n'''\n Database storage system\n'''\nimport os\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom models.base_model import Base\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\n\navailable_objects = [User, State, City, Amenity, Place, Review]\n\n\nclass DBStorage():\n '''\n Implementation for the DBStorage\n '''\n __engine = None\n __session = None\n\n def __init__(self):\n '''\n init method\n\n '''\n target = 'mysql+mysqldb://{}:{}@{}:3306/{}'.format(\n os.getenv(\"HBNB_MYSQL_USER\"),\n os.getenv(\"HBNB_MYSQL_PWD\"),\n os.getenv(\"HBNB_MYSQL_HOST\"),\n os.getenv(\"HBNB_MYSQL_DB\"))\n\n self.__engine = create_engine(target, pool_pre_ping=True)\n\n if os.getenv(\"HBNB_ENV\") == \"test\":\n Base.metadata.drop_all(self.__engine)\n\n def all(self, cls=None):\n '''\n all method\n '''\n ret_dict = {}\n\n if not cls:\n for classname in available_objects:\n for item in self.__session.query(classname).all():\n key = item.__class__.__name__ + \".\" + item.id\n val = item\n ret_dict[key] = val\n else:\n # for item in self.__session.query(eval(cls)).all():\n for item in self.__session.query(cls).all():\n key = item.__class__.__name__ + \".\" + item.id\n val = item\n ret_dict[key] = val\n\n return ret_dict\n\n def new(self, obj):\n '''\n add the object to the current database session\n '''\n if obj is not None:\n self.__session.add(obj)\n\n def save(self):\n '''\n commit all changes of the current database session\n '''\n self.__session.commit()\n\n def delete(self, obj=None):\n '''\n delete from the current database session obj if not None\n '''\n if obj is not None:\n self.__session.delete(obj)\n\n def reload(self):\n '''\n create all tables in the database\n '''\n Base.metadata.create_all(self.__engine)\n session_factory = sessionmaker(bind=self.__engine,\n expire_on_commit=False)\n Session = scoped_session(session_factory)\n self.__session = Session()\n\n def close(self):\n '''\n call remove() method on the private session attribute\n '''\n self.__session.close()\n", "sub_path": "models/engine/db_storage.py", "file_name": "db_storage.py", "file_ext": "py", "file_size_in_byte": 2681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "models.user.User", "line_number": 16, "usage_type": "name"}, {"api_name": "models.state.State", "line_number": 16, "usage_type": "name"}, {"api_name": "models.city.City", "line_number": 16, "usage_type": "name"}, {"api_name": "models.amenity.Amenity", "line_number": 16, "usage_type": "name"}, {"api_name": "models.place.Place", "line_number": 16, "usage_type": "name"}, {"api_name": "models.review.Review", "line_number": 16, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 32, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 33, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 34, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.create_engine", "line_number": 37, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 39, "usage_type": "call"}, {"api_name": "models.base_model.Base.metadata.drop_all", "line_number": 40, "usage_type": "call"}, {"api_name": "models.base_model.Base.metadata", "line_number": 40, "usage_type": 
"attribute"}, {"api_name": "models.base_model.Base", "line_number": 40, "usage_type": "name"}, {"api_name": "models.base_model.Base.metadata.create_all", "line_number": 87, "usage_type": "call"}, {"api_name": "models.base_model.Base.metadata", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.base_model.Base", "line_number": 87, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.sessionmaker", "line_number": 88, "usage_type": "call"}, {"api_name": "sqlalchemy.orm.scoped_session", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "261449663", "text": "from .base import ToDoFunctionalTest\nfrom selenium import webdriver\n\nclass NewVisitorTest(ToDoFunctionalTest):\n\n def test_can_start_a_list_and_retrieve_it_later(self):\n\n self.browser.get(self.live_server_url)\n\n # She notices page title and header mention to-do lists\n self.assertIn('To-Do', self.browser.title)\n header_text = self.browser.find_element_by_tag_name('h1').text\n self.assertIn('To-Do', header_text)\n\n # Edith really likes fly fishing\n # She is invited to enter a to-do item\n inputbox = self.browser.find_element_by_id('id_new_item')\n self.assertEqual(\n inputbox.get_attribute('placeholder'),\n 'Enter a to-do item'\n )\n\n # She types \"Buy peacock feathers\" into a text box\n self.enter_a_new_item('Buy peacock feathers')\n\n # Sometimes she may forget and leave the text box empty!\n\n # When she hits enter, she is taken to a new URL,\n # and now the page lists \"1. Buy peacock feathers\" as\n # a to-do list table\n edith_list_url = self.browser.current_url\n self.assertRegexpMatches(edith_list_url, '/lists/.+')\n self.check_for_row_in_list_table('1. Buy peacock feathers')\n\n # She can still add more to do items\n # She writes \"Use peacock feathers to make fly\"\n # (Edith is very methodolical)\n self.enter_a_new_item('Use peacock feathers to make a fly')\n\n # The homepage uodates again, and now shows both items on her list\n self.check_for_row_in_list_table('1. Buy peacock feathers')\n self.check_for_row_in_list_table('2. Use peacock feathers to make a fly')\n\n # Now a new user, Pala, comes along to the site\n\n # We use a new browser session to make sure that no information\n # of Edith's is coming through from cookies etc\n self.browser.quit()\n self.browser = webdriver.Firefox()\n\n # Pala vists the home page. 
There is no sign of Edith's list\n        self.browser.get(self.live_server_url)\n        page_text = self.browser.find_element_by_tag_name('body').text\n        self.assertNotIn('Buy peacock feathers', page_text)\n        self.assertNotIn('make a fly', page_text)\n\n        # Pala starts a new list by entering a new item.\n        # He is less interesting than Edith\n        self.enter_a_new_item('Buy milk')\n\n        # Pala gets his own unique URL\n        pala_list_url = self.browser.current_url\n        self.assertRegexpMatches(pala_list_url, '/lists/.+')\n        self.assertNotEqual(pala_list_url, edith_list_url)\n\n        # Again there is no trace of Edith's list\n        page_text = self.browser.find_element_by_tag_name('body').text\n        self.assertNotIn('Buy peacock feathers', page_text)\n        self.assertIn('Buy milk', page_text)\n\n        # She visits the URL - her to-do list is still there\n\n\n        # She is done!\n", "sub_path": "superlists/functional_tests/test_simple_list_creation.py", "file_name": "test_simple_list_creation.py", "file_ext": "py", "file_size_in_byte": 2891, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "base.ToDoFunctionalTest", "line_number": 4, "usage_type": "name"}, {"api_name": "selenium.webdriver.Firefox", "line_number": 49, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 49, "usage_type": "name"}]} +{"seq_id": "198046273", "text": "from __future__ import print_function\r\nimport os\r\nimport time\r\nimport sys\r\n\r\nimport importlib\r\nimport FetchData as FD\r\nimport FindIntention as FI\r\n\r\nclass Main:\r\n\r\n    def AssembleInfo(self):\r\n        obj = FD.FetchDataClass(datainfopath=\"selected_algo.json\")\r\n        self.dataset = obj.ExtractTraingData()\r\n        self.domain, self.sensorname, self.algorithmname = obj.ExtractTrainingInfo()\r\n        obj = FI.FindIntentionClass(currentuserintentpath=\"user_intention.json\")\r\n        self.isAppendIntention, self.intention_id, self.conditionalkey = obj.AppendIntention()\r\n\r\n\r\n    def FeedIntoAlgorithm(self):\r\n        self.AssembleInfo()\r\n        if self.isAppendIntention == True:\r\n            algorithm = importlib.import_module(self.algorithmname)\r\n            obj = getattr(algorithm, self.algorithmname)(self.dataset, self.intention_id)\r\n            obj.TrainModel()\r\n            #print (\"Accuracy : \", obj.Accuracy())\r\n\r\n\r\n    def PeriodicPrediction(self):\r\n        import Prediction as PD\r\n        obj = PD.PredictionClass(domain=self.domain, intentionid=self.intention_id, conditionalkey=self.conditionalkey)\r\n        insight = obj.ExportInsights()\r\n        #insight = obj.Predict()\r\n        print(\"insight : \", insight)\r\n\r\n\r\n    def Trigger(self):\r\n        cached_stamp = 0\r\n        filename = \"selected_algo.json\"\r\n        while True:\r\n            time.sleep(0.5)\r\n            stamp = os.stat(filename).st_mtime\r\n            if stamp != cached_stamp:\r\n                cached_stamp = stamp\r\n                print(\"file updated\", file=sys.stderr)\r\n                self.FeedIntoAlgorithm()\r\n                from twisted.internet import task\r\n                from twisted.internet import reactor\r\n                from datetime import datetime\r\n                timeout = 60\r\n                l = task.LoopingCall(self.PeriodicPrediction)\r\n                l.start(timeout)\r\n                reactor.run()\r\n\r\n\r\nobj = Main().Trigger()\r\n\r\n\r\n", "sub_path": "Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 1942, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "FetchData.FetchDataClass", "line_number": 13, "usage_type": "call"}, {"api_name": "FindIntention.FindIntentionClass", "line_number": 16, "usage_type": "call"}, {"api_name": "importlib.import_module", "line_number": 23, 
"usage_type": "call"}, {"api_name": "Prediction.PredictionClass", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "os.stat", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 45, "usage_type": "attribute"}, {"api_name": "twisted.internet.task.LoopingCall", "line_number": 51, "usage_type": "call"}, {"api_name": "twisted.internet.task", "line_number": 51, "usage_type": "name"}, {"api_name": "twisted.internet.reactor.run", "line_number": 53, "usage_type": "call"}, {"api_name": "twisted.internet.reactor", "line_number": 53, "usage_type": "name"}, {"api_name": "{'PD': 'Prediction', 'task': 'twisted.internet.task', 'reactor': 'twisted.internet.reactor', 'datetime': 'datetime.datetime'}", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "555890941", "text": "# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n#\n# Copyright 2016 BigML\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"A local Predictive LDA Model.\n\nThis module allows you to download and use LDA models for local\npredicitons. Specifically, the function LDA.distribution allows you\nto pass in input text and infers a generative distribution over the\ntopics in the learned LDA model.\n\nExample usage (assuming that you have previously set up the BIGML_USERNAME\nand BIGML_API_KEY environment variables and that you own the lda/id\nbelow):\n\nfrom bigml.api import BigML\nfrom bigml.lda import LDA\n\napi = BigML()\n\nlda = LDA('lda/5026965515526876630001b2')\ntopic_distribution = lda.distribution({\"text\": \"A sample string\"}))\n\n\"\"\"\n\nimport random\nimport logging\nimport array\nimport Stemmer\n\n\nfrom bigml.api import FINISHED\nfrom bigml.api import BigML, get_lda_id, get_status\nfrom bigml.basemodel import retrieve_resource\nfrom bigml.basemodel import ONLY_MODEL\nfrom bigml.model import STORAGE\nfrom bigml.modelfields import ModelFields\n\n\nLOGGER = logging.getLogger('BigML')\n\nMAXIMUM_TERM_LENGTH = 30\nUPDATES = 64\n\nCODE_TO_NAME = {\n \"da\": u'danish',\n \"nl\": u'dutch',\n \"en\": u'english',\n \"fi\": u'finnish',\n \"fr\": u'french',\n \"de\": u'german',\n \"hu\": u'hungarian',\n \"it\": u'italian',\n \"nn\": u'norwegian',\n \"pt\": u'portuguese',\n \"ro\": u'romanian',\n \"ru\": u'russian',\n \"es\": u'spanish',\n \"sv\": u'swedish',\n \"tr\": u'turkish'\n}\n\nclass LDA(ModelFields):\n \"\"\" A lightweight wrapper around an LDA model.\n\n Uses a BigML remote LDA model to build a local version that can be used\n to generate topic distributions for input documents locally.\n\n \"\"\"\n\n def __init__(self, lda_model, api=None):\n\n self.resource_id = None\n self.stemmer = None\n self.seed = None\n self.case_sensitive = False\n self.bigrams = False\n self.ntopics = None\n self.temp = None\n self.uniform_doc_assignments = None\n self.uniform_normalizer = None\n self.phi = None\n self.term_to_index = None\n\n if not (isinstance(lda_model, dict) and 'resource' in lda_model and\n lda_model['resource'] is 
not None):\n if api is None:\n api = BigML(storage=STORAGE)\n self.resource_id = get_lda_id(lda_model)\n if self.resource_id is None:\n raise Exception(api.error_message(lda_model,\n resource_type='lda',\n method='get'))\n query_string = ONLY_MODEL\n lda_model = retrieve_resource(api, self.resource_id,\n query_string=query_string)\n else:\n self.resource_id = get_lda_id(lda_model)\n\n if 'object' in lda_model and isinstance(lda_model['object'], dict):\n lda_model = lda_model['object']\n\n if 'model' in lda_model and isinstance(lda_model['model'], dict):\n status = get_status(lda_model)\n if 'code' in status and status['code'] == FINISHED:\n\n model = lda_model['model']\n\n if 'language' in model and model['language'] is not None:\n lang = model['language']\n if lang in CODE_TO_NAME:\n self.stemmer = Stemmer.Stemmer(CODE_TO_NAME[lang])\n\n self.term_to_index = {self.stem(term): index for index, term\n in enumerate(model['termset'])}\n\n self.seed = model['hashed_seed']\n self.case_sensitive = model['case_sensitive']\n self.bigrams = model['bigrams']\n\n self.ntopics = len(model['term_topic_assignments'][0])\n\n self.alpha = model['alpha']\n self.ktimesalpha = self.ntopics * self.alpha\n\n self.uniform_doc_assignments = [1] * self.ntopics\n self.uniform_normalizer = self.ntopics + self.ktimesalpha\n\n self.temp = [0] * self.ntopics\n\n assignments = model['term_topic_assignments']\n beta = model['beta']\n nterms = len(self.term_to_index)\n\n sums = [sum(n[index] for n in assignments) for index\n in range(self.ntopics)]\n\n self.phi = [[0 for _ in range(nterms)]\n for _ in range(self.ntopics)]\n\n for k in range(self.ntopics):\n norm = sums[k] + nterms * beta\n for w in range(nterms):\n self.phi[k][w] = (assignments[w][k] + beta) / norm\n\n ModelFields.__init__(self, model['fields'])\n else:\n raise Exception(\"The topic model isn't finished yet\")\n else:\n raise Exception(\"Cannot create the LDA instance. Could not\"\n \" find the 'model' key in the resource:\\n\\n%s\" %\n lda_model)\n\n def distribution(self, input_data, by_name=True):\n \"\"\"Returns the distribution of topics given the input text.\n\n \"\"\"\n # Checks and cleans input_data leaving the fields used in the model\n input_data = self.filter_input_data(input_data, by_name=by_name)\n\n # Checks that all modeled fields are present in input data\n for field_id in self.fields:\n if field_id not in input_data:\n raise Exception(\"Failed to predict a topic distribution. 
\"\n \"Input data must contain values for all \"\n \"modeled text fields.\")\n\n return self.distribution_for_text(\"\\n\\n\".join(input_data.values()))\n\n def distribution_for_text(self, text):\n \"\"\"Returns the topic distribution of the given `text`, which can\n either be a string or a list of strings\n\n \"\"\"\n if isinstance(text, (str, unicode)):\n astr = text\n else:\n # List of strings\n astr = \"\\n\\n\".join(text)\n\n doc = self.tokenize(astr)\n return self.infer(doc, UPDATES)\n\n def stem(self, term):\n \"\"\"Returns the stem of the given term, if the stemmer is defined\n\n \"\"\"\n if not self.stemmer:\n return term\n else:\n return self.stemmer.stemWord(term)\n\n def append_bigram(self, out_terms, last_term, term_before):\n \"\"\"Takes two terms and appends the index of their concatenation to the\n provided list of output terms\n\n \"\"\"\n if self.bigrams and last_term is not None and term_before is not None:\n bigram = self.stem(term_before + \" \" + last_term)\n if bigram in self.term_to_index:\n out_terms.append(self.term_to_index[bigram])\n\n def tokenize(self, astr):\n \"\"\"Tokenizes the input string `astr` into a list of integers, one for\n each term term present in the `self.term_to_index`\n dictionary. Uses word stemming if applicable.\n\n \"\"\"\n out_terms = []\n\n last_term = None\n term_before = None\n\n space_was_sep = False\n saw_char = False\n\n text = unicode(astr)\n index = 0\n\n while index < len(text):\n self.append_bigram(out_terms, last_term, term_before)\n\n char = text[index]\n buf = array.array('u')\n saw_char = False\n\n if not char.isalnum():\n saw_char = True\n\n while not char.isalnum() and index < len(text):\n index += 1\n char = text[index]\n\n while (index < len(text) and\n (char.isalnum() or char == \"'\") and\n len(buf) < MAXIMUM_TERM_LENGTH):\n\n buf.append(char)\n index += 1\n\n if index < len(text):\n char = text[index]\n else:\n char = None\n\n if len(buf) > 0:\n term_out = buf.tounicode()\n\n if not self.case_sensitive:\n term_out = term_out.lower()\n\n if space_was_sep and not saw_char:\n term_before = last_term\n else:\n term_before = None\n\n last_term = term_out\n\n if char == \" \" or char == \"\\n\":\n space_was_sep = True\n\n tstem = self.stem(term_out)\n if tstem in self.term_to_index:\n out_terms.append(self.term_to_index[tstem])\n\n index += 1\n\n self.append_bigram(out_terms, last_term, term_before)\n\n return out_terms\n\n def sample_topic(self, term, assignments, normalizer, rng):\n \"\"\"Samples a topic for the given `term`, given a set of topic\n assigments for the current document and a normalizer term\n derived from the dirichlet hyperparameters\n\n \"\"\"\n for k in range(self.ntopics):\n topic_term = self.phi[k][term]\n topic_document = (assignments[k] + self.alpha) / normalizer\n self.temp[k] = topic_term * topic_document\n\n for k in range(1, self.ntopics):\n self.temp[k] += self.temp[k - 1]\n\n random_value = rng.random() * self.temp[-1]\n topic = 0\n\n while self.temp[topic] < random_value and topic < self.ntopics:\n topic += 1\n\n return topic\n\n def sample_uniform(self, term, rng):\n \"\"\"Samples a topic for the given term assuming uniform topic\n assignments for the given document. 
Used to initialize the\n gibbs sampler.\n\n \"\"\"\n assignments = self.uniform_doc_assignments\n norm = self.uniform_normalizer\n\n return self.sample_topic(term, assignments, norm, rng)\n\n def infer(self, doc, max_updates):\n \"\"\"Infer a topic distribution for a document using `max_updates`\n iterations of gibbs sampling\n\n \"\"\"\n rng = random.Random(self.seed)\n normalizer = len(doc) + self.ktimesalpha\n doc_assignments = [0] * self.ntopics\n\n for term in doc:\n topic = self.sample_uniform(term, rng)\n doc_assignments[topic] += 1\n\n # Gibbs sampling\n for _ in range(max_updates):\n for term in doc:\n topic = self.sample_topic( \\\n term, doc_assignments, normalizer, rng)\n doc_assignments[topic] += 1\n normalizer += 1\n\n return [(doc_assignments[k] + self.alpha) / normalizer\n for k in range(self.ntopics)]\n", "sub_path": "bigml/lda.py", "file_name": "lda.py", "file_ext": "py", "file_size_in_byte": 11116, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 53, "usage_type": "call"}, {"api_name": "bigml.modelfields.ModelFields", "line_number": 76, "usage_type": "name"}, {"api_name": "bigml.api.BigML", "line_number": 101, "usage_type": "call"}, {"api_name": "bigml.model.STORAGE", "line_number": 101, "usage_type": "name"}, {"api_name": "bigml.api.get_lda_id", "line_number": 102, "usage_type": "call"}, {"api_name": "bigml.basemodel.ONLY_MODEL", "line_number": 107, "usage_type": "name"}, {"api_name": "bigml.basemodel.retrieve_resource", "line_number": 108, "usage_type": "call"}, {"api_name": "bigml.api.get_lda_id", "line_number": 111, "usage_type": "call"}, {"api_name": "bigml.api.get_status", "line_number": 117, "usage_type": "call"}, {"api_name": "bigml.api.FINISHED", "line_number": 118, "usage_type": "name"}, {"api_name": "Stemmer.Stemmer", "line_number": 125, "usage_type": "call"}, {"api_name": "bigml.modelfields.ModelFields.__init__", "line_number": 159, "usage_type": "call"}, {"api_name": "bigml.modelfields.ModelFields", "line_number": 159, "usage_type": "name"}, {"api_name": "array.array", "line_number": 237, "usage_type": "call"}, {"api_name": "random.Random", "line_number": 323, "usage_type": "call"}]} +{"seq_id": "103130151", "text": "# https://www.quantstart.com/qstrader/tutorial-60-40-portfolio/\nimport os\n\nimport pandas as pd\nimport pytz\n\nfrom qstrader.alpha_model.fixed_signals import FixedSignalsAlphaModel\nfrom qstrader.asset.equity import Equity\nfrom qstrader.asset.universe.static import StaticUniverse\nfrom qstrader.data.backtest_data_handler import BacktestDataHandler\nfrom qstrader.data.daily_bar_csv import CSVDailyBarDataSource\nfrom qstrader.statistics.tearsheet import TearsheetStatistics\nfrom qstrader.trading.backtest import BacktestTradingSession\n\nif __name__ == \"__main__\":\n start_dt = pd.Timestamp('2003-09-30 14:30:00', tz=pytz.UTC)\n end_dt = pd.Timestamp('2019-12-31 23:59:00', tz=pytz.UTC)\n\n # Construct the symbols and assets necessary for the backtest\n strategy_symbols = ['SPY', 'AGG']\n strategy_assets = ['EQ:%s' % symbol for symbol in strategy_symbols]\n strategy_universe = StaticUniverse(strategy_assets)\n\n # To avoid loading all CSV files in the directory, set the\n # data source to load only those provided symbols\n csv_dir = os.environ.get('QSTRADER_CSV_DATA_DIR', '.')\n data_source = CSVDailyBarDataSource(csv_dir, Equity, csv_symbols=strategy_symbols)\n data_handler = BacktestDataHandler(strategy_universe, 
data_sources=[data_source])\n\n # Construct an Alpha Model that simply provides\n # static allocations to a universe of assets\n # In this case 60% SPY ETF, 40% AGG ETF,\n # rebalanced at the end of each month\n strategy_alpha_model = FixedSignalsAlphaModel({'EQ:SPY': 0.6, 'EQ:AGG': 0.4})\n strategy_backtest = BacktestTradingSession(\n start_dt,\n end_dt,\n strategy_universe,\n strategy_alpha_model,\n rebalance='end_of_month',\n long_only=True,\n cash_buffer_percentage=0.01,\n data_handler=data_handler\n )\n strategy_backtest.run()\n\n # Construct benchmark assets (buy & hold SPY)\n benchmark_assets = ['EQ:SPY']\n benchmark_universe = StaticUniverse(benchmark_assets)\n\n # Construct a benchmark Alpha Model that provides\n # 100% static allocation to the SPY ETF, with no rebalance\n benchmark_alpha_model = FixedSignalsAlphaModel({'EQ:SPY': 1.0})\n benchmark_backtest = BacktestTradingSession(\n start_dt,\n end_dt,\n benchmark_universe,\n benchmark_alpha_model,\n rebalance='buy_and_hold',\n long_only=True,\n cash_buffer_percentage=0.01,\n data_handler=data_handler\n )\n benchmark_backtest.run()\n\n # Performance Output\n tearsheet = TearsheetStatistics(\n strategy_equity=strategy_backtest.get_equity_curve(),\n benchmark_equity=benchmark_backtest.get_equity_curve(),\n title='60/40 US Equities/Bonds'\n )\n tearsheet.plot_results()\n", "sub_path": "examples/01_60_40/qstrader_strat.py", "file_name": "qstrader_strat.py", "file_ext": "py", "file_size_in_byte": 2739, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.Timestamp", "line_number": 16, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pandas.Timestamp", "line_number": 17, "usage_type": "call"}, {"api_name": "pytz.UTC", "line_number": 17, "usage_type": "attribute"}, {"api_name": "qstrader.asset.universe.static.StaticUniverse", "line_number": 22, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 26, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 26, "usage_type": "attribute"}, {"api_name": "qstrader.data.daily_bar_csv.CSVDailyBarDataSource", "line_number": 27, "usage_type": "call"}, {"api_name": "qstrader.asset.equity.Equity", "line_number": 27, "usage_type": "argument"}, {"api_name": "qstrader.data.backtest_data_handler.BacktestDataHandler", "line_number": 28, "usage_type": "call"}, {"api_name": "qstrader.alpha_model.fixed_signals.FixedSignalsAlphaModel", "line_number": 34, "usage_type": "call"}, {"api_name": "qstrader.trading.backtest.BacktestTradingSession", "line_number": 35, "usage_type": "call"}, {"api_name": "qstrader.asset.universe.static.StaticUniverse", "line_number": 49, "usage_type": "call"}, {"api_name": "qstrader.alpha_model.fixed_signals.FixedSignalsAlphaModel", "line_number": 53, "usage_type": "call"}, {"api_name": "qstrader.trading.backtest.BacktestTradingSession", "line_number": 54, "usage_type": "call"}, {"api_name": "qstrader.statistics.tearsheet.TearsheetStatistics", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "451593203", "text": "import urllib.request\nimport tarfile\nfrom tqdm import tqdm\nimport os\n\nclass DownloadProgressBar(tqdm):\n def update_to(self, b=1, bsize=1, tsize=None):\n if tsize is not None:\n self.total = tsize\n self.update(b * bsize - self.n)\n\ndef download_url(url, output_path):\n with DownloadProgressBar(unit='B', unit_scale=True,\n miniters=1, desc=url.split('/')[-1]) as t:\n urllib.request.urlretrieve(url, 
filename=output_path, reporthook=t.update_to)\n\nurl = 'http://homepages.inf.ed.ac.uk/jyamagis/release/VCTK-Corpus.tar.gz'\noutput_path = 'VCTK-Corpus.tar.gz'\n\ndownload_url(url, output_path)\nprint('Download complete')\nprint('Extracting tar file. This will take a while...')\ntf = tarfile.open(output_path)\ntf.extractall()\nprint('Extract complete!')\nos.remove(output_path)\nprint(\"Deleted \" + output_path)\n", "sub_path": "data/download.py", "file_name": "download.py", "file_ext": "py", "file_size_in_byte": 860, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "tqdm.tqdm", "line_number": 6, "usage_type": "name"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 15, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 15, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 15, "usage_type": "name"}, {"api_name": "tarfile.open", "line_number": 23, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "26044478", "text": "#!/usr/bin/env python3\n#\n# Copyright 2015 Greg Yang\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport sys\nimport yaml\nfrom mako.template import Template\n\nparser = argparse.ArgumentParser(\n description='render mako template based on a YAML configuration')\nparser.add_argument('-o', '--out',\n help='output file (default: print to STDOUT).')\nparser.add_argument('-s', '--schema',\n help='Schema used in YAML validation with pykwalify')\nparser.add_argument('template', help='Mako template')\nparser.add_argument('source', help='YAML data source')\n\nargs = parser.parse_args()\n\nwith open(args.source) as sourceFile:\n source = yaml.load(sourceFile)\n\nif args.schema:\n from pykwalify.core import Core\n c = Core(source_file=args.source, schema_files=[args.schema])\n c.validate(raise_exception=True)\n\nwith open(args.template) as templateFile:\n templateContent = templateFile.read()\n\ntemplate = Template(templateContent)\noutputConent = template.render(tcg=source)\n\nif args.out:\n with open(args.out, 'w') as outputFile:\n outputFile.write(outputConent)\nelse:\n print(outputConent)\n", "sub_path": "tcg.py", "file_name": "tcg.py", "file_ext": "py", "file_size_in_byte": 1652, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 23, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 35, "usage_type": "call"}, {"api_name": "pykwalify.core.Core", "line_number": 39, "usage_type": "call"}, {"api_name": "mako.template.Template", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "249181218", "text": "import simplejson as json\nimport cv2\nimport os\n\nfrom .models import BaseImage\nfrom .forms import FormBaseImage\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.core.files.storage import 
default_storage\nfrom django.core.files.base import ContentFile\nfrom django.conf import settings\nfrom haystack.query import SearchQuerySet\n\ndef extractImageBIC_part(path):\n\t#load image\n\t#0 = gray\n\t#without = rgb\n\tpath_dir= '/var/www/html/sri/'\n\timg= cv2.imread(path_dir+path, 0)\n\n\t#edge histogram (texture)\n\t#img= cv2.Canny(img,100,200)\n\n\t#resize the image to 400x400\n\timg= cv2.resize(img, (400, 400))\n\n\t#load histogram opencv\n\t#plt.hist(img.ravel(),256,[0,256])\n\n\t#load my histograms bic (border/interior)\n\thistogramBorder1= []\n\tfor i in range(256):\n\t\thistogramBorder1.append(0)\n\n\thistogramInside1= []\n\tfor i in range(256):\n\t\thistogramInside1.append(0)\n\n\t#load my histograms bic 2\n\thistogramBorder2= []\n\tfor i in range(256):\n\t\thistogramBorder2.append(0)\n\n\thistogramInside2= []\n\tfor i in range(256):\n\t\thistogramInside2.append(0)\n\n\t#load my histograms bic 3\n\thistogramBorder3= []\n\tfor i in range(256):\n\t\thistogramBorder3.append(0)\n\n\thistogramInside3= []\n\tfor i in range(256):\n\t\thistogramInside3.append(0)\n\n\t#load my histograms bic 4\n\thistogramBorder4= []\n\tfor i in range(256):\n\t\thistogramBorder4.append(0)\n\n\thistogramInside4= []\n\tfor i in range(256):\n\t\thistogramInside4.append(0)\n\n\t#load my histograms bic 5\n\thistogramBorder5= []\n\tfor i in range(256):\n\t\thistogramBorder5.append(0)\n\n\thistogramInside5= []\n\tfor i in range(256):\n\t\thistogramInside5.append(0)\n\n\tw= img.shape[1]\n\th= img.shape[0]\n\tlimiar= 12\n\tfor i in range(h):\n\t\tfor j in range(w):\n\t\t\t#darken partition 1\n\t\t\tif(j >= 0 and j <= w/2):#col\n\t\t\t\tif(i >= 0 and i <= h/2):#lin\n\t\t\t\t\t#img[i][j]= 255 #white\n\t\t\t\t\t#bic part1\n\t\t\t\t\tif(w > j+2 and h > i+2):\n\t\t\t\t\t\tpixel= int(img[i+1][j+1])\n\t\t\t\t\t\t#the four nearest neighbors\n\t\t\t\t\t\tv1= int(img[i][j])\n\t\t\t\t\t\tv2= int(img[i][j+2])\n\t\t\t\t\t\tv3= int(img[i+2][j])\n\t\t\t\t\t\tv4= int(img[i+2][j+2])\n\t\t\t\t\t\t#border pixels\n\t\t\t\t\t\tif(abs(pixel-v1) > limiar and abs(pixel-v2) > limiar and abs(pixel-v3) > limiar and abs(pixel-v4) > limiar):\n\t\t\t\t\t\t\thistogramBorder1[pixel]= histogramBorder1[pixel] + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\thistogramInside1[pixel]= histogramInside1[pixel] + 1\n\t\t\t#darken partition 2\n\t\t\tif(j >= w/2 and j <= w):#col\n\t\t\t\tif(i >= 0 and i <= h/2):#lin\n\t\t\t\t\t#img[i][j]= 0\n\t\t\t\t\t#bic part2\n\t\t\t\t\tif(w > j+2 and h > i+2):\n\t\t\t\t\t\tpixel= int(img[i+1][j+1])\n\t\t\t\t\t\t#the four nearest neighbors\n\t\t\t\t\t\tv1= int(img[i][j])\n\t\t\t\t\t\tv2= int(img[i][j+2])\n\t\t\t\t\t\tv3= int(img[i+2][j])\n\t\t\t\t\t\tv4= int(img[i+2][j+2])\n\t\t\t\t\t\t#border pixels\n\t\t\t\t\t\tif(abs(pixel-v1) > limiar and abs(pixel-v2) > limiar and abs(pixel-v3) > limiar and abs(pixel-v4) > limiar):\n\t\t\t\t\t\t\thistogramBorder2[pixel]= histogramBorder2[pixel] + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\thistogramInside2[pixel]= histogramInside2[pixel] + 1\n\t\t\t#darken partition 3\n\t\t\tif(j >= 0 and j <= w/2):#col\n\t\t\t\tif(i >= h/2 and i <= h):#lin\n\t\t\t\t\t#img[i][j]= 0\n\t\t\t\t\t#bic part3\n\t\t\t\t\tif(w > j+2 and h > i+2):\n\t\t\t\t\t\tpixel= int(img[i+1][j+1])\n\t\t\t\t\t\t#the four nearest neighbors\n\t\t\t\t\t\tv1= int(img[i][j])\n\t\t\t\t\t\tv2= int(img[i][j+2])\n\t\t\t\t\t\tv3= int(img[i+2][j])\n\t\t\t\t\t\tv4= int(img[i+2][j+2])\n\t\t\t\t\t\t#border pixels\n\t\t\t\t\t\tif(abs(pixel-v1) > limiar and abs(pixel-v2) > limiar and 
abs(pixel-v3) > limiar and abs(pixel-v4) > limiar):\n\t\t\t\t\t\t\thistogramBorder3[pixel]= histogramBorder3[pixel] + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\thistogramInside3[pixel]= histogramInside3[pixel] + 1\n\t\t\t#darken partition 4\n\t\t\tif(j >= w/2 and j <= w):#col\n\t\t\t\tif(i >= h/2 and i <= h):#lin\n\t\t\t\t\t#img[i][j]= 255\n\t\t\t\t\t#bic part4\n\t\t\t\t\tif(w > j+2 and h > i+2):\n\t\t\t\t\t\tpixel= int(img[i+1][j+1])\n\t\t\t\t\t\t#the four nearest neighbors\n\t\t\t\t\t\tv1= int(img[i][j])\n\t\t\t\t\t\tv2= int(img[i][j+2])\n\t\t\t\t\t\tv3= int(img[i+2][j])\n\t\t\t\t\t\tv4= int(img[i+2][j+2])\n\t\t\t\t\t\t#border pixels\n\t\t\t\t\t\tif(abs(pixel-v1) > limiar and abs(pixel-v2) > limiar and abs(pixel-v3) > limiar and abs(pixel-v4) > limiar):\n\t\t\t\t\t\t\thistogramBorder4[pixel]= histogramBorder4[pixel] + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\thistogramInside4[pixel]= histogramInside4[pixel] + 1\n\t\t\t#darken partition 5\n\t\t\tif(j >= w/4 and j <= (w/4)*3):#col\n\t\t\t\tif(i >= h/4 and i <= (h/4)*3):#lin\n\t\t\t\t\t#img[i][j]= 255\n\t\t\t\t\t#bic part5\n\t\t\t\t\tif(w > j+2 and h > i+2):\n\t\t\t\t\t\tpixel= int(img[i+1][j+1])\n\t\t\t\t\t\t#the four nearest neighbors\n\t\t\t\t\t\tv1= int(img[i][j])\n\t\t\t\t\t\tv2= int(img[i][j+2])\n\t\t\t\t\t\tv3= int(img[i+2][j])\n\t\t\t\t\t\tv4= int(img[i+2][j+2])\n\t\t\t\t\t\t#border pixels\n\t\t\t\t\t\tif(abs(pixel-v1) > limiar and abs(pixel-v2) > limiar and abs(pixel-v3) > limiar and abs(pixel-v4) > limiar):\n\t\t\t\t\t\t\thistogramBorder5[pixel]= histogramBorder5[pixel] + 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\thistogramInside5[pixel]= histogramInside5[pixel] + 1\n\tdescriptor= []\n\t#add descriptor 1\n\tdescriptor.append(histogramBorder1+histogramInside1)\n\t#add descriptor 2\n\tdescriptor.append(histogramBorder2+histogramInside2)\n\t#add descriptor 3\n\tdescriptor.append(histogramBorder3+histogramInside3)\n\t#add descriptor 4\n\tdescriptor.append(histogramBorder4+histogramInside4)\n\t#add descriptor 5\n\tdescriptor.append(histogramBorder5+histogramInside5)\n\treturn descriptor\n\ndef compareHistogramBIC_part(histogram1, histogram2, limiar):\n    #bigger than or equal to 60% similarity\n    similarityMin= 0.6\n    len_histogram1= len(histogram1[0])\n    len_histogram2= len(histogram2[0])\n    similarity= 0\n    for n in range(len(histogram1)):\n        equal= 0\n        for i in range(len_histogram1):\n            if abs(int(histogram1[n][i])-int(histogram2[n][i])) <= limiar:\n            \tequal= equal + 1\n        similarity= (equal/len_histogram2)+similarity\n    #5 = number of parts\n    similarityGet= similarity/5\n    if similarityGet >= similarityMin:\n        return [True, similarityGet]\n    else:\n        return [False, similarityGet]\n\ndef normalizeListBic_Part(part):\n\tbic_part= []\n\tfor i in range(5):\n\t\tif i == 0:\n\t\t\tnormalize= part.split(']')[i].replace(\"[[\",'').replace(' ','').split(',')\n\t\telse:\n\t\t\tnormalize= part.split(']')[i].replace(', [','').replace(' ','').split(',')\n\t\tbic_part.append(normalize)\n\treturn bic_part\n\ndef orderBySimilarity(resultQueryImage):\n\tresultQueryImage.sort(key=lambda i:i[1])\n\tresultQueryImageOrder= list()\n\tfor i in range(len(resultQueryImage)):\n\t\tresultQueryImageOrder.append(resultQueryImage[i][0])\n\tresultQueryImageOrder.reverse()\n\treturn resultQueryImageOrder[:30]\n\ndef combinarTextImage(resultQueryText, resultQueryImage):\n\tresultQueryTextImage= list()\n\tresultQueryImagePart= list()\n\tresultQueryTextsPart= list()\n\tfor rqi in resultQueryImage:\n\t\tcount= 0\n\t\tfor rqt in resultQueryText:\n\t\t\tif rqi.id == 
rqt.object.id:\n\t\t\t\tresultQueryTextImage.append(rqt.object)\n\t\t\t\tcount= count +1\n\t\t\t\tbreak\n\t\tif count == 0:\n\t\t\tresultQueryImagePart.append(rqi)\n\tfor rqt in resultQueryText:\n\t\tcount= 0\n\t\tfor rti in resultQueryTextImage:\n\t\t\tif rqt.object.id == rti.id:\n\t\t\t\tcount= count +1\n\t\tif count == 0:\n\t\t\tresultQueryTextsPart.append(rqt.object)\n\treturn resultQueryTextImage+resultQueryImagePart+resultQueryTextsPart\n\ndef home(request):\n\tcontext= {}\n\tresultQueryText= {}\n\tresultQueryImage= list()\n\tresultQueryTextImage= list()\n\tif request.method == 'POST':\n\t\tform= FormBaseImage(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tcontext['noneResult']= False\n\t\t\tsearch_text= form.cleaned_data['search_text']\n\t\t\tsearch_image= form.cleaned_data['search_image']#search_image_name\n\t\t\tif search_text == '' and search_image == None:\n\t\t\t\tcontext['noneResult']= True\n\t\t\telse:\n\t\t\t\t#search_text contains text to process\n\t\t\t\tif search_text != '':\n\t\t\t\t\tcontext['is_text']= True\n\t\t\t\t\t#perform the TBRI textual search\n\t\t\t\t\tresultQueryText= SearchQuerySet().autocomplete(content_auto=search_text)\n\t\t\t\t\ttotal_query_texts= len(resultQueryText)\n\t\t\t\t\tif total_query_texts == 0:\n\t\t\t\t\t\tresultQueryText= {}\n\t\t\t\t\t\tcontext['is_text']= False\n\t\t\t\t\t\tcontext['noneResult']= True\n\t\t\t\t\tcontext['total_query_texts']= total_query_texts\n\t\t\t\t#search_image contains an image to process\n\t\t\t\tif search_image != None:\n\t\t\t\t\tcontext['is_image']= True\n\t\t\t\t\t#get the image from the POST request\n\t\t\t\t\tsearch_image_upload= request.FILES['search_image']\n\t\t\t\t\t#store the path of the uploaded image\n\t\t\t\t\tpathImage= str('tmp/'+search_image_upload.name)\n\t\t\t\t\t#save the image temporarily at the path\n\t\t\t\t\tpath= default_storage.save(pathImage, ContentFile(search_image_upload.read()))\n\t\t\t\t\t#get the image and add it at the path\n\t\t\t\t\ttmp_file= os.path.join(settings.MEDIA_ROOT, path)\n\t\t\t\t\t#get the feature vector of the uploaded image\n\t\t\t\t\textractImageUpload= extractImageBIC_part(pathImage)\n\t\t\t\t\t#load (BaseImage)\n\t\t\t\t\tallBaseImages= BaseImage.objects.all()\n\t\t\t\t\t#search for images similar to the upload in BaseImage\n\t\t\t\t\ttotal_query_images= 0\n\t\t\t\t\tfor q_img in allBaseImages:\n\t\t\t\t\t\t#compare the uploaded image against those in BaseImage\n\t\t\t\t\t\tresultcompareHistogramBIC= compareHistogramBIC_part(extractImageUpload, normalizeListBic_Part(q_img.descriptorBIC_part), 16)\n\t\t\t\t\t\tif resultcompareHistogramBIC[0:1][0]:\n\t\t\t\t\t\t\tresultQueryImage.append((q_img, resultcompareHistogramBIC[1:2][0]))\n\t\t\t\t\t\t\t##print([q_img, resultcompareHistogramBIC[1:2][0]][1])\n\t\t\t\t\t\t\ttotal_query_images= total_query_images +1\n\t\t\t\t\t#order by similarity\n\t\t\t\t\tresultQueryImage= orderBySimilarity(resultQueryImage)\n\t\t\t\t\tif total_query_images == 0:\n\t\t\t\t\t\tcontext['is_image']= False\n\t\t\t\t\t\tcontext['noneResult']= True\n\t\t\t\t\tcontext['total_query_images']= total_query_images\n\t\t\t\t\t#delete the uploaded image after processing\n\t\t\t\t\tdefault_storage.delete(path)\n\t\t\t\t#combine the textual query with the content-based one\n\t\t\t\tif search_text != '' and search_image != None:\n\t\t\t\t\tprint('combining...')\n\t\t\t\t\tcontext['is_text']= False\n\t\t\t\t\tcontext['is_image']= False\n\t\t\t\t\tcontext['is_text_image']= True\n\t\t\t\t\tresultQueryTextImage= 
combinarTextImage(resultQueryText, resultQueryImage)\n\t\t\t\t\tcontext['total_query_text_image']= len(resultQueryTextImage)\n\t\t\t\t\tif len(resultQueryTextImage) == 0:\n\t\t\t\t\t\tcontext['is_text_image']= False\n\t\t\t\t\t\tcontext['noneResult']= True\n\t\t\t#recreate the query form fields\n\t\t\tform= FormBaseImage()\n\telse:\n\t\tform= FormBaseImage()\n\tcontext['form']= form\n\tcontext['resultQueryText']= resultQueryText\n\tcontext['resultQueryImage']= resultQueryImage\n\tcontext['resultQueryTextImage']= resultQueryTextImage\n\treturn render(request, 'home.html', context)\n\nimport unicodedata\nimport re\ndef removeCharEspecials(palavra):\n\t#Unicode normalize transforms a character into its latin equivalent.\n\tnfkd= unicodedata.normalize('NFKD', palavra)\n\tpalavraSemAcento= u\"\".join([c for c in nfkd if not unicodedata.combining(c)])\n\t#Use a regular expression to return the word with only numbers, letters and spaces\n\treturn re.sub('[^a-zA-Z0-9 \\\\\\]', '', palavraSemAcento)\n\ndef dropRepet(l):\n\t#l= ['copo','copos','copaiba','girafa','amor','copo','Copos','feliz']\n\ts= []\n\tif len(l) > 0:\n\t\tif len(s) == 0:\n\t\t\ts.append(l[0])\n\t#print(l)\n\tfor i in range(1,len(l)):\n\t\tcont= False\n\t\tfor j in range(len(s)):\n\t\t\tif l[i].lower() == s[j].lower():\n\t\t\t\tcont= True\n\t\tif not cont:\n\t\t\ts.append(l[i])\n\t#print(s)\n\treturn s\n\n#feature: improve the autocomplete to give good suggestions\ndef autocomplete(request):\n\tsuggestions= []\n\tif request.is_ajax:\n\t\tword= request.GET.get('terms','')\n\t\tprint('word_to_search: '+word)\n\t\tif word != '':\n\t\t\tresultSuggestions= SearchQuerySet().autocomplete(content_auto=word)\n\t\t\tfor rs in resultSuggestions:\n\t\t\t\ttext_s= rs.object.search_text.split(\" \")\n\t\t\t\tfor w in range(len(text_s)):\n\t\t\t\t\tterm= text_s[w]\n\t\t\t\t\tindex_s= term.lower().find(word.lower())\n\t\t\t\t\tif index_s != -1:\n\t\t\t\t\t\tsuggestions.append(removeCharEspecials(term))\n\t#drop repeated items from the list\n\tsuggestions= dropRepet(suggestions)\n\t#limit the length of the suggestions list\n\tsuggestions= suggestions[:10]\n\treturn HttpResponse(json.dumps(suggestions))\n", "sub_path": "pages/views_bic_part_001.py", "file_name": "views_bic_part_001.py", "file_ext": "py", "file_size_in_byte": 11719, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "cv2.imread", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 25, "usage_type": "call"}, {"api_name": "forms.FormBaseImage", "line_number": 243, "usage_type": "call"}, {"api_name": "haystack.query.SearchQuerySet", "line_number": 255, "usage_type": "call"}, {"api_name": "django.core.files.storage.default_storage.save", "line_number": 270, "usage_type": "call"}, {"api_name": "django.core.files.storage.default_storage", "line_number": 270, "usage_type": "name"}, {"api_name": "django.core.files.base.ContentFile", "line_number": 270, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 272, "usage_type": "call"}, {"api_name": "os.path", "line_number": 272, "usage_type": "attribute"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 272, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 272, "usage_type": "name"}, {"api_name": "models.BaseImage.objects.all", "line_number": 276, "usage_type": "call"}, {"api_name": "models.BaseImage.objects", "line_number": 276, "usage_type": "attribute"}, {"api_name": "models.BaseImage", "line_number": 
276, "usage_type": "name"}, {"api_name": "django.core.files.storage.default_storage.delete", "line_number": 293, "usage_type": "call"}, {"api_name": "django.core.files.storage.default_storage", "line_number": 293, "usage_type": "name"}, {"api_name": "forms.FormBaseImage", "line_number": 306, "usage_type": "call"}, {"api_name": "forms.FormBaseImage", "line_number": 308, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 313, "usage_type": "call"}, {"api_name": "unicodedata.normalize", "line_number": 319, "usage_type": "call"}, {"api_name": "unicodedata.combining", "line_number": 320, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 322, "usage_type": "call"}, {"api_name": "haystack.query.SearchQuerySet", "line_number": 348, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 360, "usage_type": "call"}, {"api_name": "simplejson.dumps", "line_number": 360, "usage_type": "call"}]} +{"seq_id": "588357204", "text": "from decimal import Decimal\nimport datetime\nimport logging\n\nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom mysql.connector import IntegrityError\n\nfrom common.functions import desktopSizer\nfrom database.database import PyDB, DBQueryModel\nfrom widgets.messageboxes import InfoMsgBox\nfrom widgets.dialogs import JNewDialog\nfrom widgets.widgets import JLabel, JPushButton, JCancelButton, JTableView, LabelledLineEditRO, \\\n LabelledComboBox, LabelledDateEditRO\n\n\nclass ReverseReceiptDialog(JNewDialog):\n def __init__(self, rec_id, parent=None):\n super().__init__(parent)\n\n self.rec_id = rec_id\n\n self.dbHandler = PyDB.dbHandler\n\n self.setWindowTitle(\"Post reverse income:\")\n h, v = desktopSizer(250, 200)\n self.resize(1200, 600)\n\n self.vertMain = QtWidgets.QVBoxLayout(self)\n self.horiButtons = QtWidgets.QHBoxLayout()\n\n self.lIncomeID = LabelledLineEditRO(\"Income id:\", self)\n self.dePostingDate = LabelledDateEditRO(\"Original posting date:\", self)\n self.dePayDate = LabelledDateEditRO(\"Original payment date:\", self)\n self.lIncomeTot = LabelledLineEditRO(\"Income total:\", self)\n self.lPayer = LabelledLineEditRO(\"Payer:\", self)\n self.cPayment = LabelledComboBox(\"Payment type:\", self, True)\n self.cBank = LabelledComboBox(\"Bank:\", self, True)\n self.label_Info = JLabel(\"\", self)\n\n s_tempInfo = \"\"\"Reverses the action of a receipt, e.g. 
generates charges and rent arrears\n for everything posted on the receipt\"\"\"\n self.label_Info.setText(s_tempInfo)\n\n self.mod_recAlloc = DBQueryModel(\"SELECT * FROM income_allocation WHERE IncomeID = {}\".format(rec_id), self)\n\n self.tab_recAlloc = JTableView(self)\n self.tab_recAlloc.setModel(self.mod_recAlloc)\n\n self.tab_recAlloc.resizeColumnsToContents()\n\n self.cPayment.cb.addItems(\n [each[0] for each in self.dbHandler.bufferedExecute(\"SELECT PaymentDescription FROM types_payment\")])\n self.cBank.cb.addItems(\n [each[0] for each in self.dbHandler.bufferedExecute(\"SELECT BankDescription FROM types_bank\")])\n\n self.btnAccept = JPushButton(\"Accept\", self)\n self.btnCancel = JCancelButton(\"Cancel\", self)\n\n self.horiButtons.addWidget(self.btnAccept)\n self.horiButtons.addWidget(self.btnCancel)\n\n self.vertMain.addWidget(self.lIncomeID)\n self.vertMain.addWidget(self.dePostingDate)\n self.vertMain.addWidget(self.dePayDate)\n self.vertMain.addWidget(self.lIncomeTot)\n self.vertMain.addWidget(self.lPayer)\n self.vertMain.addWidget(self.cPayment)\n self.vertMain.addWidget(self.cBank)\n self.vertMain.addWidget(self.tab_recAlloc)\n self.vertMain.addWidget(self.label_Info, alignment=QtCore.Qt.AlignHCenter)\n self.vertMain.addLayout(self.horiButtons)\n\n self.actions()\n self.populate()\n\n def actions(self):\n\n self.btnAccept.clicked.connect(self.save)\n self.btnCancel.clicked.connect(self.reject)\n\n def populate(self):\n\n data = self.dbHandler.bufferedExecute(\"\"\"SELECT IncomeTotal, IncomeDate, PayDate, Payer, PaymentDescription, BankDescription\n FROM income LEFT JOIN types_bank\n ON income.BankType = types_bank.BankType\n LEFT JOIN types_payment\n ON income.PaymentType = types_payment.PaymentType\n WHERE IncomeID = %s\"\"\", (self.rec_id,))[0]\n\n self.lIncomeID.le.setText(str(self.rec_id))\n self.dePostingDate.de.setDate(data[1])\n self.dePayDate.de.setDate(data[2])\n self.lIncomeTot.le.setText(str(data[0]))\n self.lPayer.le.setText(data[3])\n self.cPayment.cb.setCurrentText(data[4])\n self.cBank.cb.setCurrentText(data[5])\n\n def save(self):\n logging.info(\"Running Save method\")\n try:\n self.dbHandler.startTransaction()\n rs_temp = self.dbHandler.bufferedExecute(\"\"\"SELECT IncomeTotal, Payer, BankType, PayDate\n FROM income\n WHERE IncomeID = %s\"\"\", (self.rec_id,))[0]\n\n rs_tempAlloc = self.dbHandler.bufferedExecute(\"\"\"SELECT IncomeID, AllocationID, RentCode,\n Landlord, ChargeID, ChargeType, AllocationTotal\n FROM income_allocation\n WHERE IncomeID = %s\"\"\", (self.rec_id,))\n\n r_code = self.dbHandler.bufferedExecute(\"\"\"SELECT RentCode FROM income_allocation WHERE\n IncomeID = %s LIMIT 1\"\"\", (self.rec_id,))[0][0]\n\n d_recDate = datetime.date.today()\n d_payDate = d_recDate\n total = - Decimal(rs_temp[0])\n payer = rs_temp[1]\n payment_type = 2\n bank_type = rs_temp[2]\n last_income_date = rs_temp[3]\n\n rec_no = self.dbHandler.insertID(\"\"\"INSERT INTO income\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\"\",\n (None, d_recDate, total, payer, bank_type, \"N\", payment_type, d_payDate))\n\n #insert into bank_transaction\n self.dbHandler.bufferedExecute(\"\"\"INSERT INTO bank_transaction\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\",\n (None, rec_no, d_recDate, payer, \"R\", r_code, 36, total, bank_type))\n\n rs_insertAlloc = []\n\n for record in rs_tempAlloc:\n s_allocationID = record[1]\n s_rentCode = record[2]\n l_landlord = record[3]\n l_chargeID = record[4]\n l_chargeType = record[5]\n l_allocTotal = record[6]\n\n # Generate new receipt\n 
rs_insertAlloc.append((rec_no, s_allocationID, s_rentCode, l_landlord,\n l_chargeID, l_chargeType, -l_allocTotal))\n\n # \"Unclear\" charges\n # If rent then increase arrears\n # If charge, first check charges arc - if in charges arc then move back to charges and delete\n # Otherwise increase existing charge balance by receipt amount\n if l_chargeType == 1:\n self.dbHandler.bufferedExecute(\"UPDATE rents SET Arrears = %s WHERE RentCode = %s\",\n (l_allocTotal, s_rentCode))\n else:\n logging.info(\"Testing if charge row exists\")\n row_exists = self.dbHandler.bufferedExecute(\"SELECT COUNT(1) FROM charges WHERE ChargeID = %s\", (l_chargeID,))[0][0]\n print(row_exists)\n logging.info(\"If 1 row exists, if 0 it does not result is {}\".format(row_exists))\n # Here we check if a charge still exists. If so we update, otherwise we insert a new row\n if row_exists == 1:\n logging.info(\"Charge already exists, running SQL UPDATE row\")\n self.dbHandler.bufferedExecute(\"\"\"UPDATE charges\n SET ChargeBalance = ChargeBalance + %s\n WHERE ChargeID = %s\"\"\", (l_allocTotal, l_chargeID))\n else:\n chargeDescription = self.dbHandler.bufferedExecute(\n \"SELECT ChargeDescription FROM types_charge WHERE ChargeType = %s LIMIT 1\",\n (l_chargeType,))[0][0]\n\n allocationTotal = self.dbHandler.bufferedExecute(\"\"\"SELECT AllocationTotal \n FROM income_allocation\n WHERE ChargeID = %s\"\"\",(l_chargeID,))\n\n # chargeTotal = sum(chargeTotal)\n logging.info(\"Allocation total is {}\".format(allocationTotal))\n chargeTotal = 0\n # This is here to deal with negative values in the allocation table\n for each in allocationTotal:\n logging.info(\"each is {}\".format(each[0]))\n chargeTotal += each[0]\n\n logging.info(\"Attempting SQL query to INSERT charge row\")\n self.dbHandler.bufferedExecute(\"\"\"INSERT INTO charges\n SET \n ChargeID = %s,\n RentCode = %s,\n ChargeType = %s, \n ChargeStartDate = %s,\n ChargeTotal = %s, \n ChargeDetails = %s,\n ChargeBalance = %s\n \"\"\", (l_chargeID, s_rentCode, l_chargeType,\n last_income_date, chargeTotal,\n (\"£{} {} created by reversing receipt \"\n \"originally cleared on {}\".format\n (chargeTotal,chargeDescription,\n last_income_date)), l_allocTotal))\n ## This needs to be done in a manner in which ChargeBalance = ChargeBalance + %s is preserved\n ## A charge may have not been wholly paid in which case the charge may exist\n ## If charge exists still use previous method otherwise uses new query\n\n self.dbHandler.insertMany(\"\"\"INSERT INTO income_allocation\n VALUES(%s, %s, %s, %s, %s, %s, %s)\"\"\", rs_insertAlloc)\n\n self.dbHandler.commit()\n\n InfoMsgBox(\"Successfully generated reverse receipt\", None, None, self).exec()\n\n except:\n self.dbHandler.rollback()\n InfoMsgBox(\"Generation of reverse receipt failed\", None, None, self).exec()\n\n self.accept()\n", "sub_path": "income/reversereceiptdialog.py", "file_name": "reversereceiptdialog.py", "file_ext": "py", "file_size_in_byte": 10951, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "widgets.dialogs.JNewDialog", "line_number": 16, "usage_type": "name"}, {"api_name": "database.database.PyDB.dbHandler", "line_number": 22, "usage_type": "attribute"}, {"api_name": "database.database.PyDB", "line_number": 22, "usage_type": "name"}, {"api_name": "common.functions.desktopSizer", "line_number": 25, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", 
"line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 29, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 29, "usage_type": "name"}, {"api_name": "widgets.widgets.LabelledLineEditRO", "line_number": 31, "usage_type": "call"}, {"api_name": "widgets.widgets.LabelledDateEditRO", "line_number": 32, "usage_type": "call"}, {"api_name": "widgets.widgets.LabelledDateEditRO", "line_number": 33, "usage_type": "call"}, {"api_name": "widgets.widgets.LabelledLineEditRO", "line_number": 34, "usage_type": "call"}, {"api_name": "widgets.widgets.LabelledLineEditRO", "line_number": 35, "usage_type": "call"}, {"api_name": "widgets.widgets.LabelledComboBox", "line_number": 36, "usage_type": "call"}, {"api_name": "widgets.widgets.LabelledComboBox", "line_number": 37, "usage_type": "call"}, {"api_name": "widgets.widgets.JLabel", "line_number": 38, "usage_type": "call"}, {"api_name": "database.database.DBQueryModel", "line_number": 44, "usage_type": "call"}, {"api_name": "widgets.widgets.JTableView", "line_number": 46, "usage_type": "call"}, {"api_name": "widgets.widgets.JPushButton", "line_number": 56, "usage_type": "call"}, {"api_name": "widgets.widgets.JCancelButton", "line_number": 57, "usage_type": "call"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 70, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore", "line_number": 70, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 99, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 114, "usage_type": "attribute"}, {"api_name": "decimal.Decimal", "line_number": 116, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 153, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 156, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 159, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 173, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 177, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 180, "usage_type": "call"}, {"api_name": "widgets.messageboxes.InfoMsgBox", "line_number": 205, "usage_type": "call"}, {"api_name": "widgets.messageboxes.InfoMsgBox", "line_number": 209, "usage_type": "call"}]} +{"seq_id": "34532153", "text": "from scipy.io import loadmat\r\nimport matplotlib.pyplot as plt\r\nmnist_raw=loadmat('mnist-original.mat')\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.decomposition import PCA\r\n\r\nmnist = {'data':mnist_raw['data'].T,'target':mnist_raw[\"label\"][0]}\r\n\r\nx_train,x_test,y_train,y_test=train_test_split(mnist[\"data\"],mnist[\"target\"], random_state=0)\r\n\r\npca=PCA(.95) #ลดให้เหลือ 95%\r\ndata = pca.fit_transform(x_train) #ลดขนาด\r\nresult = pca.inverse_transform(data) #ทำให้กลับมาเหมือนเดิม\r\n#print(str(pca.n_components_))\r\nnew_complement=str(pca.n_components_)\r\n\r\n#SHOW image\r\nplt.figure(figsize=(8,4))\r\n\r\n#image feature 784 components\r\nplt.subplot(1,2,1)\r\nplt.imshow(mnist['data'][0].reshape(28,28),cmap=plt.cm.gray,interpolation='nearest')\r\nplt.xlabel('784 components')\r\nplt.title('Original')\r\n\r\n#image feature 154 components 95%\r\nplt.subplot(1,2,2)\r\nplt.imshow(result[0].reshape(28,28),cmap=plt.cm.gray,interpolation='nearest')\r\nplt.xlabel(new_complement+' components')\r\nplt.title('PAC')\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "PCAMnist.py", "file_name": "PCAMnist.py", "file_ext": 
"py", "file_size_in_byte": 1099, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "scipy.io.loadmat", "line_number": 3, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 9, "usage_type": "call"}, {"api_name": "sklearn.decomposition.PCA", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 22, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 24, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 24, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 28, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "475478885", "text": "import numpy as np\r\nimport csv\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import date, timedelta\r\nfrom pandas import * # pour read_excel\r\nimport os\r\n\r\n\r\n# Pour recuperer les donnees du fichier \r\n\r\ndef donnees (Nom, groupe, Sheet, un, deux, trois) :\r\n \r\n f = read_excel(Nom+'.xls', Sheet)\r\n liste = f.groupby(groupe).sum()\r\n nb_larves = liste[un]\r\n nb_inflo = liste[deux]\r\n nb_inflo_morte = liste[trois]\r\n\r\n return nb_larves, nb_inflo, nb_inflo_morte\r\n\r\n# Pour afficher trois courbes sur un meme graphe\r\n\r\ndef triplot (x, y1, y2, y3, y1_nom, y2_nom, y3_nom, titre, show=1) :\r\n \r\n plt.plot (x, y1, label=y1_nom)\r\n plt.plot (x, y2, label=y2_nom)\r\n plt.plot (x, y3, label=y3_nom)\r\n plt.legend ()\r\n plt.title (titre)\r\n plt.xticks (rotation=60)\r\n if (show==1) :\r\n plt.show ()\r\n \r\n# Pour afficher trois subplots\r\n\r\ndef trisubplot (x, y1, y2, y3, y1_nom, y2_nom, y3_nom, titre, xx, yy, yy_nom, xxx, yyy, yyy_nom) :\r\n \r\n plt.subplot(311)\r\n triplot(x, y1, y2, y3, y1_nom, y2_nom, y3_nom, titre, 0)\r\n plt.subplot(312)\r\n plt.plot(xx, yy, label=yy_nom)\r\n plt.subplot(313)\r\n plt.plot(xxx, yyy, label=yyy_nom)\r\n plt.show()\r\n \r\n# Pour afficher deux subplots\r\n\r\ndef deuxsubplot (x, y1, y2, y3, y1_nom, y2_nom, y3_nom, titre) :\r\n\r\n plt.subplot(211)\r\n plt.plot(x, y1, 
label=y1_nom)\r\n plt.plot(x, y2, label=y2_nom)\r\n plt.title(titre)\r\n plt.legend()\r\n plt.subplot(212)\r\n plt.plot(x, y3, label=y3_nom)\r\n plt.plot(x,np.zeros(len(x)),'k:')\r\n plt.legend()\r\n plt.show()", "sub_path": "old/Python/Exploration des donnees/Fonctions.py", "file_name": "Fonctions.py", "file_ext": "py", "file_size_in_byte": 1564, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 42, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 43, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 43, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 44, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 44, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 56, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "588515927", "text": "# -*- coding: utf-8 -*-\n\n# django imports\nfrom django.contrib.gis import forms\nfrom django.contrib.gis.geos import Point\nfrom django.utils.translation import ugettext_lazy as _\n\n# third party imports\nfrom lxml.etree import XMLSyntaxError\nfrom pykml import parser\n\n# project imports\nfrom apps.core.models import Spot\n\n\nclass ImportKmlForm(forms.Form):\n kml_file = forms.FileField(label=_('KML file'))\n\n def clean(self):\n cleaned_data = super(ImportKmlForm, self).clean()\n kml_file = cleaned_data.get('kml_file')\n if kml_file:\n file_text = kml_file.read()\n kml_file.seek(0)\n try:\n self.kml_doc = parser.fromstring(file_text)\n except XMLSyntaxError:\n self.add_error('kml_file', _('Not a valid KML file.'))\n\n def to_spots_list(self):\n assert hasattr(self, 'cleaned_data'), \"form's is_valid() method must\"\\\n \"be called before\"\n spots = []\n for el in self.kml_doc.Document.iterchildren(\n tag='{http://www.opengis.net/kml/2.2}Placemark'\n ):\n name = getattr(el, 'name')\n name = getattr(name, 'text')\n point = getattr(el, 'Point')\n coords = getattr(point, 'coordinates')\n if name and coords:\n try:\n lng, lat, alt = map(float, coords.text.split(','))\n except ValueError:\n try:\n lng, lat = map(float, coords.text.split(','))\n except ValueError:\n continue\n spots.append(\n Spot(\n name=name,\n location=Point(lng, lat)\n )\n )\n return spots\n\n\nclass ExportKMLForm(forms.Form):\n pretty_print = forms.BooleanField(label='pretty-print', required=False)\n", "sub_path": "django_project/apps/import_export/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 1932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.contrib.gis.forms.Form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.forms", "line_number": 16, "usage_type": "name"}, {"api_name": "django.contrib.gis.forms.FileField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.gis.forms", "line_number": 17, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 17, "usage_type": "call"}, {"api_name": "pykml.parser.fromstring", "line_number": 26, "usage_type": "call"}, {"api_name": "pykml.parser", "line_number": 26, "usage_type": "name"}, {"api_name": "lxml.etree.XMLSyntaxError", "line_number": 27, "usage_type": "name"}, {"api_name": "django.utils.translation.ugettext_lazy", "line_number": 28, "usage_type": "call"}, {"api_name": "apps.core.models.Spot", "line_number": 50, "usage_type": "call"}, {"api_name": "django.contrib.gis.geos.Point", "line_number": 52, "usage_type": "call"}, {"api_name": "django.contrib.gis.forms.Form", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.contrib.gis.forms", "line_number": 58, "usage_type": "name"}, {"api_name": 
"django.contrib.gis.forms.BooleanField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.contrib.gis.forms", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "307905043", "text": "from backend.models import Publisher\nfrom backend.serializers import PublisherSerializer\nfrom rest_framework import generics\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\nfrom bs4 import BeautifulSoup\nimport requests\nfrom django.views.decorators.csrf import csrf_exempt\n\n# Create your views here.\n\nclass PublisherList(generics.ListCreateAPIView):\n queryset = Publisher.objects.all()\n serializer_class = PublisherSerializer\n\n\nclass PublisherDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Publisher.objects.all()\n serializer_class = PublisherSerializer\n\n\ndef scrapeSite(self):\n page = requests.get('https://infogob.jne.gob.pe/Localidad/Peru_procesos-electorales_uHzVUEHmgS0%3dzE', verify=False)\n soup = BeautifulSoup(page.content, 'html.parser')\n divMapa = soup.find_all('svg', id='Mapa')\n regiones =[]\n\n for a in divMapa:\n a_tags = a.find_all('g')\n for q in a_tags:\n if 'q' in q.attrs:\n regiones.append({\n 'description' : q['title'],\n 'token' : q['q'],\n 'url_regidor' : '/regidor/?region=' + q['title'].lower() + '&token=' + q['q']\n })\n\n return JsonResponse(regiones, safe=False)\n\n\ndef scrapeRegion(request):\n url_default = 'https://infogob.jne.gob.pe'\n region = request.GET.get(\"region\")\n token = request.GET.get(\"token\")\n\n data ={\"token\":token}\n page = requests.post('https://infogob.jne.gob.pe/Localidad/Peru/' + region.lower() + '_procesos-electorales',params=data, allow_redirects=False)\n soup = BeautifulSoup(page.text, 'html.parser')\n href_region = soup.find_all('a')\n \n href = soup.find_all('a')\n for q in href:\n if 'href' in q.attrs:\n url_region = q['href']\n\n url_final = url_region.replace(\"%22\", \"\")\n \n page = requests.post(url_default + url_final)\n soup = BeautifulSoup(page.text, 'html.parser')\n table = soup.find('table', id='gridAutoridadesRegionales')\n table_body = table.find('tbody')\n rows = table_body.find_all('tr')[0]\n regidor = rows.find_all('td')[1]\n\n response = {\n 'GOBERNADOR REGIONAL' : regidor.find(text=True)\n }\n\n return JsonResponse(response, safe=False)", "sub_path": "infogob/backend/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2266, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "rest_framework.generics.ListCreateAPIView", "line_number": 13, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 13, "usage_type": "name"}, {"api_name": "backend.models.Publisher.objects.all", "line_number": 14, "usage_type": "call"}, {"api_name": "backend.models.Publisher.objects", "line_number": 14, "usage_type": "attribute"}, {"api_name": "backend.models.Publisher", "line_number": 14, "usage_type": "name"}, {"api_name": "backend.serializers.PublisherSerializer", "line_number": 15, "usage_type": "name"}, {"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rest_framework.generics", "line_number": 18, "usage_type": "name"}, {"api_name": "backend.models.Publisher.objects.all", "line_number": 19, "usage_type": "call"}, {"api_name": "backend.models.Publisher.objects", "line_number": 19, "usage_type": "attribute"}, {"api_name": "backend.models.Publisher", "line_number": 19, 
"usage_type": "name"}, {"api_name": "backend.serializers.PublisherSerializer", "line_number": 20, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 25, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 39, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 48, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 59, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 60, "usage_type": "call"}, {"api_name": "django.http.JsonResponse", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "595897817", "text": "from transformers import AdamW, get_linear_schedule_with_warmup\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.optim import Adam\nfrom config.datapath import TENSORBOARD_PATH, RESOURCE_PATH\nfrom config.const import DO_LOWER_CASE\nfrom config.const import NUM_EPOCH, DEVICE, MAX_GRAD_NORM, DEFAULT_LEARNING_RATE, ADAM_EPSILON, WARMUP_STEP\nfrom config.const import CONFIG, TOKENIZER, MODEL, ES_EPOCH\nfrom utils.model_path_or_name import model_path_or_name\n\nfrom utils.calc_loss import calc_loss\nfrom utils.create_tbx import create_tbx\nfrom data.data_manager import DataManager\nfrom models.model import Seq2Seq\nfrom time import time\nimport torch\n\nfrom utils.tokenizer import tokenizer\nfrom config.datapath import RESOURCE_TRAIN_PATH, RESOURCE_DEV_PATH\n\ndata_manager = DataManager(tokenizer)\ntrain_data_iterator = data_manager.load_file(RESOURCE_TRAIN_PATH, clear_cache=True)\ndev_data_iterator = data_manager.load_file(RESOURCE_DEV_PATH, clear_cache=True)\n\nvocab_size = tokenizer.vocab_size\nmodel = Seq2Seq\n\n\ndef train_iteration(model: Seq2Seq, data_iterator, optimizer, tbx, current_iter, device=DEVICE, print_step=1):\n epoch_loss = 0\n len_iter = 0\n model.train()\n model.to(device)\n for batch in data_iterator:\n current_iter += 1\n\n inputs = data_manager.fix_batch(batch.text, device=device)\n\n loss, outputs = model(loss_out=True)\n\n loss.backward()\n clip_grad_norm_(model.parameters(), MAX_GRAD_NORM)\n optimizer.step()\n optimizer.zero_grad()\n epoch_loss += loss.item()\n\n # logging\n if current_iter % print_step == 0:\n tbx.add_scalar(\"iter/train_loss\", loss.item(), current_iter)\n print(\"current:\", current_iter)\n len_iter += 1\n epoch_loss = epoch_loss / len_iter\n return model, current_iter, epoch_loss, optimizer\n\n\ndef train(model: SentenceAutoEncoder, data_iterator, device=DEVICE, epochs=NUM_EPOCH, tensorboard_path=TENSORBOARD_PATH,\n tokenizer=tokenizer):\n model.train()\n model.to(device)\n optimizer = Adam(model.parameters(), lr=DEFAULT_LEARNING_RATE, eps=ADAM_EPSILON)\n\n tbx = create_tbx(tensorboard_path)\n\n current_iter = 0\n t_total = epochs * len(data_iterator)\n print_step = int(t_total * 0.01)\n\n start = time()\n\n prev_best_eval_loss = float('inf')\n es_wait_num_epoch = 0\n\n for epoch in range(epochs):\n model, current_iter, epoch_loss, optimizer = train_iteration(model, data_iterator,\n optimizer=optimizer, tbx=tbx,\n current_iter=current_iter,\n print_step=print_step)\n tbx.add_scalar(\"epoch/train_loss\", epoch_loss, epoch)\n eval_loss = calc_loss(model, dev_data_iterator)\n\n if prev_best_eval_loss > eval_loss:\n prev_best_eval_loss = eval_loss\n es_wait_num_epoch = 0\n suffix = \"es_eval_loss_best\"\n model.save_pretrained(model_path_or_name(MODEL, suffix=suffix))\n 
tokenizer.save_pretrained(model_path_or_name(TOKENIZER, suffix=suffix))\n else:\n es_wait_num_epoch += 1\n if es_wait_num_epoch > ES_EPOCH and ES_EPOCH != 0:\n print(\"Early stopping by \", epoch)\n break\n\n print(\"epoch % 3d: train loss: %.3f: dev loss: %.3f, duration: %d, current_state: %d / %d\" % (\n epoch, epoch_loss, eval_loss, time() - start, current_iter, t_total))\n\n model.save_pretrained(model_path_or_name(MODEL))\n tokenizer.save_pretrained(model_path_or_name(TOKENIZER))\n\n tbx.close()\n\n\nif __name__ == '__main__':\n model.to(DEVICE)\n train(model, train_data_iterator)\n", "sub_path": "{{ cookiecutter.repo_name }}/src/models/train_model.py", "file_name": "train_model.py", "file_ext": "py", "file_size_in_byte": 3773, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "data.data_manager.DataManager", "line_number": 20, "usage_type": "call"}, {"api_name": "utils.tokenizer.tokenizer", "line_number": 20, "usage_type": "argument"}, {"api_name": "config.datapath.RESOURCE_TRAIN_PATH", "line_number": 21, "usage_type": "argument"}, {"api_name": "config.datapath.RESOURCE_DEV_PATH", "line_number": 22, "usage_type": "argument"}, {"api_name": "utils.tokenizer.tokenizer.vocab_size", "line_number": 24, "usage_type": "attribute"}, {"api_name": "utils.tokenizer.tokenizer", "line_number": 24, "usage_type": "name"}, {"api_name": "models.model.Seq2Seq", "line_number": 25, "usage_type": "name"}, {"api_name": "models.model.Seq2Seq", "line_number": 28, "usage_type": "name"}, {"api_name": "config.const.DEVICE", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.utils.clip_grad_norm_", "line_number": 41, "usage_type": "call"}, {"api_name": "config.const.MAX_GRAD_NORM", "line_number": 41, "usage_type": "argument"}, {"api_name": "config.const.DEVICE", "line_number": 55, "usage_type": "name"}, {"api_name": "config.const.NUM_EPOCH", "line_number": 55, "usage_type": "name"}, {"api_name": "config.datapath.TENSORBOARD_PATH", "line_number": 55, "usage_type": "name"}, {"api_name": "utils.tokenizer.tokenizer", "line_number": 56, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 59, "usage_type": "call"}, {"api_name": "config.const.DEFAULT_LEARNING_RATE", "line_number": 59, "usage_type": "name"}, {"api_name": "config.const.ADAM_EPSILON", "line_number": 59, "usage_type": "name"}, {"api_name": "utils.create_tbx.create_tbx", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 67, "usage_type": "call"}, {"api_name": "utils.calc_loss.calc_loss", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.model_path_or_name.model_path_or_name", "line_number": 84, "usage_type": "call"}, {"api_name": "config.const.MODEL", "line_number": 84, "usage_type": "argument"}, {"api_name": "utils.tokenizer.tokenizer.save_pretrained", "line_number": 85, "usage_type": "call"}, {"api_name": "utils.tokenizer.tokenizer", "line_number": 85, "usage_type": "name"}, {"api_name": "utils.model_path_or_name.model_path_or_name", "line_number": 85, "usage_type": "call"}, {"api_name": "config.const.TOKENIZER", "line_number": 85, "usage_type": "argument"}, {"api_name": "config.const.ES_EPOCH", "line_number": 88, "usage_type": "name"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "utils.model_path_or_name.model_path_or_name", "line_number": 95, "usage_type": "call"}, {"api_name": "config.const.MODEL", "line_number": 95, "usage_type": "argument"}, {"api_name": 
"utils.tokenizer.tokenizer.save_pretrained", "line_number": 96, "usage_type": "call"}, {"api_name": "utils.tokenizer.tokenizer", "line_number": 96, "usage_type": "name"}, {"api_name": "utils.model_path_or_name.model_path_or_name", "line_number": 96, "usage_type": "call"}, {"api_name": "config.const.TOKENIZER", "line_number": 96, "usage_type": "argument"}, {"api_name": "config.const.DEVICE", "line_number": 102, "usage_type": "argument"}]} +{"seq_id": "651025248", "text": "'''\nCreated on Jan 18, 2017\n\n@author: Greg Petrochenkov\n'''\n\nfrom Web.flow_calc import FlowCalculator\nfrom flask import Flask, render_template, request, jsonify, send_file, send_from_directory\nfrom werkzeug.utils import secure_filename\nimport os\nfrom StringIO import StringIO\nfrom PIL import Image\nimport pandas as pd\n\n\napp = Flask(__name__)\n#UPLOAD_FOLDER = '/opt/django/webapps/pubs_ui/FloodAnalysis/FloodAnalysis27/Web/uploads'\nUPLOAD_FOLDER = 'C:\\\\Users\\\\chogg\\\\Documents\\\\GitHub\\\\FloodAnalysis_v2\\\\FloodAnalysis27\\\\Web\\\\uploads'\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'csv'])\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['ALLOWED_EXTENSIONS'] = ALLOWED_EXTENSIONS\n\nflow_calculator = FlowCalculator()\n\n@app.route('/')\ndef render_layout():\n '''\n Main layout for application\n '''\n \n return render_template(\"layout.html\")\n\n@app.route('/js/')\ndef send_js(path):\n return send_from_directory('static/js', path)\n\n@app.route('/css/')\ndef send_css(path):\n return send_from_directory('static/css', path)\n\n@app.route('/images/')\ndef send_images(path):\n return send_from_directory('static/images', path)\n\n@app.route('/query_nwis', methods=['POST'])\ndef query_nwis():\n '''\n Queries NWIS given the form parmeters\n '''\n \n station_id = request.form['station_id']\n start_date = str(request.form['start_date'])\n end_date = str(request.form['end_date'])\n tz = request.form['tz']\n ds = request.form['daylight_savings']\n\n flow_calculator.get_nwis_ts(station_id, start_date, end_date, tz, ds)\n d = {\"message\": \"Queried NWIS station %s\" % station_id}\n \n return jsonify(**d)\n\n@app.route('/calculate_geometry', methods=['POST'])\ndef calculate_geometry():\n '''\n Calculates Cross-sectional Geometry and Reads or Computes\n Hydraulic Properties Table\n '''\n \n #cross section geometry file\n coord_file = request.files[\"coord_file\"]\n flow_calculator.coord_file = os.path.join(app.config['UPLOAD_FOLDER'], \n secure_filename(coord_file.filename))\n coord_file.save(flow_calculator.coord_file)\n \n #datum for stage\n flow_calculator.datum = float(request.form[\"datum\"])\n \n #subdivisions\n flow_calculator.sub_divisions = \\\n [int(y) for y in [x for x in\n request.form['sub_divisions'].split(',') \n if x != '']]\n \n #auto calculation of table or read from file\n flow_calculator.auto = str(request.form[\"auto_calculate\"])\n if flow_calculator.auto == \"false\":\n properties_file = request.files[\"properties_file\"]\n flow_calculator.properties_file = os.path.join(app.config['UPLOAD_FOLDER'], \n secure_filename(properties_file.filename))\n properties_file.save(flow_calculator.properties_file)\n else:\n flow_calculator.stage_increment = request.form[\"z_step\"]\n \n flow_calculator.create_table()\n \n d = {\"message\": \"Table is setup and ready to process/download\",\n \"graph\": flow_calculator.cross_section_graph}\n \n return jsonify(**d)\n\n@app.route('/flood_parameters', methods=['POST'])\ndef flood_parameters():\n '''\n Assigns values to flood parameters\n '''\n \n #Known 
flood conditions \n flow_calculator.channel_bed_slope = float(request.form[\"channel_bed_slope\"])\n flow_calculator.stage_hB = float(request.form[\"stage_hB\"])\n flow_calculator.stage_hp = float(request.form[\"stage_hp\"])\n flow_calculator.rise_peak_delta = float(request.form[\"days_between\"])\n flow_calculator.flow_QB = float(request.form[\"flow_QB\"])\n flow_calculator.flow_Qp = float(request.form[\"flow_Qp\"])\n flow_calculator.flow_Q0 = float(request.form[\"flow_Q0\"])\n \n d = {\"message\": \"Flood parameters are now set up\",\n \"graph\": flow_calculator.cross_section_graph}\n \n return jsonify(**d)\n\n@app.route('/newton_raphson', methods=['POST'])\ndef newton_raphson():\n '''\n Reads in table manning's n vs. stage or static manning's n,\n computes Q via the Newton Raphson Method and finally outputs\n the graphs for the results\n '''\n \n #methods regarding manning's n, either static for all stage\n #or reads in a table of stage vs. manning's n\n flow_calculator.static_manning = str(request.form[\"static_calculate\"])\n \n if flow_calculator.static_manning == 'true':\n #manning coefficient\n flow_calculator.manning_rough = float(request.form[\"manning_coef\"])\n else:\n manning_file = request.files[\"manning_file\"]\n flow_calculator.manning_file = os.path.join(app.config['UPLOAD_FOLDER'], \n secure_filename(manning_file.filename))\n manning_file.save(flow_calculator.manning_file)\n \n flow_calculator.manning_df = pd.read_csv(flow_calculator.manning_file)\n flow_calculator.manning_df.columns = [[\"Stage\", \"n\"]]\n \n #Initialize the time series data and then process via the Newton Raphson method\n flow_calculator.initialize_timeseries_data_for_newton_method()\n flow_calculator.flow_newton_raphson_method()\n \n #if discrete and/or alt output, include the\n #discrete flow field measurements and/or the \n #alternative approximated flow\n \n if str(request.form[\"discrete_output\"]) == \"true\":\n flow_calculator.graph_output.discrete_discharge = True\n else:\n flow_calculator.graph_output.discrete_discharge = False\n \n if str(request.form[\"alt_output\"]) == \"true\":\n flow_calculator.graph_output.alt_discharge = True\n else:\n flow_calculator.graph_output.alt_discharge = False\n \n flow_calculator.process_results()\n \n d = {\"message\": \"Table is setup and ready to process/download\",\n \"ssr\": flow_calculator.SSR}\n \n return jsonify(**d)\n \n@app.route('/download_table', methods=['GET','POST'])\ndef download_table():\n '''\n Downloads the hydraulic properties table\n '''\n \n file_name = os.path.join(app.config['UPLOAD_FOLDER'], \n secure_filename(\"table.csv\"))\n \n flow_calculator.properties_df.to_csv(path_or_buf=file_name)\n \n return send_file(file_name, mimetype=\"text/csv\",\n as_attachment = True,\n attachment_filename = \"table.csv\")\n \n@app.route('/download_flow_table', methods=['GET','POST'])\ndef download_flow_table():\n '''\n Downloads the time series of stage and flow table\n '''\n \n file_name = os.path.join(app.config['UPLOAD_FOLDER'], \n secure_filename(\"flow_table.csv\"))\n \n flow_calculator.flow_df.to_csv(path_or_buf=file_name)\n \n return send_file(file_name, mimetype=\"text/csv\",\n as_attachment = True,\n attachment_filename = \"flow_table.csv\")\n \n@app.route('/image')\ndef serve_img():\n '''\n Serves an image to the client\n '''\n \n name = request.args.get('name')\n file_name = ''.join([UPLOAD_FOLDER,'/',name,'.png'])\n img = Image.open(file_name)\n return serve_pil_image(img)\n\ndef serve_pil_image(pil_img):\n '''\n Puts the image data in a 
string buffer and then returns the data\n to serve the image to the client\n '''\n \n img_io = StringIO()\n pil_img.save(img_io, 'JPEG', quality=70)\n img_io.seek(0)\n return send_file(img_io, mimetype='image/jpeg') \n \napp.secret_key = os.urandom(24)\napp.run()\n", "sub_path": "FloodAnalysis27/Web/flask_app.py", "file_name": "flask_app.py", "file_ext": "py", "file_size_in_byte": 7420, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "flask.Flask", "line_number": 16, "usage_type": "call"}, {"api_name": "Web.flow_calc.FlowCalculator", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 43, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 51, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 52, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 52, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 55, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 55, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 76, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 76, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 81, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 81, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 85, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 87, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 87, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 88, "usage_type": "call"}, {"api_name": "os.path", "line_number": 88, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 89, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 92, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 92, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 99, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 108, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 108, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 109, "usage_type": "attribute"}, {"api_name": 
"flask.request", "line_number": 109, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 110, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 110, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 111, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 111, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 112, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 112, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 113, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 113, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 114, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 114, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 119, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 131, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 131, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 135, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 135, "usage_type": "name"}, {"api_name": "flask.request.files", "line_number": 137, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 137, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 139, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 142, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 153, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 153, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 158, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 158, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 168, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 177, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 192, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 196, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 206, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 206, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 206, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 208, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 208, "usage_type": "name"}, {"api_name": "StringIO.StringIO", "line_number": 217, "usage_type": "call"}, {"api_name": "flask.send_file", "line_number": 220, "usage_type": "call"}, {"api_name": "os.urandom", "line_number": 222, "usage_type": "call"}]} +{"seq_id": "244421034", "text": "\"\"\"\ntest_dynamics.py\n----------------\n\nTest dynamics algorithms.\n\n\"\"\"\n\nimport networkx as nx\nfrom netrd import dynamics\nfrom netrd.dynamics import BaseDynamics\n\n\ndef test_dynamics_valid_dimensions():\n 
\"\"\"Dynamics models should return N x L arrays.\"\"\"\n\n G = nx.karate_club_graph()\n N = G.number_of_nodes()\n\n for L in [25, 100]:\n for obj in dynamics.__dict__.values():\n if isinstance(obj, type) and BaseDynamics in obj.__bases__:\n TS = obj().simulate(G, L)\n assert TS.shape == (N, L)\n\n assert BaseDynamics().simulate(G, 25).shape == (N, 25)\n assert BaseDynamics().simulate(G, 100).shape == (N, 100)\n", "sub_path": "tests/test_dynamics.py", "file_name": "test_dynamics.py", "file_ext": "py", "file_size_in_byte": 663, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "networkx.karate_club_graph", "line_number": 17, "usage_type": "call"}, {"api_name": "netrd.dynamics.__dict__.values", "line_number": 21, "usage_type": "call"}, {"api_name": "netrd.dynamics.__dict__", "line_number": 21, "usage_type": "attribute"}, {"api_name": "netrd.dynamics", "line_number": 21, "usage_type": "name"}, {"api_name": "netrd.dynamics.BaseDynamics", "line_number": 22, "usage_type": "name"}, {"api_name": "netrd.dynamics.BaseDynamics", "line_number": 26, "usage_type": "call"}, {"api_name": "netrd.dynamics.BaseDynamics", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "337898917", "text": "from docx import Document\nfrom docx.shared import Cm, Mm, Inches, RGBColor\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nimport json, datetime, sys, boto3\nimport matplotlib.pyplot as plt\nimport statistics\n\nprint ('# Transcription')\njob_start = datetime.datetime.now()\n\n# Function to help convert timestamps from s to H:M:S\ndef convert_time_stamp(n):\n ts = datetime.timedelta(seconds=float(n))\n ts = ts - datetime.timedelta(microseconds=ts.microseconds)\n return str(ts)\n\n# Function to set table column widths\ndef set_col_widths(table):\n widths = (Inches(0.6), Inches(1), Inches(4.5))\n for row in table.rows:\n for idx, width in enumerate(widths):\n row.cells[idx].width = width\n\n# Logging\nlogs = boto3.client('logs')\ndef write_log(log_text):\n log_info = logs.describe_log_streams(\n logGroupName='Transcripts',\n logStreamNamePrefix='Application')\n log_time = int(datetime.datetime.now().timestamp() * 1000)\n response = logs.put_log_events(\n logGroupName='Transcripts',\n logStreamName='Application',\n logEvents=[\n {\n 'timestamp': log_time,\n 'message': log_text\n },\n ],\n sequenceToken=log_info['logStreams'][0]['uploadSequenceToken']\n )\n\n# Initiate Document\ndocument = Document()\n# A4 Size\ndocument.sections[0].page_width = Mm(210)\ndocument.sections[0].page_height = Mm(297)\n# Font\nfont = document.styles['Normal'].font\nfont.name = 'Calibri'\n\n# Load Transcription output from command line input\n# eg: python3 application.py 'output.json'\nfile = sys.argv[1]\ndata = json.load(open(file))\nprint (file, 'opened...')\n\n# Document title and intro\ntitle = str('Transcription of ' + data['jobName'])\ndocument.add_heading(title, level=1)\n# Set thresholds for formatting later\nthreshold_for_grey = 0.98\n# Intro\ndocument.add_paragraph('Transcription using AWS Transcribe automatic speech recognition.')\ndocument.add_paragraph(datetime.datetime.now().strftime('Document produced on %A %d %B %Y at %X.'))\ndocument.add_paragraph() # Spacing\ndocument.add_paragraph('Grey text has less than ' + str(int(threshold_for_grey * 100)) + '% confidence. 
')\n\n# Stats dictionary\nstats = {\n 'timestamps': [],\n 'accuracy': [],\n '9.8': 0, '9': 0, '8': 0, '7': 0, '6': 0, '5': 0, '4': 0, '3': 0, '2': 0, '1': 0, '0': 0,\n 'total': len(data['results']['items'])}\nprint ('Producing stats...')\n\n# Confidence count\nfor item in data['results']['items']:\n if item['type'] == 'pronunciation':\n stats['timestamps'].append(float(item['start_time']))\n stats['accuracy'].append(int(float(item['alternatives'][0]['confidence']) * 100))\n if float(item['alternatives'][0]['confidence']) >= 0.98: stats['9.8'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.9: stats['9'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.8: stats['8'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.7: stats['7'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.6: stats['6'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.5: stats['5'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.4: stats['4'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.3: stats['3'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.2: stats['2'] += 1\n elif float(item['alternatives'][0]['confidence']) >= 0.1: stats['1'] += 1\n else: stats['0'] += 1\n\n# Display confidence count table\ntable = document.add_table(rows=1, cols=3)\ntable.style = document.styles['Light List Accent 1']\ntable.alignment = WD_ALIGN_PARAGRAPH.CENTER\nhdr_cells = table.rows[0].cells\nhdr_cells[0].text = 'Confidence'\nhdr_cells[1].text = 'Count'\nhdr_cells[2].text = 'Percentage'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('98% - 100%')\nrow_cells[1].text = str(stats['9.8'])\nrow_cells[2].text = str(round(stats['9.8'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('90% - 97%')\nrow_cells[1].text = str(stats['9'])\nrow_cells[2].text = str(round(stats['9'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('80% - 89%')\nrow_cells[1].text = str(stats['8'])\nrow_cells[2].text = str(round(stats['8'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('70% - 79%')\nrow_cells[1].text = str(stats['7'])\nrow_cells[2].text = str(round(stats['7'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('60% - 69%')\nrow_cells[1].text = str(stats['6'])\nrow_cells[2].text = str(round(stats['6'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('50% - 59%')\nrow_cells[1].text = str(stats['5'])\nrow_cells[2].text = str(round(stats['5'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('40% - 49%')\nrow_cells[1].text = str(stats['4'])\nrow_cells[2].text = str(round(stats['4'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('30% - 39%')\nrow_cells[1].text = str(stats['3'])\nrow_cells[2].text = str(round(stats['3'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('20% - 29%')\nrow_cells[1].text = str(stats['2'])\nrow_cells[2].text = str(round(stats['2'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('10% - 19%')\nrow_cells[1].text = str(stats['1'])\nrow_cells[2].text = str(round(stats['1'] / stats['total'] * 100, 2)) + '%'\nrow_cells = table.add_row().cells\nrow_cells[0].text = str('0% - 9%')\nrow_cells[1].text = 
str(stats['0'])\nrow_cells[2].text = str(round(stats['0'] / stats['total'] * 100, 2)) + '%'\n\n# Add paragraph for spacing\ndocument.add_paragraph()\n\n# Display scatter graph of confidence\n# Confidence of each word as scatter graph\nplt.scatter(stats['timestamps'], stats['accuracy'])\n# Mean average as line across graph\nplt.plot([stats['timestamps'][0], stats['timestamps'][-1]], [statistics.mean(stats['accuracy']), statistics.mean(stats['accuracy'])], 'r')\n# Formatting\nplt.xlabel('Time (seconds)')\n#plt.xticks(range(0, int(stats['timestamps'][-1]), 60))\nplt.ylabel('Accuracy (percent)')\nplt.yticks(range(0, 101, 10))\nplt.title('Accuracy during video')\nplt.legend(['Accuracy average (mean)', 'Individual words'], loc='lower center')\nplt.savefig('chart.png')\ndocument.add_picture('chart.png', width=Cm(14.64))\ndocument.paragraphs[-1].alignment = WD_ALIGN_PARAGRAPH.CENTER\ndocument.add_page_break()\n\n# Process and display transcript by speaker segments\nprint ('Writing transcript...')\ntable = document.add_table(rows=1, cols=3)\ntable.style = document.styles['Light List Accent 1']\nhdr_cells = table.rows[0].cells\nhdr_cells[0].text = 'Time'\nhdr_cells[1].text = 'Speaker'\nhdr_cells[2].text = 'Content'\n\nfor segment in data['results']['speaker_labels']['segments']:\n # If there is content in the segment\n if len(segment['items']) > 0:\n # Add a row, write the time and speaker\n row_cells = table.add_row().cells\n row_cells[0].text = convert_time_stamp(segment['start_time'])\n row_cells[1].text = str(segment['speaker_label'])\n\n # Segments group individual word results by speaker. They are cross-referenced by time.\n # For each word in the segment...\n for word in segment['items']:\n # Run through the word results and get the corresponding result\n for result in data['results']['items']:\n if result['type'] == 'pronunciation':\n if result['start_time'] == word['start_time']:\n\n # Get the word with the highest confidence\n if len(result['alternatives']) > 0:\n current_word = dict()\n confidence_scores = []\n for score in result['alternatives']:\n confidence_scores.append(score['confidence'])\n for alternative in result['alternatives']:\n if alternative['confidence'] == max(confidence_scores):\n current_word = alternative.copy()\n\n # Write and format the word\n run = row_cells[2].paragraphs[0].add_run(' ' + current_word['content'])\n if float(current_word['confidence']) < threshold_for_grey:\n font = run.font\n font.color.rgb = RGBColor(204, 204, 204)\n\n # If the next item is punctuation, add it\n try:\n if data['results']['items'][data['results']['items'].index(result) + 1]['type'] == 'punctuation':\n run = row_cells[2].paragraphs[0].add_run(data['results']['items'][data['results']['items'].index(result) + 1]['alternatives'][0]['content'])\n # Occasional IndexErrors encountered\n except IndexError:\n pass\n\nset_col_widths(table)\n\n# Save the file\ndocument_title = str(data['jobName'] + '.docx')\ndocument.save(document_title)\nprint(document_title, 'saved.')\n\n# Logging\nif len(sys.argv) > 2:\n if sys.argv[2] == 'log':\n job_finish = datetime.datetime.now()\n job_duration = job_finish - job_start\n write_log('Job name: ' + data['jobName'] + ', Word count: ' + str(stats['total']) + ', Accuracy average: ' + str(round(statistics.mean(stats['accuracy']), 2)) + ', Job duration: ' + str(job_duration.seconds))\n print(data['jobName'], 'logged.')\n\nprint ('')", "sub_path": "application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 9792, "program_lang": "python", "lang":
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "datetime.datetime.now", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 14, "usage_type": "call"}, {"api_name": "docx.shared.Inches", "line_number": 19, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 30, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 30, "usage_type": "attribute"}, {"api_name": "docx.Document", "line_number": 44, "usage_type": "call"}, {"api_name": "docx.shared.Mm", "line_number": 46, "usage_type": "call"}, {"api_name": "docx.shared.Mm", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 55, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 65, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 65, "usage_type": "attribute"}, {"api_name": "docx.enum.text.WD_ALIGN_PARAGRAPH.CENTER", "line_number": 97, "usage_type": "attribute"}, {"api_name": "docx.enum.text.WD_ALIGN_PARAGRAPH", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "statistics.mean", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "docx.shared.Cm", "line_number": 163, "usage_type": "call"}, {"api_name": "docx.enum.text.WD_ALIGN_PARAGRAPH.CENTER", "line_number": 164, "usage_type": "attribute"}, {"api_name": "docx.enum.text.WD_ALIGN_PARAGRAPH", "line_number": 164, "usage_type": "name"}, {"api_name": "docx.shared.RGBColor", "line_number": 206, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 224, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 225, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 226, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 226, "usage_type": "attribute"}, {"api_name": "statistics.mean", "line_number": 228, "usage_type": "call"}]} +{"seq_id": "502084088", "text": 
"\"\"\"Testing the PointLabelTooltip plugin of mpld3.\"\"\"\n\nimport mpld3\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport databench\n\n\nANALYSIS = databench.Analysis('mpld3PointLabel', __name__, __doc__)\nANALYSIS.thumbnail = 'mpld3PointLabel.png'\n\n\n@ANALYSIS.signals.on('connect')\ndef onconnect():\n \"\"\"Run as soon as a browser connects to this.\"\"\"\n\n fig, ax = plt.subplots()\n points = ax.scatter(\n np.random.rand(40),\n np.random.rand(40),\n s=300,\n alpha=0.3,\n )\n\n # use the mpld3 tooltop plugin\n labels = [\"Point {0}\".format(i) for i in range(40)]\n tooltip = mpld3.plugins.PointLabelTooltip(points, labels)\n mpld3.plugins.connect(fig, tooltip)\n\n # send the plot to the frontend\n ANALYSIS.signals.emit('mpld3canvas', mpld3.fig_to_dict(fig))\n\n # done\n ANALYSIS.signals.emit('log', {'action': 'done'})\n", "sub_path": "analyses/mpld3PointLabel.py", "file_name": "mpld3PointLabel.py", "file_ext": "py", "file_size_in_byte": 865, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "databench.Analysis", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.random.rand", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "mpld3.plugins.PointLabelTooltip", "line_number": 28, "usage_type": "call"}, {"api_name": "mpld3.plugins", "line_number": 28, "usage_type": "attribute"}, {"api_name": "mpld3.plugins.connect", "line_number": 29, "usage_type": "call"}, {"api_name": "mpld3.plugins", "line_number": 29, "usage_type": "attribute"}, {"api_name": "mpld3.fig_to_dict", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "171364050", "text": "#!/usr/bin/env python3\n\n\"\"\"\nA CLI tool for quickly printing washer/dryer status information quickly.\nIntended for use with the Laundry View system at Princeton University.\n\nProvide no arguments for statuses from all rooms, or specify any number of\npartial or complete room names to have only statuses from those rooms printed.\n\nAfter doing `chmod u+x laundry.py`, the following will obtain status information\nfor Brown Hall, Witherspoon Hall, and Buyers Hall:\n\n ./laundry.py brown wither buyers\n\nNote that you must be on Princeton's campus (or using the VPN) for this to work,\nand that specified names must at least partially match the names on the list at\nhttp://laundryview.com/lvs.php.\n\nAdd `alias laundry='\"/path/to your/laundry.py\"'` to your `.bashrc` for added\nconvenience.\n\nIntended for use with Python 3. 
Requires Beautiful Soup for scraping, which can\nbe installed with `pip3 install beautifulsoup4`.\n\nBy: Lucas Mayer\nLast updated: April 5th, 2016\n\"\"\"\n\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nfrom sys import argv\n\nLIST_URL = \"http://laundryview.com/lvs.php\"\nDATA_URL = \"http://classic.laundryview.com/laundry_room.php?view=c&lr=\"\n\ndef print_statuses(requested_room_names):\n # Scrape laundry IDs from list page\n soup = BeautifulSoup(urlopen(LIST_URL).read(), \"html.parser\")\n room_links = soup.find_all(\"a\", attrs = {\"class\": \"a-room\"})\n rooms = {}\n for room_link in room_links:\n room_id = int(room_link[\"href\"].split(\"=\")[1].rstrip())\n room_name = room_link.text.strip().lower()\n # Save only desired rooms\n for requested_name in requested_room_names:\n if requested_name.lower() in room_name.lower():\n rooms[room_name] = room_id\n break\n\n # Scrape current statuses\n for room_name, room_id in rooms.items():\n # Get statuses of each machine\n url = DATA_URL + str(room_id)\n soup = BeautifulSoup(urlopen(url).read(), \"html.parser\")\n statuses = soup.find_all(\"span\", attrs = {\"class\": \"stat\"})\n washers = []\n dryers = []\n for status in statuses:\n # Get status\n status_str = status.text.strip()\n # What kind of machine?\n align = status.parent.parent.parent.parent.parent[\"align\"]\n if align == \"left\":\n washers.append(status_str)\n else:\n dryers.append(status_str)\n # Print information\n print(room_name.title())\n for i, status_str in enumerate(washers, 1):\n print(\"\\tWasher {}: {}\".format(i, status_str))\n for i, status_str in enumerate(dryers, 1):\n print(\"\\tDryer {}: {}\".format(i, status_str))\n\nif __name__ == \"__main__\":\n if len(argv) == 1:\n print_statuses([\"\"]) # Print all\n else:\n print_statuses(argv[1:])\n", "sub_path": "laundry.py", "file_name": "laundry.py", "file_ext": "py", "file_size_in_byte": 2883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 38, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 38, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 54, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 54, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 75, "usage_type": "argument"}, {"api_name": "sys.argv", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "230625700", "text": "# -*- coding: utf-8 -*-\nimport logging\nimport urllib2\nimport urllib\n\nlogger = logging.getLogger(\"monitor\")\n\n\ndef dingding_alarm_msg(msg):\n\tif not msg:\n\t\treturn\n\turl = \"http://hadoop004:1090/dingding/alarm?token\"\n\t\n\tbody_dict = {\n\t\t\"token\": \"e118a4e75784e5d84fca3e89d4c9b1c6c7451774d72e33d672b2b8ff9e17e6e4\",\n\t\t\"touser\": \"robot\",\n\t\t\"msg\": \"%s\" % msg\n\t}\n\tpost_data = urllib.urlencode(body_dict)\n\treq = urllib2.urlopen(url, post_data)\n\t\n\tlogger.info(\"dingding_alarm_msg req code: %s\", req.getcode())\n", "sub_path": "DM/monitor/business/utils/dingding.py", "file_name": "dingding.py", "file_ext": "py", "file_size_in_byte": 497, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 20, "usage_type": "call"}]} +{"seq_id": 
"89599705", "text": "from django.core.management.base import BaseCommand, CommandError\n\nfrom django.db import connections, IntegrityError\nfrom django.contrib.auth.models import User\nfrom profiles.models import Employee\n\nclass Command(BaseCommand):\n help = 'Migrate users from 192.168.10.240 database to 192.168.10.238'\n\n # def add_arguments(self, parser):\n # parser.add_argument('poll_id', nargs='+', type=int)\n\n def handle(self, *args, **options):\n\n # dtr alias\n cursor = connections['dtr'].cursor()\n\n query_users = \"EXEC [spX_GET_UserInfo] '', ''\"\n cursor.execute(query_users)\n users = cursor.fetchall()\n\n user_count = 0\n acc_type = 'staff'\n\n for user in users:\n print(user)\n user_id = user[1]\n user_name = user[2]\n\n try:\n new_user = User(username=user_id)\n new_user.set_password(user_id)\n new_user.save()\n\n new_employee = Employee(user=new_user)\n new_employee.giv_name = user_name\n new_employee.acc_type = acc_type\n new_employee.save()\n user_count = user_count + 1\n except IntegrityError as e:\n print('Error on this user:', user)\n except Exception as E:\n print(E)\n\n self.stdout.write(self.style.SUCCESS(\"Migrated %s users\" % user_count))", "sub_path": "Django_sample_production_web/apps/profiles/management/commands/migrateuser.py", "file_name": "migrateuser.py", "file_ext": "py", "file_size_in_byte": 1414, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "django.core.management.base.BaseCommand", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.connections", "line_number": 16, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User", "line_number": 31, "usage_type": "call"}, {"api_name": "profiles.models.Employee", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.IntegrityError", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "375751872", "text": "\"\"\"\n模拟键盘单个案件操作\n\"\"\"\n\nfrom selenium import webdriver\nimport unittest\nimport time\nfrom selenium.webdriver.common.keys import Keys\n\nclass VisitSogouByChrome(unittest.TestCase):\n def setUp(self):\n # 启动Chrome浏览器\n self.driver = webdriver.Chrome(executable_path=\"D:\\\\chromedriver\")\n\n def test_SingleKeys(self):\n url='https://www.baidu.com/'\n self.driver.get(url)\n # 通过id获取元素\n query = self.driver.find_element_by_id('kw')\n # 通过webdriver发送一个f12键\n query.send_keys(Keys.F12)\n # 搜索selenium\n query.send_keys(\"selenium\")\n # 通过webdrive发生一个回车键\n query.send_keys(Keys.RETURN)\n time.sleep(3)\n\n\n def tearDown(self):\n # 退出谷歌浏览器\n self.driver.quit()\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "webdriver_api/single_keys.py", "file_name": "single_keys.py", "file_ext": "py", "file_size_in_byte": 881, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.F12", "line_number": 21, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 21, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.keys.Keys.RETURN", "line_number": 25, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.keys.Keys", "line_number": 25, "usage_type": "name"}, {"api_name": "time.sleep", 
"line_number": 26, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "622731999", "text": "import random\r\nimport sys\r\nimport csvParser\r\nimport decisionTree as dt\r\nimport classify\r\nfrom lxml import etree\r\nfrom TreeNode import Node\r\nimport itertools\r\n\r\ndef sliceData(data, k):\r\n numEntries = len(data)\r\n lenSlice = int(numEntries / k)\r\n dataSlices = []\r\n for i in range(k):\r\n slice = random.sample(data, lenSlice)\r\n dataSlices.append(slice)\r\n\r\n return dataSlices\r\n\r\ndef calcClassifErrors(data, classif):\r\n tp, tn, fp, fn = 0, 0, 0, 0\r\n for cl in classif:\r\n dP = cl[0]\r\n c = cl[1]\r\n if dP['Category'] == \"Obama\" and c == \"Obama\":\r\n tp += 1\r\n elif dP['Category'] == \"Obama\" and c == \"McCain\":\r\n fn += 1\r\n elif dP['Category'] == \"McCain\" and c == \"Obama\":\r\n fp += 1\r\n elif dP['Category'] == \"McCain\" and c == \"McCain\":\r\n tn += 1\r\n\r\n return (tp, tn, fp, fn)\r\n\r\ndef main():\r\n tp, tn, fp, fn = 0, 0, 0, 0\r\n\r\n if not len(sys.argv) >= 3:\r\n print(\"\\t\\tMissing arguments\\n\\tProper Call :\\tpython validation.py k-fold []\")\r\n return\r\n\r\n trainingSet = sys.argv[1]\r\n k = int(sys.argv[2])\r\n\r\n data = csvParser.parse(trainingSet)\r\n if k == -1:\r\n k = len(data) - 1\r\n elif k == 0:\r\n print(\"\")\r\n \"\"\" slice data into training and testing subsets \"\"\"\r\n dSlices = sliceData(data, k)\r\n attributes = list(data[0].keys())\r\n\r\n if len(sys.argv) == 4:\r\n \trestrFile = sys.argv[3]\r\n \twith open(restrFile, 'r') as file:\r\n \t\trestr = file.read().split(',')\r\n\r\n \tattributes = restrictAttrib(attributes[:-1], restr[1:])\r\n\r\n \"\"\" perform cross validation \"\"\"\r\n kFoldResults = []\r\n print(\"\\n\\nRunning {}-fold cross validation on {} ...\".format(k, trainingSet))\r\n for i in range(k):\r\n root = Node('Root', None)\r\n trainingSet = dSlices[0:i] + dSlices[i+1:]\r\n trainingSet = list(itertools.chain.from_iterable(trainingSet))\r\n testSet = dSlices[i]\r\n\r\n dt.build(trainingSet, attributes, root, 0.01)\r\n classif = classify.classifyCollection(root, testSet)\r\n classifErrors = calcClassifErrors(data, classif)\r\n kFoldResults.append(classifErrors)\r\n\r\n acc = 0\r\n accs = []\r\n for r in kFoldResults:\r\n tp += r[0]\r\n tn += r[1]\r\n fp += r[2]\r\n fn += r[3]\r\n acc = (r[0] + r[1]) / (r[0] + r[1] + r[2] + r[3])\r\n accs.append(acc)\r\n\r\n recall = (tp / (tp + fn)) * 100\r\n prec = (tp / (tp + fp)) * 100\r\n pf = (fp / (fp + tn)) * 100\r\n fm = ((2 * prec * recall) / (prec + recall))\r\n totAcc = ((tp + tn) / (tp + tn + fp + fn)) * 100\r\n avgAcc = (sum(accs) / len(accs)) * 100\r\n\r\n print(\"\\nAggregate Confusion Matrix:\")\r\n print(\"\\t\\t\\tClassified Positive\\t|\\tClassified Negative\")\r\n print(\"\\t===================================================================\")\r\n print(\"\\tActual Positive |\\t{}\\t\\t|\\t{}\\t\".format(tp, fn))\r\n print(\"\\t===================================================================\")\r\n print(\"\\tActual Negative |\\t{}\\t\\t|\\t{}\\t\".format(fp, tn))\r\n print(\"\\n\\tAggregate Performance Measures\")\r\n print(\"Recall\\t\\t:\\t{} %\".format(recall))\r\n print(\"Precision\\t:\\t{} %\".format(prec))\r\n print(\"pf\\t\\t:\\t{} \".format(pf))\r\n print(\"f-measure\\t:\\t{} \".format(fm))\r\n print(\"\\nOverall Accuracy:\\t{} %\".format(totAcc))\r\n print(\"Average Accuracy:\\t{} %\".format(avgAcc))\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n", "sub_path": 
"validation.py", "file_name": "validation.py", "file_ext": "py", "file_size_in_byte": 3449, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "random.sample", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 43, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 44, "usage_type": "attribute"}, {"api_name": "csvParser.parse", "line_number": 46, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 55, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 56, "usage_type": "attribute"}, {"api_name": "TreeNode.Node", "line_number": 66, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 68, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 68, "usage_type": "attribute"}, {"api_name": "decisionTree.build", "line_number": 71, "usage_type": "call"}, {"api_name": "classify.classifyCollection", "line_number": 72, "usage_type": "call"}]} +{"seq_id": "411605248", "text": "import asyncio\nimport websockets\nimport pickle\n\nasync def talking(strategy_dir, strategy_name, internal_config, external_config, config_dir, feedback_dir):\n async with websockets.connect(\n #'ws://localhost:8765') as websocket:\n 'ws://45.76.164.162:10') as websocket:\n\n #file_name = input(\"Input file name:\\n\")\n #file_name = 'temp_strategy_class.py'\n\n async def send_file(file_name):\n # send file\n with open(file_name, 'rb') as f:\n for line in f:\n await websocket.send(line)\n await websocket.send(\"END OF FILE\")\n # send strategy_name greeting\n await websocket.send(strategy_name)\n print(f\"> {strategy_name} sent\")\n\n # greeting from server\n greeting = await websocket.recv()\n print(f\"< {greeting}\")\n\n # send strategy file\n await send_file(strategy_dir + strategy_name)\n print(f\"< {await websocket.recv()}\") # feedback from server\n \n # send internal config\n await send_file(config_dir + internal_config)\n print(f\"< {await websocket.recv()}\")\n\n # send external config\n await send_file(config_dir + external_config)\n print(f\"< {await websocket.recv()}\")\n\n # handler is not useful anymore\n \"\"\"\n request = await websocket.recv()\n print(f\"< {request}\")\n\n titles = pickle.dumps(['USDT_BTC_10sec_s'])\n await websocket.send(titles)\n \"\"\"\n while True:\n info = await websocket.recv()\n if info == \"image name\":\n img_name = await websocket.recv()\n feedback_figure = open(feedback_dir+img_name, 'wb')\n while True:\n image_slice = await websocket.recv()\n if image_slice == 'END OF FILE':\n feedback_figure.close()\n print(\"feedback figure received...\")\n break\n else:\n feedback_figure.write(image_slice)\n elif info == \"END OF DIR\":\n break\n\n\n \n \n#asyncio.get_event_loop().run_until_complete(talking(\"temp_strategy_class.py\",\"internal_config.json\",\"external_config.json\"))", "sub_path": "test/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2283, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "websockets.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "473308459", "text": "import os\nimport time\nfrom datetime import datetime, date\nimport requests\nfrom сlasses.vk_api_constants import RATINGS\n\n\n# DTO classes\nfrom сlasses.vkinder_bot_constants import PHRASES\n\n\nclass ApiCity:\n def __init__(self, row: 
dict):\n self.id = row.get('id', None)\n self.title = row.get('title', None)\n self.area = row.get('area', None)\n self.region = row.get('region', None)\n\n\nclass ApiCountry:\n def __init__(self, row: dict):\n self.id = row.get('id', None)\n self.title = row.get('title', None)\n\n\nclass ApiPhoto:\n def __init__(self, row: dict):\n self.url = row.get('url', None)\n self.likes_count = row.get('likes_count', None)\n self.comments_count = row.get('comments_count', None)\n self.reposts_count = row.get('reposts_count', None)\n self.owner_id = row.get('owner_id', None)\n self.id = row.get('id', None)\n\n\nclass VKinderSearch:\n def __init__(self):\n self.id = None\n self.sex_id = None\n self.status_id = None\n self.min_age = None\n self.max_age = None\n self.city_id = None\n self.city_name = None\n\n\nclass ApiUser:\n def __init__(self, row: dict = None, rating_id: int = RATINGS['new']):\n if row is None:\n row = {}\n self.vk_id = str(row.get('id', None))\n self.fname = row.get('first_name', None)\n self.lname = row.get('last_name', None)\n self.sex_id = row.get('sex', None)\n self.is_closed = row.get('is_closed', None)\n # hardcoded country ID\n self.country_id = row.get('country', {}).get('id', 1)\n self.country_name = row.get('country', {}).get('title', 'Россия')\n self.city_id = row.get('city', {}).get('id', 1)\n self.city_name = row.get('city', {}).get('title', 'Россия')\n self.hometown = row.get('home_town', None)\n self.domain = row.get('domain', None)\n self.last_seen_time = row.get('last_seen', {}).get('time', None)\n self.db_id = None\n self.rating_id = rating_id\n self.photos: list[ApiPhoto] = []\n bdate = row.get('bdate', None)\n if bdate:\n bdate = decode_date_from_str(bdate)\n self.birth_day = bdate['birth_day']\n self.birth_month = bdate['birth_month']\n self.birth_year = bdate['birth_year']\n self.age = bdate['age']\n self.birth_date = bdate['birth_date']\n else:\n self.birth_day = None\n self.birth_month = None\n self.birth_year = None\n self.age = None\n self.birth_date = None\n\n\nclass VKinderClient(ApiUser):\n def __init__(self, user: ApiUser):\n super().__init__()\n self.__dict__.update(user.__dict__)\n self._found_user_iter = -1\n self._status = 0\n self.rating_filter = RATINGS['new']\n self._search = VKinderSearch()\n self.searches = []\n self.found_cities: list[ApiCity] = []\n self.found_countries: list[ApiCountry] = []\n self._found_users: list[ApiUser] = []\n self.last_contact = datetime.now()\n self.active_user: ApiUser = None\n\n # this prevents to import VKinderSearch in main modules\n def reset_search(self):\n self.search = VKinderSearch()\n\n def get_next_user(self) -> ApiUser:\n while self._found_user_iter < len(self._found_users)-1:\n self._found_user_iter += 1\n if self._found_users[self._found_user_iter].rating_id == self.rating_filter:\n return self._found_users[self._found_user_iter]\n\n @property\n def search(self):\n return self._search\n\n # here we also resets found users list\n @search.setter\n def search(self, value):\n self._search = value\n self.found_users = []\n\n @property\n def found_users(self):\n return self._found_users\n\n @found_users.setter\n def found_users(self, value):\n self._found_user_iter = -1\n self._found_users = value\n\n @property\n def status(self):\n return self._status\n\n @status.setter\n def status(self, value):\n self.last_contact = datetime.now()\n self._status = value\n\n\ndef decode_date_from_str(datestr: str) -> dict:\n \"\"\"\n Decode VK birth date\n :param datestr: string in format \"D.M.YYYY\" or \"D.M\"\n :return: 
dictionary {'birth_year': 0, 'birth_month': 0, 'birth_day': 0, 'age': 0, 'birth_date': date}\n \"\"\"\n bdate = datestr.split('.') if datestr else []\n # extending list to prevent index out of range if received not full birth date (D.M.YYYY or D.M)\n bdate.extend([None, None, None])\n birth_day = int(bdate[0]) if bdate[0] else bdate[0]\n birth_month = int(bdate[1]) if bdate[1] else bdate[1]\n birth_year = int(bdate[2]) if bdate[2] else bdate[2]\n age = None\n birth_date = None\n if birth_year:\n age = calculate_age(birth_day, birth_month, birth_year)\n birth_date = date(birth_year, birth_month, birth_day)\n return {'birth_year': birth_year, 'birth_month': birth_month, 'birth_day': birth_day, 'age': age,\n 'birth_date': birth_date}\n\n\ndef calculate_age(birth_date, birth_month, birth_year: int) -> int:\n \"\"\"\n Determines the number of full years since the passed date till present time\n \"\"\"\n today = date.today()\n return today.year - birth_year - ((today.month, today.day) < (birth_month, birth_date))\n\n\ndef decorator_speed_meter(is_debug_mode=True):\n \"\"\"\n Measures working time of called function\n \"\"\"\n def decorator_func(target_function):\n def wrapper_func(*args, **kwargs):\n start_time = time.time()\n result = target_function(*args, **kwargs)\n if is_debug_mode:\n log(f'{target_function.__name__}: {time.time() - start_time} sec', is_debug_mode)\n return result\n return wrapper_func\n return decorator_func\n\n\ndef break_str(s: str, break_chars: list[str] = None, max_size: int = 4096) -> list[str]:\n \"\"\"\n Split string into chunks with given max size, splitting done by line break, whitespace, comma or by given signs\n \"\"\"\n if break_chars is None:\n break_chars = ['\\n', ' ', ',']\n result = []\n start = 0\n end = max_size + 1\n while True:\n sample = s[start:end]\n if len(sample) < max_size:\n result.append(sample)\n break\n pos = -1\n for char in break_chars:\n pos = sample.rfind(char)\n if pos > -1:\n break\n if pos == -1:\n result.append(sample)\n start += max_size\n end += max_size + 1\n else:\n result.append(sample[:pos + 1])\n start += pos + 1\n end = start + max_size\n return result\n\n\ndef timestamp_to_str(timestamp: int) -> int:\n \"\"\"\n Determines the number of days since the passed date till present time\n \"\"\"\n result = (datetime.today() - datetime.utcfromtimestamp(timestamp)).days\n return result\n\n\ndef last_seen(timestamp: int) -> str:\n \"\"\"\n Human readable time that elapsed since given timestamp till present time\n \"\"\"\n result = ''\n if timestamp is None:\n return result\n result = PHRASES['last_seen']\n days_ago = timestamp_to_str(timestamp)\n if days_ago == 0:\n result += PHRASES['today']\n elif days_ago == 1:\n result += PHRASES['yesterday']\n elif days_ago == 2:\n result += PHRASES['day_before_yesterday']\n elif 3 <= days_ago <= 7:\n result += PHRASES['at_this_week']\n elif 31 <= days_ago <= 365:\n result += PHRASES['x_months_ago'].format(int(days_ago / 30))\n elif 366 <= days_ago:\n result += PHRASES['x_years_ago'].format(int(days_ago / 365))\n else:\n result += PHRASES['x_days_ago'].format(days_ago)\n return result\n\n\ndef log(message, is_debug_msg=False, sep='\\n'):\n \"\"\"\n Log messages to console if debug message flag set\n \"\"\"\n if is_debug_msg:\n now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S:%f\")\n if type(message) in [list, dict, tuple, set]:\n message = [f'{now} - {x}' for x in message]\n print(*message, sep=sep)\n else:\n print(f'{now} - {message}', sep=sep)\n else:\n return\n\n\ndef get_filetype_by_url(url: 
str) -> str:\n \"\"\"\n Recognize file extension by it's MIME code returned by server, returns extension with preceding dot sign\n https://ru.wikipedia.org/wiki/%D0%A1%D0%BF%D0%B8%D1%81%D0%BE%D0%BA_MIME-%D1%82%D0%B8%D0%BF%D0%BE%D0%B2\n \"\"\"\n extension = ''\n if not url:\n return extension\n response = requests.head(url)\n if not (200 <= response.status_code < 300):\n return extension\n file_type = response.headers.get('Content-Type', '')\n return extract_filetype(file_type)\n\n\ndef extract_filetype(content_type_str: str):\n \"\"\"\n Extract exact MIME type from Content-Type, returns second MIME if Content-Type is complex (i.e. image/jpeg)\n \"\"\"\n pos = content_type_str.find('/')\n if pos < 0:\n return '.' + content_type_str\n return '.' + content_type_str[pos + 1:]\n\n\ndef solve_filename_conflict(name: str, extension: str, folder: str = ''):\n \"\"\"\n Calculates next available file name if desired file name is already taken\n \"\"\"\n folder = folder + os.sep if folder else ''\n path = f'{os.path.abspath(os.getcwd())}{os.sep}{folder + os.sep if folder else \"\"}'\n tmp_name = name\n filename = f'{tmp_name}{extension}'\n i = 0\n postfix = '_'\n while os.path.isfile(path + filename):\n i += 1\n tmp_name = f'{name}{postfix}{i}'\n filename = f'{tmp_name}{extension}'\n return tmp_name\n\n\ndef prepare_params(*args):\n \"\"\"\n Normalize all parameters that can be passed as integer or mixed list of integers and strings,\n and makes from them one string that can be accepted as request parameter\n :param args: integer, string or list of integers and string\n :return: string with values separated by commas\n \"\"\"\n result = []\n for param in args:\n if param is None:\n continue\n if type(param) in [int, bool, float]:\n result += [str(param)]\n elif type(param) is str:\n result += [param]\n elif type(param) in [list, dict, tuple, set]:\n result += [','.join([str(x) for x in param])]\n result = ','.join([x for x in result])\n return result\n\n\ndef clear_db(sqlalchemy, engine):\n \"\"\"\n Drops all tables in DB\n :param sqlalchemy: sqlalchemy instance\n :param engine: db engine\n :return: none\n \"\"\"\n inspect = sqlalchemy.inspect(engine)\n for table_entry in reversed(inspect.get_sorted_table_and_fkc_names()):\n table_name = table_entry[0]\n if table_name:\n with engine.begin() as conn:\n conn.execute(sqlalchemy.text(f'DROP TABLE \"{table_name}\"'))\n return\n\n\ndef read_textfile(filename: str) -> str:\n \"\"\"\n Reads text file\n :param filename: name of file with query\n :return: text of query\n \"\"\"\n query_file = open(filename, mode='rt', encoding='utf-8')\n query_text = ''.join(query_file.readlines())\n query_file.close()\n return query_text\n\n\ndef get_users_ratings_counts(users: list[ApiUser]) -> dict:\n \"\"\"\n Counts total ratings of all found users and return as dict {'new': 0, 'liked': 0, 'disliked': 0, 'banned': 0}\n \"\"\"\n result = {'new': 0, 'liked': 0, 'disliked': 0, 'banned': 0}\n for user in users:\n result[get_dict_key_by_value(RATINGS, user.rating_id)] += 1\n return result\n\n\ndef get_dict_key_by_value(dictionary: dict, value):\n \"\"\"\n Find and return key of element in dictionary by its value\n \"\"\"\n for dict_key, dict_value in dictionary.items():\n if dict_value == value:\n return dict_key\n\n\ndef format_city_name(city: ApiCity) -> str:\n \"\"\"\n Prepare city info for showing in huge lists\n \"\"\"\n tmp = ', '.join([x for x in [city.area, city.region] if x])\n result = f'{city.title}{\" (\" + tmp + \")\" if tmp else \"\"}'\n return result\n", 
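A small usage sketch for the break_str helper defined above, with a deliberately tiny max_size so the chunking is visible; note that because end starts at max_size + 1, the first chunk can come back one character longer than max_size:

# break_str splits after break characters (newline, space, comma by
# default), so words are kept intact.
chunks = break_str('alpha beta gamma delta', max_size=10)
print(chunks)  # ['alpha beta ', 'gamma ', 'delta']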
"sub_path": "сlasses/vk_api_classes.py", "file_name": "vk_api_classes.py", "file_ext": "py", "file_size_in_byte": 12042, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "сlasses.vk_api_constants.RATINGS", "line_number": 48, "usage_type": "name"}, {"api_name": "сlasses.vk_api_constants.RATINGS", "line_number": 89, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 95, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 95, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 133, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 133, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 153, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 162, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 162, "usage_type": "name"}, {"api_name": "time.time", "line_number": 172, "usage_type": "call"}, {"api_name": "time.time", "line_number": 175, "usage_type": "call"}, {"api_name": "datetime.datetime.today", "line_number": 215, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 215, "usage_type": "name"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 215, "usage_type": "call"}, {"api_name": "сlasses.vkinder_bot_constants.PHRASES", "line_number": 226, "usage_type": "name"}, {"api_name": "сlasses.vkinder_bot_constants.PHRASES", "line_number": 229, "usage_type": "name"}, {"api_name": "сlasses.vkinder_bot_constants.PHRASES", "line_number": 231, "usage_type": "name"}, {"api_name": "сlasses.vkinder_bot_constants.PHRASES", "line_number": 233, "usage_type": "name"}, {"api_name": "сlasses.vkinder_bot_constants.PHRASES", "line_number": 235, "usage_type": "name"}, {"api_name": "сlasses.vkinder_bot_constants.PHRASES", "line_number": 237, "usage_type": "name"}, {"api_name": "сlasses.vkinder_bot_constants.PHRASES", "line_number": 239, "usage_type": "name"}, {"api_name": "сlasses.vkinder_bot_constants.PHRASES", "line_number": 241, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 250, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 250, "usage_type": "name"}, {"api_name": "requests.head", "line_number": 268, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 289, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 290, "usage_type": "call"}, {"api_name": "os.path", "line_number": 290, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 290, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 290, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path", "line_number": 295, "usage_type": "attribute"}, {"api_name": "сlasses.vk_api_constants.RATINGS", "line_number": 357, "usage_type": "argument"}]} +{"seq_id": "146258747", "text": "import covasim as cv\nimport pandas as pd\nimport sciris as sc\nimport pylab as pl\nimport numpy as np\nfrom matplotlib import ticker\nimport datetime as dt\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\n\n# Paths and filenames\nfigsfolder = 'figs'\nresfolder = 'results'\nscenarios = ['FNL', 'primaryPNL', 'staggeredPNL']\nlabels = ['FNL', 'Primary-only PNL', 'Staggered PNL']\nT = sc.tic()\n\n# Define plotting functions\n#%% Helper functions\n\ndef format_ax(ax, sim, key=None):\n 
@ticker.FuncFormatter\n def date_formatter(x, pos):\n return (sim['start_day'] + dt.timedelta(days=int(x))).strftime('%b\\n%y')\n ax.xaxis.set_major_formatter(date_formatter)\n if key != 'r_eff':\n sc.commaticks()\n pl.xlim([0, sim['n_days']])\n pl.axvspan(lockdown1[0], lockdown1[1], color='steelblue', alpha=0.2, lw=0)\n pl.axvspan(lockdown2[0], lockdown2[1], color='steelblue', alpha=0.2, lw=0)\n pl.axvspan(lockdown3[0], lockdown3[1], color='lightblue', alpha=0.2, lw=0)\n\n return\n\ndef plotter(key, sims, ax, label='', ylabel='', low_q=0.05, high_q=0.95, subsample=2):\n\n which = key.split('_')[1]\n try:\n color = cv.get_colors()[which]\n except:\n color = [0.5,0.5,0.5]\n\n ys = []\n for s in sims:\n ys.append(s.results[key].values)\n yarr = np.array(ys)\n\n best = pl.median(yarr, axis=0)\n low = pl.quantile(yarr, q=low_q, axis=0)\n high = pl.quantile(yarr, q=high_q, axis=0)\n\n\n tvec = np.arange(len(best))\n# tempsim = cv.Sim(datafile='../UK_Covid_cases_january03.xlsx')\n# sim = sims[0]\n# if key in tempsim.data:\n# data_t = np.array((tempsim.data.index-sim['start_day'])/np.timedelta64(1,'D'))\n# inds = np.arange(0, len(data_t), subsample)\n# data = tempsim.data[key][inds]\n# pl.plot(data_t[inds], data, 'd', c=color, markersize=10, alpha=0.5, label='Data')\n\n fill_label = None\n end = None\n start = 2 if key == 'r_eff' else 0\n pl.fill_between(tvec[start:end], low[start:end], high[start:end], facecolor=color, alpha=0.2, label=fill_label)\n pl.plot(tvec[start:end], best[start:end], c=color, label=label, lw=4, alpha=1.0)\n\n sc.setylim()\n\n datemarks = pl.array([sim.day('2020-03-01'),sim.day('2020-05-01'),\n sim.day('2020-07-01'),sim.day('2020-09-01'),\n sim.day('2020-11-01'),sim.day('2021-01-01'),sim.day('2021-03-01')])\n ax.set_xticks(datemarks)\n pl.ylabel(ylabel)\n\n return\n\n\n# Fonts and sizes\nfont_size = 36\nfont_family = 'Libertinus Sans'\npl.rcParams['font.size'] = font_size\npl.rcParams['font.family'] = font_family\n\n# Plot locations\n# Subplot sizes\nxgapl = 0.1\nxgapm = 0.02\nxgapr = 0.03\nygapb = 0.075\nygapm = 0.02\nygapt = 0.05\nnrows = 3\nncols = 3\ndx = (1 - (ncols - 1) * xgapm - xgapl - xgapr) / ncols\ndy = (1 - (nrows - 1) * ygapm - ygapb - ygapt) / nrows\nnplots = nrows * ncols\nax = {}\n\n\npl.figure(figsize=(24, 16))\n# Import files\nfilepaths = [f'{resfolder}/uk_sim_{scen}.obj' for scen in scenarios]\nsims = sc.odict()\nmsims = sc.odict()\n\nfor scen in scenarios:\n filepath = f'{resfolder}/uk_sim_{scen}.obj'\n msims[scen] = sc.loadobj(filepath)\n sims[scen] = msims[scen].sims\n msims[scen].reduce()\n\nsim = sims[0][0] # Extract a sim to refer to\n\n# Extract weekly infection data\nw0, w1, w2, w3, w4, w5, w6, w7 = cv.date('2021-01-10'), cv.date('2021-01-17'), cv.date('2021-01-24'), cv.date('2021-01-31'), \\\n cv.date('2021-02-07'), cv.date('2021-02-14'), cv.date('2021-02-21'), cv.date('2021-02-28')\nwd = [sim.day(w0), sim.day(w1), sim.day(w2), sim.day(w3), sim.day(w4), sim.day(w5), sim.day(w6), sim.day(w7)]\ninf_med = []\ninf_low = []\ninf_high = []\nfor scen in scenarios:\n inf_med.append(msims[scen].results['new_infections'].values[wd])\n inf_low.append(msims[scen].results['new_infections'].low[wd])\n inf_high.append(msims[scen].results['new_infections'].high[wd])\n\nepsx = 0.003\nllpad = 0.01\n\nlockdown1 = [sim.day('2020-03-23'),sim.day('2020-05-31')]\nlockdown2 = [sim.day('2020-11-05'),sim.day('2020-12-03')]\nlockdown3 = [sim.day('2021-01-04'),sim.day('2021-02-08')]\n\nfor nc in range(ncols):\n pl.figtext(xgapl + (dx + xgapm) * nc + epsx, ygapb + dy * 
nrows + ygapm * (nrows - 1) + llpad, labels[nc],\n fontsize=36, fontweight='bold', bbox={'edgecolor': 'none', 'facecolor': 'white', 'alpha': 0.5, 'pad': 4})\n\nfor pn in range(nplots):\n ax[pn] = pl.axes([xgapl + (dx + xgapm) * (pn % ncols), ygapb + (ygapm + dy) * (pn // ncols), dx, dy])\n print([xgapl + (dx + xgapm) * (pn % ncols), ygapb + (ygapm + dy) * (pn // ncols)])\n print(list(sims.keys())[pn % ncols])\n format_ax(ax[pn], sim)\n\n if (pn%ncols) != 0:\n ax[pn].set_yticklabels([])\n else:\n ax[pn].set_ylabel('New infections')\n\n if pn in range(ncols):\n plotter('r_eff', sims[pn % ncols], ax[pn])\n ax[pn].set_ylim(0, 3.5)\n ax[pn].axhline(y=1, color='red', linestyle='--')\n if (pn%ncols) == 0:\n ax[pn].set_ylabel('R')\n elif pn in range(ncols,ncols*2):\n plotter('cum_deaths', sims[pn % ncols], ax[pn])\n ax[pn].set_ylim(0, 150_000)\n if (pn%ncols) == 0:\n ax[pn].set_ylabel('Total deaths')\n else:\n plotter('new_infections', sims[pn % ncols], ax[pn])\n ax[pn].set_ylim(0, 250_000)\n if (pn%ncols) == 0:\n ax[pn].set_ylabel('New infections')\n\n if pn not in range(ncols):\n ax[pn].set_xticklabels([])\n\ncv.savefig(f'{figsfolder}/fig_UK_school_scens.png', dpi=100)\n\n\n################################################################################\n# ## Fig 3\n################################################################################\npl.figure(figsize=(24, 12))\n#font_size = 24\n#pl.rcParams['font.size'] = font_size\n\n# Subplot sizes\nxgapl = 0.06\nxgapm = 0.1\nxgapr = 0.01\nygapb = 0.11\nygapm = 0.1\nygapt = 0.02\nnrows = 1\nncols = 2\ndx1 = (1-(ncols-1)*xgapm-xgapl-xgapr)*0.6\ndx2 = (1-(ncols-1)*xgapm-xgapl-xgapr)*0.4\ndy = (1-(nrows-1)*ygapm-ygapb-ygapt)/nrows\nnplots = nrows*ncols\n\ncolors = pl.cm.GnBu(np.array([0.3,0.65,1.]))\n\n# Fig 3A. box plot chart\nbox_ax = pl.axes([xgapl, ygapb, dx1, dy])\nx = np.arange(8)\n\nfor sn in range(3):\n box_ax.errorbar(x+0.1*sn-0.3, inf_med[sn]/1e3, yerr=[inf_low[sn]/1e3, inf_high[sn]/1e3], fmt='o', color=colors[sn], label=labels[sn], ecolor=colors[sn], ms=20, elinewidth=3, capsize=0)\n\nbox_ax.set_xticks(x-0.15)\n#box_ax.set_xticklabels(labels)\n\n@ticker.FuncFormatter\ndef date_formatter(x, pos):\n return (cv.date('2021-01-12') + dt.timedelta(days=x*7)).strftime('%d-%b')\n\nbox_ax.xaxis.set_major_formatter(date_formatter)\npl.ylabel('Estimated daily infections (000s)')\nsc.boxoff(ax=box_ax)\nsc.commaticks()\nbox_ax.legend(frameon=False)\n\n\n# B. 
Cumulative total infections\nwidth = 0.8 # the width of the bars\nx = [0,1,2]\ndata = np.array([msims[sn].results['cum_infections'].values[-1]-msims[sn].results['cum_infections'].values[sim.day('2021-01-04')] for sn in scenarios])\nbar_ax = pl.axes([xgapl+xgapm+dx1, ygapb, dx2, dy])\nfor sn,scen in enumerate(scenarios):\n bar_ax.bar(x[sn], data[sn]/1e3, width, color=colors[sn], alpha=1.0)\n\nbar_ax.set_xticklabels(['', 'FNL', 'Primary-only\\nPNL', 'Staggered\\nPNL'])\nsc.boxoff()\nsc.commaticks()\nbar_ax.set_ylabel('Total estimated infections\\nJan 4 - Feb 28 (000s)')\n\ncv.savefig(f'{figsfolder}/fig_bars.png', dpi=100)\n\n\n\n\nsc.toc(T)", "sub_path": "6_schools2/plot_UK_school_scenarios.py", "file_name": "plot_UK_school_scenarios.py", "file_ext": "py", "file_size_in_byte": 7397, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sciris.tic", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FuncFormatter", "line_number": 23, "usage_type": "attribute"}, {"api_name": "matplotlib.ticker", "line_number": 23, "usage_type": "name"}, {"api_name": "sciris.commaticks", "line_number": 28, "usage_type": "call"}, {"api_name": "pylab.xlim", "line_number": 29, "usage_type": "call"}, {"api_name": "pylab.axvspan", "line_number": 30, "usage_type": "call"}, {"api_name": "pylab.axvspan", "line_number": 31, "usage_type": "call"}, {"api_name": "pylab.axvspan", "line_number": 32, "usage_type": "call"}, {"api_name": "covasim.get_colors", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "pylab.median", "line_number": 49, "usage_type": "call"}, {"api_name": "pylab.quantile", "line_number": 50, "usage_type": "call"}, {"api_name": "pylab.quantile", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 54, "usage_type": "call"}, {"api_name": "pylab.fill_between", "line_number": 66, "usage_type": "call"}, {"api_name": "pylab.plot", "line_number": 67, "usage_type": "call"}, {"api_name": "sciris.setylim", "line_number": 69, "usage_type": "call"}, {"api_name": "pylab.array", "line_number": 71, "usage_type": "call"}, {"api_name": "pylab.ylabel", "line_number": 75, "usage_type": "call"}, {"api_name": "pylab.rcParams", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pylab.rcParams", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pylab.figure", "line_number": 102, "usage_type": "call"}, {"api_name": "sciris.odict", "line_number": 105, "usage_type": "call"}, {"api_name": "sciris.odict", "line_number": 106, "usage_type": "call"}, {"api_name": "sciris.loadobj", "line_number": 110, "usage_type": "call"}, {"api_name": "covasim.date", "line_number": 117, "usage_type": "call"}, {"api_name": "covasim.date", "line_number": 118, "usage_type": "call"}, {"api_name": "pylab.figtext", "line_number": 136, "usage_type": "call"}, {"api_name": "pylab.axes", "line_number": 140, "usage_type": "call"}, {"api_name": "covasim.savefig", "line_number": 170, "usage_type": "call"}, {"api_name": "pylab.figure", "line_number": 176, "usage_type": "call"}, {"api_name": "pylab.cm.GnBu", "line_number": 194, "usage_type": "call"}, {"api_name": "pylab.cm", "line_number": 194, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "pylab.axes", "line_number": 197, "usage_type": "call"}, {"api_name": 
"numpy.arange", "line_number": 198, "usage_type": "call"}, {"api_name": "covasim.date", "line_number": 208, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FuncFormatter", "line_number": 206, "usage_type": "attribute"}, {"api_name": "matplotlib.ticker", "line_number": 206, "usage_type": "name"}, {"api_name": "pylab.ylabel", "line_number": 211, "usage_type": "call"}, {"api_name": "sciris.boxoff", "line_number": 212, "usage_type": "call"}, {"api_name": "sciris.commaticks", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 220, "usage_type": "call"}, {"api_name": "pylab.axes", "line_number": 221, "usage_type": "call"}, {"api_name": "sciris.boxoff", "line_number": 226, "usage_type": "call"}, {"api_name": "sciris.commaticks", "line_number": 227, "usage_type": "call"}, {"api_name": "covasim.savefig", "line_number": 230, "usage_type": "call"}, {"api_name": "sciris.toc", "line_number": 235, "usage_type": "call"}]} +{"seq_id": "642067355", "text": "import pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom wordcloud import WordCloud\nfrom game.flaskapp_andrius.api import preprocesser\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef clean_recipe_table(recipe_table_csv, updated_images_csv):\n \"\"\"\n Takes original recipe table, updates images using new csv, then cleans data\n Input format\n clean_recipe_table('../data/recipe_table.csv', '../data/recipe_table_updated_UAT.csv')\n \"\"\"\n # Read and prepare CSV\n df_old = pd.read_csv(recipe_table_csv, sep=\";\")\n df_new = pd.read_csv(updated_images_csv, sep=\",\").reset_index()\n df_new.columns = df_new.iloc[0].tolist()\n df_new = df_new.drop(0)\n df_new[\"id\"] = df_new[\"id\"].astype(int)\n\n # Make dict with IDs and URLs\n old_images = df_old.set_index(\"id\").to_dict()[\"image_url\"]\n new_images = df_new.set_index(\"id\").to_dict()[\"image_url\"]\n\n # Match new image links with old IDs\n new_dict = {k: new_images.get(k, v) for k, v in old_images.items()}\n\n # Make new df with updated images\n updated_images = pd.DataFrame.from_dict(\n new_dict, orient=\"index\", columns=[\"image_url\"]\n )\n\n # Add image resizing\n updated_images = updated_images.reset_index()\n updated_images.columns = [\"id\", \"image_url\"]\n updated_images[\"image_url\"] = (\n updated_images[\"image_url\"] + \"?auto=format&fit=crop&w=320&h=218\"\n )\n\n # Update old recipe table with new images\n df_updated = df_old.copy()\n df_updated[\"image_url\"] = updated_images[\"image_url\"]\n\n # Remove links broken in old and new table for certain recipes\n broken_links = [741, 808, 824, 880, 889, 938, 945]\n for i in broken_links:\n df_updated = df_updated.drop(df_updated.loc[df_updated[\"id\"] == i].index)\n\n # Drop recipes without a foodgroup entry\n df_updated = df_updated.dropna(subset=[\"food_group\"])\n\n # Broken links in new table that were working fine in old\n revert_links = [320, 335, 349, 495, 647, 764]\n for elem in revert_links:\n reverted_image = df_old.at[df_old[df_old[\"id\"] == elem].index[0], \"image_url\"]\n df_updated.at[\n df_updated[df_updated[\"id\"] == elem].index[0], \"image_url\"\n ] = reverted_image\n\n # Reassigning ginger salmon to fish instead of chicken\n gin_salm = \"Ginger salmon with buckwheat noodles & 
peanuts\"\n df_updated.at[\n df_updated[df_updated[\"title\"] == gin_salm].index[0], \"food_group\"\n ] = \"Fish\"\n\n # Change 4 shellfish entries to correct food groups\n # IDs: 31, 102, 366, 407\n df_updated.at[df_updated[df_updated[\"id\"] == 31].index[0], \"food_group\"] = \"Beef\"\n df_updated.at[\n df_updated[df_updated[\"id\"] == 102].index[0], \"food_group\"\n ] = \"Chicken\"\n df_updated.at[\n df_updated[df_updated[\"id\"] == 366].index[0], \"food_group\"\n ] = \"Chicken\"\n df_updated.at[df_updated[df_updated[\"id\"] == 407].index[0], \"food_group\"] = \"Fish\"\n\n # Upload to data folder\n df_updated.to_csv(\"../data/recipe_table_new.csv\", sep=\";\", index=False)\n\n print(\"---New recipe table generated as 'recipe_table_new.csv'---\")\n\n\ndef get_embedding(recipe_dict):\n \"\"\"\n Takes a recipe dictionary and calculates recipe vector.\n Take mean of recipe vector to give embedding, in line with original original get_delta12 function\n \"\"\"\n list_of_embeddings = []\n for recipe in recipe_dict:\n recipe_embedding = np.mean(preprocesser.recipe2vec(recipe))\n list_of_embeddings.append(recipe_embedding)\n return list_of_embeddings\n\n\ndef get_vector(recipe):\n \"\"\"\n Takes a single recipe and returns 45-D recipe vector\n \"\"\"\n return preprocesser.recipe2vec(recipe)\n\n\ndef get_vector_list(recipe_dict):\n \"\"\"\n Takes recipe list and returns list of recipe vectors\n \"\"\"\n list_of_vectors = []\n for recipe in recipe_dict:\n recipe_vector = preprocesser.recipe2vec(recipe)\n list_of_vectors.append(recipe_vector)\n return list_of_vectors\n\n\ndef get_euc_dist_from_origin(recipe_array):\n \"\"\"\n Takes single recipe array and calculates magnitude from the origin\n \"\"\"\n recipe_array = recipe_array.reshape(1, -1)\n dist = euclidean_distances(\n recipe_array,\n [\n [\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0,\n ]\n ],\n )\n return dist\n\n\ndef get_top_twenty_words(df, n_comp):\n \"\"\"\n Takes dataframe of text (title, description, key ingredient) and uses truncated SVD to reduce down to n components\n \"\"\"\n vectorizer = TfidfVectorizer(stop_words=\"english\", strip_accents=\"unicode\")\n vec = vectorizer.fit_transform(df)\n tfidf = pd.DataFrame(vec.todense(), columns=vectorizer.get_feature_names())\n svd = TruncatedSVD(n_components=n_comp, n_iter=7, random_state=42)\n svd_ft = svd.fit_transform(tfidf)\n df_comp = pd.DataFrame(svd.components_, columns=tfidf.columns)\n df_comp = df_comp.T\n return np.mean(df_comp.abs(), axis=1).sort_values(ascending=False).head(10)\n", "sub_path": "game/src/feature_generator.py", "file_name": "feature_generator.py", "file_ext": "py", "file_size_in_byte": 5960, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pandas.read_csv", "line_number": 19, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 20, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 94, "usage_type": "call"}, {"api_name": "game.flaskapp_andrius.api.preprocesser.recipe2vec", "line_number": 94, "usage_type": "call"}, {"api_name": "game.flaskapp_andrius.api.preprocesser", "line_number": 94, "usage_type": "name"}, 
{"api_name": "game.flaskapp_andrius.api.preprocesser.recipe2vec", "line_number": 103, "usage_type": "call"}, {"api_name": "game.flaskapp_andrius.api.preprocesser", "line_number": 103, "usage_type": "name"}, {"api_name": "game.flaskapp_andrius.api.preprocesser.recipe2vec", "line_number": 112, "usage_type": "call"}, {"api_name": "game.flaskapp_andrius.api.preprocesser", "line_number": 112, "usage_type": "name"}, {"api_name": "sklearn.metrics.pairwise.euclidean_distances", "line_number": 122, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 183, "usage_type": "call"}, {"api_name": "sklearn.decomposition.TruncatedSVD", "line_number": 184, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 186, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 188, "usage_type": "call"}]} +{"seq_id": "642845258", "text": "from typing import Tuple, Optional, List, Dict\nimport math\nimport random\n\n\ndef get_utility_2d(board: Tuple[Optional[int], ...]) -> Optional[int]:\n\n # check rows and cols\n for i in range(4):\n\n # columns\n column = board[i:16:4]\n if (column[0] and column.count(column[0]) == 4):\n return column[0]\n\n # rows\n row = board[i*4: 4*i+4]\n if (row[0] and row.count(row[0]) == 4):\n return row[0]\n\n # check diagonals\n diag1 = [board[i] for i in [0, 5, 10, 15]]\n diag2 = [board[i] for i in [3, 6, 9, 12]]\n\n if (diag1[0] and diag1.count(diag1[0]) == 4):\n return diag1[0]\n if (diag2[0] and diag2.count(diag2[0]) == 4):\n return diag2[0]\n\n # if still spots\n if 0 in board:\n return None\n\n return 0\n\n\ndef get_utility_3d(board: Tuple[Tuple[Optional[int], ...], ...]\n ) -> Optional[int]:\n\n b1, b2, b3, b4 = board\n winners = [1, -1]\n\n # slicing board in x,y,z dirs\n for i in range(4):\n\n # xy plane\n sliceboard = board[i]\n result = get_utility_2d(sliceboard)\n if result in winners:\n return result\n\n # yz plane\n sliceboard = b1[i*4: 4*i+4] + b2[i*4: 4*i+4] + \\\n b3[i*4: 4*i+4] + b4[i*4: 4*i+4]\n result = get_utility_2d(sliceboard)\n if result in winners:\n return result\n\n # xz plane\n sliceboard = b1[i*4: 4*i+4] + b2[i*4: 4*i+4] + \\\n b3[i*4: 4*i+4] + b4[i*4: 4*i+4]\n result = get_utility_2d(sliceboard)\n if result in winners:\n return result\n\n # test the remaining diagonals\n\n diag1 = board[0][0], board[1][5], board[2][10], board[3][15]\n diag2 = board[3][0], board[2][5], board[1][10], board[0][15]\n diag3 = board[0][3], board[1][6], board[2][9], board[3][12]\n diag4 = board[3][3], board[2][6], board[1][9], board[0][12]\n\n for diag in [diag1, diag2, diag3, diag4]:\n if (diag[0] and diag.count(diag[0]) == 4):\n return diag[0]\n\n # test if still open spaces\n for b in board:\n if 0 in b:\n return None\n\n return 0\n\n\ndef unpack_board(board: Tuple[Tuple[Optional[int], ...], ...]\n ) -> List[Optional[int]]:\n b1, b2, b3, b4 = board\n return list(b1 + b2 + b3 + b4)\n\n\ndef repack_board(board: List[Optional[int]]\n ) -> Tuple[Tuple[Optional[int], ...], ...]:\n return (tuple(board[0:16]),\n tuple(board[16:32]),\n tuple(board[32:48]),\n tuple(board[48:64]))\n\n\nclass Node:\n def __init__(self, board, player, parent=None, move_played=None):\n self.board = board\n self.children = []\n self.parent = parent\n self.move_played = move_played\n\n self.total_tries = 0\n self.num_wins = 0\n\n self.player = player\n\n\ndef get_next_move_indicies(board: Tuple[Tuple[Optional[int], ...], ...]\n ) -> List[int]:\n unpacked_b = 
unpack_board(board)\n indicies = []\n\n while 0 in unpacked_b:\n free_spot = unpacked_b.index(0)\n indicies.append(free_spot)\n unpacked_b[free_spot] = 1\n\n return indicies\n\n\n# given the indicie, return a board with the correct player now in that spot\ndef make_move(board: Tuple[Tuple[Optional[int], ...], ...],\n player: int, move_index: int\n ) -> Tuple[Tuple[Optional[int], ...], ...]:\n unpacked_b = unpack_board(board)\n unpacked_b[move_index] = player\n return repack_board(unpacked_b)\n\n\ndef backprop(current_node: Node, result: int) -> None:\n\n # recursively update the results until get to parent\n while current_node.parent:\n\n current_node.total_tries += 1\n\n if result == current_node.player:\n current_node.num_wins += 1\n elif result == 0:\n current_node.num_wins += 0.5\n\n current_node = current_node.parent\n\n # updating the root\n current_node.total_tries += 1\n\n\ndef rollout(current_node: Node) -> int:\n\n # choose random (valid) moves until we get to a terminal state\n current_board = current_node.board\n result = get_utility_3d(current_board)\n player = current_node.player\n\n turn = True\n\n while result is None:\n\n if not turn:\n player = player * -1\n\n next_move_indicies = get_next_move_indicies(current_board)\n current_board = make_move(\n current_board, player, random.choice(next_move_indicies))\n result = get_utility_3d(current_board)\n turn = not turn\n\n return result\n\n\ndef calculate_ucb(node: Node) -> float:\n n = node.total_tries\n if n == 0:\n return math.inf\n\n w = node.num_wins\n c = math.sqrt(4)\n bigN = node.parent.total_tries\n\n return w/n + c * math.sqrt(math.log(bigN) / n)\n\n\ndef generate_children(node: Node) -> None:\n\n next_move_indicies = get_next_move_indicies(node.board)\n\n for move_index in next_move_indicies:\n new_board = make_move(node.board, node.player * -1, move_index)\n child = Node(new_board, node.player * -1, node, move_index)\n node.children.append(child)\n\n\ndef get_best_move_currently(root: Node) -> int:\n\n # find the best ratio of wins to total tries\n ratios = [child.num_wins / child.total_tries for child in root.children]\n best_ratio_index = ratios.index(max(ratios))\n\n return root.children[best_ratio_index].move_played\n\n\ndef find_best_move(board: Tuple[Tuple[Optional[int], ...], ...],\n player: int, best_move: List[int], table: Dict\n ) -> Optional[int]:\n\n num_rollouts = 40\n num_steps = 1000\n\n # best_move[0] = 0\n\n # create root node\n root = Node(board, player * -1)\n\n # create child nodes for all the next possible moves\n generate_children(root)\n\n # rollouts and backprops for each child node to get some initial statistics\n for child in root.children:\n for _ in range(num_rollouts):\n res = rollout(child)\n backprop(child, res)\n\n for _ in range(num_steps):\n\n # select successive child nodes (by ucb) until a leaf node L is reached\n current = root\n while current.children:\n\n # figure out the child with best ucb and choose it\n highest_ucb = 0\n best_child = None\n for child in current.children:\n ucb = calculate_ucb(child)\n if ucb > highest_ucb:\n highest_ucb = ucb\n best_child = child\n current = best_child\n\n # if the leaf ends the game, don't generate children\n utility = get_utility_3d(current.board)\n\n # check if the leaf node L has been visited yet (if total_tries == 0)\n if current.total_tries == 0:\n res = rollout(current)\n backprop(current, res)\n\n # if it has been visited, expand and then randomly pick\n # and also this will never happen until ALL the child have been\n # visited at 
least once, cause if it hasn't been visited it\n # will get chosen by highest ucb\n elif utility is None:\n generate_children(current)\n random_child = random.choice(current.children)\n res = rollout(random_child)\n backprop(random_child, res)\n\n best_move[0] = get_best_move_currently(root)\n\n return best_move[0]\n", "sub_path": "agent.py", "file_name": "agent.py", "file_ext": "py", "file_size_in_byte": 7331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "typing.Tuple", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 91, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 91, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 92, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 92, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 112, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 112, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 113, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 126, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 126, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 128, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 128, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 168, "usage_type": "call"}, {"api_name": "math.inf", "line_number": 178, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 181, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 184, "usage_type": "call"}, {"api_name": "math.log", "line_number": 184, "usage_type": "call"}, {"api_name": "typing.Tuple", "line_number": 206, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 206, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 207, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 207, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 257, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 208, "usage_type": "name"}]} +{"seq_id": "67490221", "text": "import matplotlib\nimport os\nimport cv2\nimport numpy as np\nimport time\nimport recognize_number as rn\n\ndef test():\n #jump(1000)# out of windows\n #jump(100)# a little move\n #jump(1)\n #jump(20)\n #jump(800)\n #jump(900)\n # time ={100,900}\n pass\n\ndef main_process(action,last_reward,terminate=False):\n # process action\n if terminate:\n tap_to_start()\n terminate=False\n\n jump(action)\n time.sleep(2.6)\n # get observer\n observer_path = screencrop()\n observer,goal_crop_path = observer_preprocess(observer_path)\n #cv2.imshow(\"observer\",observer)\n #cv2.waitKey(0)\n #process rewards\n # cut pictureto four part 0,0,0,0\n num = cut_process(goal_crop_path)\n reward=0\n if num[0] == -1:\n reward=last_reward\n terminate = True\n observer 
= -1\n else:\n reward_now = rn.get_reward(num[1:])\n reward = reward_now - last_reward\n print(reward,terminate)\n return reward, observer,terminate\n\n\n\n\n## cmd\ndef tap_to_start():\n os.system(\"adb shell input tap 570 1570\")\n\n\ndef jump(duration): # swipe duration in ms; avoids shadowing the time module\n os.system(\"adb shell input swipe 19 19 19 19 %s\" % duration)\n\n\ndef screencrop():\n paths = os.getcwd()\n save_path = os.path.join(paths,\"test_pic/autojump.png\")\n os.system(\"adb shell screencap -p /sdcard/autojump.png\")\n os.system(\"adb pull /sdcard/autojump.png \"+save_path)\n return save_path\n\n## image process\ndef is_end(img):\n h,w = img.shape\n for i in range(int(h/4)):\n for j in range(int(w/4)):\n if img[i,j] != 0:\n return False\n return True\n\ndef cut_process(goal_pic_path):\n img = cv2.imread(goal_pic_path,-1)\n #print(img.shape)\n #print(img)\n if is_end(img):\n return -1,0,0,0,0\n else:\n div = 8\n return 1,img[:,:div],img[:,div:2*div],img[:,2*div:3*div],img[:,3*div:4*div]\n\ndef observer_preprocess(path):\n img = cv2.imread(path)\n img = cv2.resize(img, (108, 192))\n\n img_crop = img[20:30, 11:43]\n img_crop = cv2.cvtColor(img_crop,cv2.COLOR_BGR2GRAY)\n img_crop = black_white(img_crop)\n\n #cv2.imshow(\"imgs\", img_crop)\n paths = os.getcwd()\n save_path = os.path.join(paths, \"test_pic/goal_crop.jpg\")\n cv2.imwrite(save_path,img_crop)\n return img,save_path\n\ndef black_white(img):\n h,w = img.shape\n for i in range(h):\n for j in range(w):\n if img[i,j] > 128:\n img[i,j] = 255\n else:\n img[i,j] = 0\n return img\n\ndef recognizeGoal(path):\n # this part is used to recognize the goal and return the reward\n pass\n\ndef recognizeEnd(path):\n # this function is used to recognize whether the game is over\n pass\n\ndef jumpgame(action):\n # this function is used to play the game with the \"action\", and\n # return the terminate, observer, reward\n # reward: num\n # observer: an image\n # terminate: a bool value\n observer = 0\n terminate = 0\n reward = 0\n return reward, observer,terminate\ndef get_image(filename):\n path = screencrop()\n img = cv2.imread(path)\n img = cv2.resize(img, (108, 192))\n\n img_crop = img[20:30, 11:19]\n img_crop = cv2.cvtColor(img_crop, cv2.COLOR_BGR2GRAY)\n img_crop = black_white(img_crop)\n\n #cv2.imshow(\"imgs\", img_crop)\n #cv2.waitKey(0)\n paths = os.getcwd()\n save_path = os.path.join(paths, \"./game/number_data/crop_\"+str(filename)+\"_gray.jpg\")\n cv2.imwrite(save_path, img_crop)\n\nif __name__ == \"__main__\":\n #screencrop()\n #cropimage()\n #collectnumber(\"6.png\")\n #test()\n #transfer_gray()\n #analaysis()\n main_process(800,terminate=False,last_reward=0)\n #get_image(6)", "sub_path": "jumpgame.py", "file_name": "jumpgame.py", "file_ext": "py", "file_size_in_byte": 3620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "recognize_number.get_reward", "line_number": 40, "usage_type": "call"}, {"api_name": "os.system", "line_number": 50, "usage_type": "call"}, {"api_name": "os.system", "line_number": 54, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 60, "usage_type": "call"}, {"api_name": "os.system", "line_number": 61, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 74, "usage_type": "call"}, 
{"api_name": "cv2.imread", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 94, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 127, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 128, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 131, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 136, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "30124517", "text": "import pandas as pd\nfrom imblearn import combine\nfrom sklearn import metrics, model_selection\nimport xgboost as xgb\nfrom scripts.data_util import build_dataset, train_val_split\n\nfrom scripts.outlier_scores import *\n\nSEED = 123\nNUM_TESTS = 1\n\n\ndef to_dataset(creditcard_data):\n x = creditcard_data.drop(columns=['Time', 'Class'])\n y = creditcard_data['Class']\n return x, y\n\ndef threshold_proba(pred_prob,num=10):\n class_arr = []\n for t in np.linspace(0,1,num):\n cs = [int(x) for x in pred_prob >= t]\n class_arr.append(cs)\n return class_arr\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n\n train_x, train_y, test_x, test_y = build_dataset('../data/old/creditcard_train.csv', '../data/old/creditcard_test.csv',\n scaler='standard')#,\n #generated_frauds_path='../data/gen_frauds10000.csv')\n\n train_x = pd.DataFrame(train_x)\n test_x = pd.DataFrame(test_x)\n # scaler = preprocessing.StandardScaler()\n # scaler.fit(train_x)\n #\n # train_x = scaler.transform(train_x)\n # test_x = scaler.transform(test_x)\n\n outlier_model_combinations = [\n [],\n # [(ZScore, {})],\n # [(PCA, {'m': 1, 'whiten': True})],\n # [(PCA_RE, {'m': 1, 'whiten': True})],\n # [(IF, {})],\n # [(GM, {'m': 1})],\n # [\n # (ZScore, {}),\n # (PCA, {'m': 1, 'whiten': True}),\n # (PCA_RE, {'m': 1, 'whiten': True}),\n # (IF, {}),\n # (GM, {'m': 1})\n # ]\n ]\n\n seeds = np.random.randint(low=np.iinfo(np.int32).max, size=NUM_TESTS)\n history = []\n\n params = {\n 'eta': 0.3,\n 'max_depth': 5,\n 'num_class': 2,\n # 'n_estimators': 200,\n 'learning_rate': 0.1,\n 'objective': 'multi:softprob'\n }\n\n steps = 500\n\n for outlier_models_signatures in outlier_model_combinations:\n outlier_models_names = [model(**params).name for model, params in outlier_models_signatures]\n\n print(f'current outlier models: {outlier_models_names}')\n\n performance_scores = []\n\n for i, seed in enumerate(seeds):\n outlier_models = [outlier_model(**{**params, 'random_state': seed})\n for outlier_model, params in outlier_models_signatures]\n\n train_outlier_scores_data = {}\n test_outlier_scores_data = {}\n\n for outlier_model in outlier_models:\n # fit the outlier model on only the non-fraudulent transactions\n # print(train_y == 0)\n # print(train_x.loc[train_y == 0)])\n\n outlier_model.fit(train_x.loc[train_y == 0])\n train_outlier_scores = outlier_model.score(train_x)\n 
test_outlier_scores = outlier_model.score(test_x)\n train_outlier_scores_data[outlier_model.name] = train_outlier_scores\n test_outlier_scores_data[outlier_model.name] = test_outlier_scores\n\n train_x, train_y, val_x, val_y = train_val_split(train_x, train_y, stratify=train_y)\n\n train = xgb.DMatrix(train_x, label=train_y)\n test = xgb.DMatrix(test_x, label=test_y)\n val = xgb.DMatrix(val_x, label=val_y)\n\n gs_params = {\n 'max_depth': range(3,15,2),\n 'min_child_weight': range(1,6,2)\n }\n\n classifier = xgb.train(params,train,steps)\n # classifier = xgb.XGBClassifier(seed=seed, n_jobs=4)\n # classifier.fit(train_x_with_scores, train_y)\n\n\n pred_test_y = classifier.predict(test)[:,1]\n pred_val_y = classifier.predict(val)[:, 1]\n # pred_test_y = classifier.predict_proba(test_x_with_scores)[:,1]\n print(pred_test_y)\n\n auc_pr = metrics.average_precision_score(test_y, pred_test_y)\n\n pr_classes = threshold_proba(pred_val_y,51)\n f1s = [metrics.f1_score(val_y, pred_y) for pred_y in pr_classes]\n print(f'MAX F1 {max(f1s)} index: {f1s.index(max(f1s))}')\n\n performance_scores.append({\n 'auc_pr': auc_pr,\n 'f1' : f1s\n })\n\n print(f'[Iteration {i + 1}/{NUM_TESTS}] AUC-PR: {auc_pr:0.4f}')\n\n auc_pr_scores = [score['auc_pr'] for score in performance_scores]\n mean_auc_pr = np.mean(auc_pr_scores)\n auc_pr_std = np.std(auc_pr_scores)\n\n print(f'avg AUC-PR: {mean_auc_pr} (\\u00B1{auc_pr_std})')\n\n print(f\"F1: {performance_scores}\")\n", "sub_path": "scripts/classifier_xgboost.py", "file_name": "classifier_xgboost.py", "file_ext": "py", "file_size_in_byte": 4568, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "scripts.data_util.build_dataset", "line_number": 29, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 34, "usage_type": "call"}, {"api_name": "scripts.data_util.train_val_split", "line_number": 96, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 98, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 99, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 100, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 107, "usage_type": "call"}, {"api_name": "sklearn.metrics.average_precision_score", "line_number": 117, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 117, "usage_type": "name"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 120, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 120, "usage_type": "name"}]} +{"seq_id": "101745947", "text": "import math\nfrom sklearn.preprocessing import Binarizer\nfrom sklearn.preprocessing import Normalizer\n\n#Runs \"simulation_parameters.py\" and keeps its variables [simulation parameters]\nexec(open(\"simulation_parameters.py\").read(), globals())\n\n##########################################################\n#Data Loading\n\n\ndef create_noisy_features(features, labels, noise_std_converted, \n min_pow_cutoff, scaler = None, only_16_bf = False, \n undersampling = 1):\n\n #undersamples the BFs, if wanted\n if only_16_bf:\n mask = np.ones(time_slots*beamformings, dtype=bool)\n for i in range(time_slots*beamformings):\n #DIM 1 = BF, DIM 2 = TS\n if (i//time_slots)%2 == 0:\n mask[i] = False\n features = features[:,mask]\n print(\"[warning: using 16 bfs]\", features.shape)\n \n #undersamples spatially, if wanted\n if undersampling > 1:\n 
undersampling = int(undersampling) #just in case\n \n mask = np.ones(labels.shape[0], dtype=bool)\n for i in range(labels.shape[0]):\n label_x_scaled = int(labels[i,0] * 400)\n if label_x_scaled % undersampling > 0:\n mask[i] = False\n else:\n label_y_scaled = int(labels[i,1] * 400)\n if label_y_scaled % undersampling > 0:\n mask[i] = False\n\n features = features[mask,:]\n labels = labels[mask,:]\n \n print(\"[warning: undersampling by {0}]\".format(undersampling))\n print(features.shape)\n \n \n #the features here should be in range [0, ~1.2]\n noise = np.random.normal(scale = noise_std_converted, size = features.shape)\n noisy_features = features + noise\n noisy_features[noisy_features < min_pow_cutoff] = 0\n \n \n #removes the entries containing only 0\n mask = np.ones(labels.shape[0], dtype=bool)\n for i in range(labels.shape[0]):\n \n this_samples_sum = np.sum(noisy_features[i,:])\n if this_samples_sum < 0.01:\n mask[i] = False\n \n noisy_features = noisy_features[mask,:]\n noisy_labels = labels[mask,:]\n\n #doublecheck\n assert noisy_features.shape[0] == noisy_labels.shape[0]\n assert noisy_labels.shape[1] == 2\n assert noisy_features.shape[1] == features.shape[1]\n\n #Applies the preprocessing, if needed\n if scaler is not None:\n noisy_features = scaler.fit_transform(noisy_features)\n return([noisy_features, noisy_labels])\n \n \n \n \ndef position_to_class(labels, lateral_partition):\n\n class_indexes = []\n n_classes = lateral_partition ** 2\n \n for i in range(labels.shape[0]):\n \n x_index = int(math.floor(labels[i,0] * lateral_partition))\n if(x_index == lateral_partition): x_index = lateral_partition-1\n \n y_index = int(math.floor(labels[i,1] * lateral_partition))\n if(y_index == lateral_partition): y_index = lateral_partition-1\n \n true_index = (y_index * lateral_partition) + x_index\n \n class_indexes.append(true_index)\n \n class_indexes = np.asarray(class_indexes)\n \n return(class_indexes)\n \n \n\n\n#Loads the dataset, stored as 3 numpy arrays [plus a couple of doublechecks]\nprint(\"Loading the dataset...\", end='', flush=True)\nwith open(preprocessed_file, 'rb') as f:\n features, labels, invalid_slots = pickle.load(f)\n \ninput_size = features.shape[1]\nprint(\" done! 
Features shape:\", features.shape)\n\n\nassert features.shape[0] == labels.shape[0]\n\nif (removed_invalid_slots == False):\n assert predicted_input_size == input_size\n \n \n#DATA PREPROCESSING [binarize/normalize]:\nif binary_scaler: \n scaler = Binarizer(0.1, copy=False)\n scaler_name = 'binarized'\nelse: \n scaler = Normalizer(copy=False)\n scaler_name = 'normalized'\n \n \n \n#Data Loading\n##########################################################", "sub_path": "ml_training/load_data.py", "file_name": "load_data.py", "file_ext": "py", "file_size_in_byte": 3956, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "math.floor", "line_number": 84, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Binarizer", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.Normalizer", "line_number": 121, "usage_type": "call"}]} +{"seq_id": "422919229", "text": "import pymysql\n\ndef run_sql(sql):\n try:\n # get a database connection; note: if the data is UTF-8, the database must be specified\n conn=pymysql.connect(host='10.9.170.118', user='root', passwd='Zqbxzxh1', db='lottery', 
port=3306, charset='utf8')\n cur=conn.cursor()# get a cursor\n cur.execute(sql)\n data=cur.fetchall()\n cur.close() # close the cursor\n conn.commit()\n conn.close()\n return data\n except Exception as ex:\n print(ex)\n return None\n\ndef get_data(num):\n sql = \"select `date`, `time`, `index`, `num1`, `num2`, `num3`, `num4`, `num5`, `sum` from fast_ssc order by `index` desc limit {}\".format(num)\n return run_sql(sql)\n\n\ndata_list = get_data(500000)\ndata_list = [[str(item) for item in one] for one in data_list]\ndata_list = data_list[::-1]\nwith open(\"data.log\", \"w\") as f:\n for item in data_list:\n f.write(\",\".join(item)+\"\\n\")\n", "sub_path": "load_data.py", "file_name": "load_data.py", "file_ext": "py", "file_size_in_byte": 928, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "pymysql.connect", "line_number": 6, "usage_type": "call"}]} +{"seq_id": "539281320", "text": "import traceback\nfrom functools import partial\n\n\nclass HTTPStatus(Exception):\n \"\"\"Base class for all HTTP Exceptions.\n\n :param code: HTTP status code.\n :param text: HTTP status text.\n :param keepheaders: If set, the application keeps the :attr:`.Response.headers`\n when an exception occurs.\n :param headers: Some extra HTTP headers to be added to the\n :attr:`.Response.headers` when the exception is raised.\n \"\"\"\n\n def __init__(self, code, text, keepheaders=False, headers=None):\n self.keepheaders = keepheaders\n self.headers = headers or []\n self.status = f'{code} {text}'\n super().__init__(self.status)\n\n def setupresponse(self, response, stacktrace=False):\n response.status = self.status\n body = [self.status]\n if stacktrace:\n body.append(traceback.format_exc())\n\n response.body = '\\r\\n'.join(body)\n\n if not self.keepheaders:\n response.headers.clear()\n\n response.headers += self.headers\n response.type = 'text/plain'\n response.charset = 'utf-8'\n\n\n#: Alias for :class:`.HTTPStatus`\nstatus = HTTPStatus\n\n#: HTTP 400 Bad Request exception factory\nbadrequest = partial(status, 400, 'Bad Request')\n\n#: HTTP 401 Unauthorized exception factory\nunauthorized = partial(status, 401, 'Unauthorized')\n\n#: HTTP 403 Forbidden exception factory\nforbidden = partial(status, 403, 'Forbidden')\n\n#: HTTP 404 Not Found exception factory\nnotfound = partial(status, 404, 'Not Found')\n\n#: HTTP 405 Method Not Allowed exception factory\nmethodnotallowed = partial(status, 405, 'Method Not Allowed')\n\n#: HTTP 409 Conflict exception factory\nconflict = partial(status, 409, 'Conflict')\n\n#: HTTP 410 Gone exception factory\ngone = partial(status, 410, 'Gone')\n\n#: HTTP 412 Precondition Failed exception factory\npreconditionfailed = partial(status, 412, 'Precondition Failed')\n\n#: HTTP 304 Not Modified exception factory\nnotmodified = partial(status, 304, 'Not Modified')\n\n#: HTTP 500 Internal Server Error exception factory\ninternalservererror = partial(status, 500, 'Internal Server Error')\n\n#: HTTP 502 Bad Gateway exception factory\nbadgateway = partial(status, 502, 'Bad Gateway')\n\n\ndef redirect(code, text, location):\n return status(code, text, headers=[('Location', location)])\n\n#: HTTP 301 Moved Permanently exception factory\nmovedpermanently = partial(redirect, 301, 'Moved Permanently')\n\n#: HTTP 302 Found exception factory\nfound = partial(redirect, 302, 'Found')\n\n", "sub_path": "yhttp/statuses.py", "file_name": "statuses.py", "file_ext": "py", "file_size_in_byte": 2513, "program_lang": "python", "lang": "en", "doc_type": "code", 
"dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "traceback.format_exc", "line_number": 26, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 42, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 45, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 48, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 51, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 54, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 57, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 60, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 63, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 66, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 69, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 72, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 79, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "624249629", "text": "# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.11.2\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport os\nimport time\nimport json\nfrom tqdm import tqdm\n\n\ndef call_api(author_id): \n response = requests.get(\"https://www.semanticscholar.org/author/\" + str(author_id))\n\n if response.ok:\n soup = BeautifulSoup(response.text)\n author_natural_name = soup.title.text[:-19]\n author = dict()\n \n for div in soup.find_all(\"div\", class_=\"author-detail-card__stats-row\"):\n label = div.find(\"span\", class_=\"author-detail-card__stats-row__label\")\n\n if label.text == \"Publications\": \n elem = label\n for _ in range(2):\n elem = elem.next\n author[\"publications\"] = int(elem.text.replace(\",\", \"\"))\n\n elif label.text == \"h-index\": \n elem = label\n for _ in range(5):\n elem = elem.next\n author[\"h-index\"] = int(elem.text)\n\n elif label.text == \"Citations\": \n elem = label\n for _ in range(2):\n elem = elem.next\n author[\"citations\"] = int(elem.text.replace(\",\", \"\"))\n for key, value in author.items():\n if value < 0:\n raise ValueError(\"{} has value of {}.\".format(key, value))\n\n return author\n else:\n raise NameError(\"[{}] Could not get respone for authorID {}.\".format(response.status_code, author_id))\n\n\n\nauthor_jsons_path = \"data/semantic_scholar/authors/\"\nauthor_jsons = os.listdir(author_jsons_path)\n\nnot_done = []\nfor idx, jf in enumerate(author_jsons):\n with open(author_jsons_path + jf) as filebuffer: \n author = json.load(filebuffer)\n \n if \"h-index\" not in author.keys() or \"publications\" not in author.keys() or \"citations\" not in author.keys():\n print(jf)\n not_done.append(jf)\n\n\nlen(not_done)\n\nfor jf in tqdm(not_done):\n with open(author_jsons_path + jf) as filebuffer:\n author = json.load(filebuffer)\n \n author_id = os.path.splitext(jf)[0]\n try:\n author_meta = call_api(author_id)\n except Exception as e:\n tqdm.write(\"{}\".format(e))\n continue\n \n author.update(author_meta)\n \n with open(author_jsons_path + jf, \"w\") as filebuffer:\n json.dump(author, filebuffer)\n \n\nwith open(author_jsons_path + \"1491355593.json\") as f:\n print(json.load(f))\n", "sub_path": 
"archive/authors_from_semantic_scholar/semantic_scholar_remaining.py", "file_name": "semantic_scholar_remaining.py", "file_ext": "py", "file_size_in_byte": 2713, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "requests.get", "line_number": 24, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 27, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 62, "usage_type": "call"}, {"api_name": "json.load", "line_number": 67, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 76, "usage_type": "call"}, {"api_name": "json.load", "line_number": 78, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 80, "usage_type": "call"}, {"api_name": "os.path", "line_number": 80, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm.write", "line_number": 84, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 84, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 90, "usage_type": "call"}, {"api_name": "json.load", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "103350545", "text": "\n\"\"\"\nRuns one instance of the environment and optimizes using the Soft Actor\nCritic algorithm. Can use a GPU for the agent (applies to both sample and\ntrain). No parallelism employed, everything happens in one python process; can\nbe easier to debug.\n\nRequires OpenAI gym (and maybe mujoco). If not installed, move on to next\nexample.\n\n\"\"\"\nimport random\nimport numpy as np\n\nfrom rlpyt.samplers.serial.sampler import SerialSampler\nfrom rlpyt.envs.gym import make as gym_make\nfrom rlpyt.algos.qpg.sac import SAC\nfrom rlpyt.agents.qpg.sac_agent import SacAgent\nfrom rlpyt.algos.pg.a2c import A2C\n# from rlpyt.agents.pg.atari import AtariFfAgent\nfrom rlpyt.agents.pg.gaussian import GaussianPgAgent\nfrom rlpyt.agents.pg.categorical import CategoricalPgAgent\nfrom rlpyt.runners.minibatch_rl import MinibatchRlEval\nfrom rlpyt.utils.logging.context import logger_context\n\nimport gym\nimport torch\nfrom torch import nn\nfrom rlpyt.models.mlp import MlpModel\nfrom rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims, to_onehot\nimport torch.nn.functional as F\n\n\n\nclass ModelCls(nn.Module):\n def __init__(self, ob_dim, ac_dim, **kwargs):\n super().__init__()\n self.ob_dim = ob_dim\n self.ac_dim = ac_dim\n self.model = MlpModel(self.ob_dim, [64, 64], output_size=None,\n nonlinearity=torch.nn.Tanh, # torch.nn.Tanh\n )\n self.pi = torch.nn.Linear(64, self.ac_dim)\n self.model2 = MlpModel(self.ob_dim, [64, 64], output_size=None,\n nonlinearity=torch.nn.ReLU, # torch.nn.Tanh,\n )\n self.value = torch.nn.Linear(64, 1)\n\n def forward(self, observation, prev_action, prev_reward):\n \"\"\"Feedforward layers process as [T*B,H]. 
Return same leading dims as\n input, can be [T,B], [B], or [].\"\"\"\n # observation = observation.squeeze(-1)\n # print(\"ob shape: {}\".format(observation.shape))\n # ob2 = observation[1:]\n # ob1 = to_onehot(observation[0], 100)\n # print(\"ob1 shape: {}, ob2 shape {}\".format(ob1.shape, ob2.shape))\n # observation = torch.cat([ob1, ob2], dim=-1)\n # print(\"After ob shape: {}\".format(observation.shape))\n # print(\"ob onehot:\", observation)\n input = observation.type(torch.float) # Expect torch.uint8 inputs\n\n # Infer (presence of) leading dimensions: [T,B], [B], or [].\n lead_dim, T, B, input_shape = infer_leading_dims(input, 1)\n\n fc_out = self.model(input.view(T * B, *input_shape)) # Fold if T dimension.\n pi = F.softmax(self.pi(fc_out), dim=-1)\n\n fc_out = self.model2(input.view(T * B, *input_shape)) # Fold if T dimension.\n v = self.value(fc_out).squeeze(-1)\n # Restore leading dimensions: [T,B], [B], or [], as input.\n pi, v = restore_leading_dims((pi, v), lead_dim, T, B)\n return pi, v\n\n\ndef build_and_train(env_id=\"Hopper-v3\", run_ID=0, cuda_idx=None):\n seed = 1\n # random.seed(seed)\n # np.random.seed(seed)\n # torch.manual_seed(seed)\n # torch.cuda.manual_seed(seed) can be called even when no GPU is present, so it does no harm\n # torch.cuda.manual_seed(seed)\n # cuDNN in deterministic mode (the two lines below) may degrade performance (depending on the model)\n # torch.backends.cudnn.deterministic = True\n # torch.backends.cudnn.benchmark = False\n\n batch_steps = 1e3\n log_steps = batch_steps\n n_steps = 30 * batch_steps\n eval_steps = batch_steps\n eval_trajs = 100\n lr = 5e-3\n animate = True\n sampler = SerialSampler(\n EnvCls=gym_make,\n env_kwargs=dict(id=env_id),\n eval_env_kwargs=dict(id=env_id),\n batch_T=int(batch_steps), # One time-step per sampler iteration.\n batch_B=1, # One environment (i.e. sampler Batch dimension).\n max_decorrelation_steps=0,\n eval_n_envs=1,\n eval_max_steps=int(eval_steps),\n eval_max_trajectories=eval_trajs,\n animate=animate,\n )\n\n # env = sampler.collector.envs[0]\n env = gym.make(env_id)\n discrete = isinstance(env.action_space, gym.spaces.Discrete)\n ob_dim = env.observation_space.shape[0]\n ac_dim = env.action_space.n if discrete else env.action_space.shape[0]\n print(\"discrete\", discrete, \"ob dim\", ob_dim, \"ac_dim\", ac_dim)\n # algo = SAC() # Run with defaults.\n # agent = SacAgent()\n adv_norm = True\n algo = A2C(learning_rate=lr,\n discount=0.99, # 0.99\n gae_lambda=0.99,\n value_loss_coeff=0.5, # 0.5\n entropy_loss_coeff=0.00, # 0.01\n normalize_advantage=adv_norm,\n clip_grad_norm=1e6,\n )\n # algo.bootstrap_value = False\n model_kwargs = dict(ob_dim=ob_dim, ac_dim=ac_dim)\n agent = CategoricalPgAgent(\n ModelCls=ModelCls,\n model_kwargs=model_kwargs,\n initial_model_state_dict=None,\n )\n runner = MinibatchRlEval(\n algo=algo,\n agent=agent,\n sampler=sampler,\n n_steps=n_steps,\n log_interval_steps=log_steps,\n affinity=dict(cuda_idx=cuda_idx),\n seed=seed,\n )\n config = dict(env_id=env_id)\n name = \"a2c_\" + env_id\n log_dir = \"example_2\"\n with logger_context(log_dir, run_ID, name, config):\n runner.train()\n\n # Close gym env, when using env.render()\n for env in sampler.collector.envs:\n env.close()\n\n traj_infos, eval_time = runner.evaluate_agent(n_steps)\n # runner.log_diagnostics(50e3, traj_infos, eval_time)\n rews = [x.Return for x in traj_infos]\n avg_rew = np.mean(rews)\n std_rew = np.std(rews)\n print(\"length of trajs: {}, avg_rew {}, std_rew {}\".format(\n len(traj_infos), avg_rew, std_rew))\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = 
argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--env_id', help='environment ID', default='CartPole-v0')\n # parser.add_argument('--env_id', help='environment ID', default='MountainCar-v0')\n # parser.add_argument('--env_id', help='environment ID', default='Hopper-v3')\n parser.add_argument('--run_ID', help='run identifier (logging)', type=int, default=0)\n parser.add_argument('--cuda_idx', help='gpu to use ', type=int, default=None)\n args = parser.parse_args()\n build_and_train(\n env_id=args.env_id,\n run_ID=args.run_ID,\n cuda_idx=args.cuda_idx,\n )\n", "sub_path": "examples/example_2_mod.py", "file_name": "example_2_mod.py", "file_ext": "py", "file_size_in_byte": 6476, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.nn.Module", "line_number": 35, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "rlpyt.models.mlp.MlpModel", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rlpyt.models.mlp.MlpModel", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 45, "usage_type": "attribute"}, {"api_name": "torch.nn.Linear", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.float", "line_number": 60, "usage_type": "attribute"}, {"api_name": "rlpyt.utils.tensor.infer_leading_dims", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 66, "usage_type": "name"}, {"api_name": "rlpyt.utils.tensor.restore_leading_dims", "line_number": 71, "usage_type": "call"}, {"api_name": "rlpyt.samplers.serial.sampler.SerialSampler", "line_number": 93, "usage_type": "call"}, {"api_name": "rlpyt.envs.gym.make", "line_number": 94, "usage_type": "name"}, {"api_name": "gym.make", "line_number": 107, "usage_type": "call"}, {"api_name": "gym.spaces", "line_number": 108, "usage_type": "attribute"}, {"api_name": "rlpyt.algos.pg.a2c.A2C", "line_number": 115, "usage_type": "call"}, {"api_name": "rlpyt.agents.pg.categorical.CategoricalPgAgent", "line_number": 125, "usage_type": "call"}, {"api_name": "rlpyt.runners.minibatch_rl.MinibatchRlEval", "line_number": 130, "usage_type": "call"}, {"api_name": "rlpyt.utils.logging.context.logger_context", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 153, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 160, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 160, "usage_type": "attribute"}]} +{"seq_id": "257853320", "text": "import datetime\n\ncont1=0\ncont=0\n\natual=int(datetime.date.today().year)\n\nfor ano in range(0,7):\n ano1=int(input('Digite o seu ano de nascimento: '))\n if ano1>atual:\n ano1=int(input('Ano invalido, colocar novamente:'))\n if atual-ano1>=18:\n cont+=1\n else:\n cont1+=1\nprint(f'{cont} atingiram a maioridade')\nprint(f'{cont1} nao atingiram a maioridade')\n", "sub_path": "#045.1-Estrutora de Repetição(for)/#054 - Grupo da Maioridade.py", 
"file_name": "#054 - Grupo da Maioridade.py", "file_ext": "py", "file_size_in_byte": 381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "datetime.date.today", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 6, "usage_type": "attribute"}]} +{"seq_id": "356724174", "text": "#!/usr/bin/env python\n\n\"\"\"\n Copyright 2021 Esri\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.​\n\"\"\"\n\nimport sys, json, argparse\n\ndef _arg_parser():\n parser = argparse.ArgumentParser(description=\"Update properties for App Gateway\")\n parser.add_argument(\"--agpf\", default=None, help=\"App Gateway Properties Temp File Name\")\n parser.add_argument(\"--tmpf\", default=None, help=\"Template Parameters File Name\")\n return parser.parse_args()\n\ndef delete_keys_from_dict(d, to_delete):\n if isinstance(to_delete, str):\n to_delete = [to_delete]\n if isinstance(d, dict):\n for single_to_delete in set(to_delete):\n if single_to_delete in d:\n del d[single_to_delete]\n for k, v in d.items():\n delete_keys_from_dict(v, to_delete)\n elif isinstance(d, list):\n for i in d:\n delete_keys_from_dict(i, to_delete)\n return d\n\ndef _main(args):\n ag = json.load(open(args.agpf))\n params = json.load(open(args.tmpf))\n isBaseDeployment = False if 'federateSite' in params['parameters'].keys() else True\n delete_keys_from_dict(ag,\"resourceGroup\")\n delete_keys_from_dict(ag,\"provisioningState\")\n \n del ag['operationalState']\n #sslCertificatesArrayList = ag['sslCertificates']\n #for cert in sslCertificatesArrayList:\n # del cert['properties']['publicCertData'] ##publicCertData\n # if cert['name'] == \"frontendCert\":\n # cert['properties']['data'] =\"[parameters('sslCertificateData')]\"\n # cert['properties']['password'] =\"[parameters('sslCertificatePassword')]\"\n if isBaseDeployment:\n deploymentPrefix = params['parameters']['deploymentPrefix']['value']\n \n trustedRootCertificateArrayList = ag['trustedRootCertificates']\n for cert in trustedRootCertificateArrayList:\n if cert['name'] == \"serverBackendSSLCert\": \n cert['properties']['data'] = \"[split(reference(concat('generateSSLCertificatesCustomExtension-',deployment().name),'2018-05-01').outputs.instanceView.value.substatuses[0].message, '###DATA###')[0]]\"\n if cert['name'] == \"portalBackendSSLCert\": \n cert['properties']['data'] = \"[split(reference(concat('generateSSLCertificatesCustomExtension-',deployment().name),'2018-05-01').outputs.instanceView.value.substatuses[0].message, '###DATA###')[1]]\"\n ag['trustedRootCertificates'] = trustedRootCertificateArrayList\n\n backendAddressPoolsArrayList = ag['backendAddressPools']\n for backendPool in backendAddressPoolsArrayList:\n if backendPool['name'] == (deploymentPrefix + \"ServerBackendPool\"):\n backendPool['properties'] = {\n \"copy\": [\n {\n \"name\": \"backendAddresses\",\n \"count\": \"[variables('numberOfServerVirtualMachines')]\",\n \"input\": {\n \"fqdn\": \"[concat( 
variables('serverVirtualMachineNames')[copyIndex('backendAddresses')], '.', if(equals(string(parameters('joinWindowsDomain')), 'True'),parameters('windowsDomainName'), reference(concat(variables('serverVirtualMachineNames')[copyIndex('backendAddresses')],'-',variables('nicName'))).dnsSettings.internalDomainNameSuffix))]\"\n }\n }\n ]\n }\n if backendPool['name'] == (deploymentPrefix + \"PortalBackendPool\"):\n backendPool['properties'] = {\n \"copy\": [\n {\n \"name\": \"backendAddresses\",\n \"count\": \"[variables('numberOfPortalVirtualMachines')]\",\n \"input\": {\n \"fqdn\": \"[concat( variables('portalVirtualMachineNames')[copyIndex('backendAddresses')], '.', if(equals(string(parameters('joinWindowsDomain')), 'True'),parameters('windowsDomainName'), reference(concat(variables('portalVirtualMachineNames')[copyIndex('backendAddresses')],'-',variables('nicName'))).dnsSettings.internalDomainNameSuffix))]\"\n }\n }\n ]\n }\n ag['backendAddressPools'] = backendAddressPoolsArrayList\n else:\n securityTagOption = 'Federated' if params['parameters']['federateSite'] is True else 'StandAlone'\n serverRole = params['parameters']['serverRole']['value']\n serverContext = params['parameters']['serverContext']['value']\n geoeventServerContext = params['parameters']['geoeventServerContext']['value'] if serverRole == \"GeoEventServer\" else None\n\n trustedRootCertificateArrayList = ag['trustedRootCertificates']\n certCheck = False\n for cert in trustedRootCertificateArrayList:\n if cert['name'] == (serverContext + \"-\" + securityTagOption + \"ServerBackendSSLCert\"): \n cert['properties']['data'] = \"[split(reference(concat('generateSSLCertificatesCustomExtension-',deployment().name),'2018-05-01').outputs.instanceView.value.substatuses[0].message, '###DATA###')[0]]\"\n certCheck = True\n break\n if not certCheck:\n serverTrustedRootCertificate = {\n \"name\":\"[variables('serverBackendSSLCertName')]\",\n \"properties\":{ \n \"data\":\"[split(reference(concat('generateSSLCertificatesCustomExtension-',deployment().name),'2018-05-01').outputs.instanceView.value.substatuses[0].message, '###DATA###')[0]]\"\n }\n }\n trustedRootCertificateArrayList.append(serverTrustedRootCertificate)\n ag['trustedRootCertificates'] = trustedRootCertificateArrayList\n\n backendAddressPoolsArrayList = ag['backendAddressPools']\n backendPoolCheck = False\n for backendPool in backendAddressPoolsArrayList:\n if backendPool['name'] == (serverContext + \"-\" + securityTagOption + \"ServerBackendPool\"):\n backendPool['properties'] = {\n \"copy\": [\n {\n \"name\":\"backendAddresses\",\n \"count\":\"[variables('numberOfVirtualMachines')]\",\n \"input\":{\n \"fqdn\":\"[concat( variables('virtualMachineNames')[copyIndex('backendAddresses')], '.', if(equals(string(parameters('joinWindowsDomain')), 'True'),parameters('windowsDomainName'), reference(concat(variables('virtualMachineNames')[copyIndex('backendAddresses')],'-',variables('nicName'))).dnsSettings.internalDomainNameSuffix))]\"\n } \n }\n ]\n }\n backendPoolCheck = True\n break\n if not backendPoolCheck:\n serverBackendAddressPool ={\n \"name\":\"[variables('serverBackendPoolName')]\",\n \"properties\":{\n \"copy\": [\n {\n \"name\":\"backendAddresses\",\n \"count\":\"[variables('numberOfVirtualMachines')]\",\n \"input\":{\n \"fqdn\":\"[concat( variables('virtualMachineNames')[copyIndex('backendAddresses')], '.', if(equals(string(parameters('joinWindowsDomain')), 'True'),parameters('windowsDomainName'), 
reference(concat(variables('virtualMachineNames')[copyIndex('backendAddresses')],'-',variables('nicName'))).dnsSettings.internalDomainNameSuffix))]\"\n } \n }\n ]\n }\n }\n backendAddressPoolsArrayList.append(serverBackendAddressPool)\n ag['backendAddressPools'] = backendAddressPoolsArrayList\n\n backendHttpSettingsArrayList = ag['backendHttpSettingsCollection']\n if not any(x for x in backendHttpSettingsArrayList if x['name'] == (serverContext + \"-\" + securityTagOption + \"ServerHttpsSetting\")):\n serverBackendHttpSetting = {\n \"name\":\"[variables('serverBackendHttpsSettingName')]\",\n \"properties\":{\n \"port\": (11443 if serverRole == \"NotebookServer\" else (20443 if serverRole == \"MissionServer\" else 6443)),\n \"protocol\":\"Https\",\n \"cookieBasedAffinity\":\"Disabled\",\n \"connectionDraining\":{\n \"enabled\":True,\n \"drainTimeoutInSec\":60\n },\n \"pickHostNameFromBackendAddress\":True,\n \"path\":\"/arcgis/\",\n \"requestTimeout\": (900 if serverRole == \"NotebookServer\" else 180),\n \"probe\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/probes/', variables('serverBackendProbeName'))]\"\n },\n \"trustedRootCertificates\":[\n {\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/trustedRootCertificates/', variables('serverBackendSSLCertName'))]\"\n }\n ]\n }\n }\n backendHttpSettingsArrayList.append(serverBackendHttpSetting)\n \n if serverRole == \"GeoEventServer\":\n if not any(x for x in backendHttpSettingsArrayList if x['name'] == (geoeventServerContext + \"-\" + securityTagOption + \"GeoeventServerHttpsSetting\")):\n geoeventServerBackendHttpSetting = {\n \"name\":\"[variables('geoeventServerBackendHttpsSettingName')]\",\n \"properties\":{\n \"port\": 6143,\n \"protocol\":\"Https\",\n \"cookieBasedAffinity\":\"Disabled\",\n \"connectionDraining\":{\n \"enabled\":True,\n \"drainTimeoutInSec\":60\n },\n \"pickHostNameFromBackendAddress\":True,\n \"path\":\"/geoevent/\",\n \"requestTimeout\":180,\n \"probe\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/probes/', variables('geoeventServerProbeName'))]\"\n },\n \"trustedRootCertificates\":[\n {\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/trustedRootCertificates/', variables('serverBackendSSLCertName'))]\"\n }\n ]\n }\n }\n backendHttpSettingsArrayList.append(geoeventServerBackendHttpSetting)\n if not any(x for x in backendHttpSettingsArrayList if x['name'] == (geoeventServerContext + \"-\" + securityTagOption + \"WSGeoeventServerHttpsSetting\")):\n wsGeoeventServerBackendHttpSetting = {\n \"name\":\"[variables('wsGeoeventServerBackendHttpsSettingName')]\",\n \"properties\":{\n \"port\": 6143,\n \"protocol\":\"Https\",\n \"cookieBasedAffinity\":\"Disabled\",\n \"connectionDraining\":{\n \"enabled\":True,\n \"drainTimeoutInSec\":60\n },\n \"pickHostNameFromBackendAddress\":True,\n \"path\":\"/arcgis/\",\n \"requestTimeout\":180,\n \"probe\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/probes/', variables('geoeventServerProbeName'))]\"\n },\n \"trustedRootCertificates\":[\n {\n 
\"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/trustedRootCertificates/', variables('serverBackendSSLCertName'))]\"\n }\n ]\n }\n }\n backendHttpSettingsArrayList.append(wsGeoeventServerBackendHttpSetting)\n \n if serverRole == \"MissionServer\":\n if not any(x for x in backendHttpSettingsArrayList if x['name'] == (serverContext + \"-\" + securityTagOption + \"WSMissionServerHttpsSetting\")):\n wsMissionServerBackendHttpSetting = {\n \"name\":\"[variables('wsMissionServerBackendHttpsSettingName')]\",\n \"properties\":{\n \"port\": 20301,\n \"protocol\":\"Https\",\n \"cookieBasedAffinity\":\"Disabled\",\n \"connectionDraining\":{\n \"enabled\":True,\n \"drainTimeoutInSec\":60\n },\n \"pickHostNameFromBackendAddress\":True,\n \"path\":\"/arcgis/\",\n \"requestTimeout\":180,\n \"probe\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/probes/', variables('serverBackendProbeName'))]\"\n },\n \"trustedRootCertificates\":[\n {\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/trustedRootCertificates/', variables('serverBackendSSLCertName'))]\"\n }\n ]\n }\n }\n backendHttpSettingsArrayList.append(wsMissionServerBackendHttpSetting)\n\n ag['backendHttpSettingsCollection'] = backendHttpSettingsArrayList\n\n urlPathMapArrayList = ag['urlPathMaps'][0]['properties']['pathRules']\n if not any(x for x in urlPathMapArrayList if x['name'] == (serverContext + \"-\" + securityTagOption + \"ServerPathRule\")):\n pathRule = {\n \"name\":\"[variables('serverPathRuleName')]\",\n \"properties\":{\n \"paths\":[\n \"[concat('/', parameters('serverContext'), '/*')]\"\n ],\n \"backendAddressPool\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/backendAddressPools/',variables('serverBackendPoolName'))]\"\n },\n \"backendHttpSettings\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/backendHttpSettingsCollection/', variables('serverBackendHttpsSettingName'))]\"\n },\n \"rewriteRuleSet\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/rewriteRuleSets/',variables('serverRewriteRuleSetName'))]\"\n }\n }\n }\n urlPathMapArrayList.append(pathRule)\n \n if serverRole == \"GeoEventServer\":\n if not any(x for x in urlPathMapArrayList if x['name'] == (geoeventServerContext + \"-\" + securityTagOption + \"GeoeventServerPathRule\")):\n geoeventPathRule = {\n \"name\":\"[variables('geoeventServerPathRuleName')]\",\n \"properties\":{\n \"paths\":[\n \"[concat('/', parameters('geoeventServerContext'), '/*')]\"\n ],\n \"backendAddressPool\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/backendAddressPools/',variables('serverBackendPoolName'))]\"\n },\n \"backendHttpSettings\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/backendHttpSettingsCollection/', variables('geoeventServerBackendHttpsSettingName'))]\"\n },\n \"rewriteRuleSet\":{\n 
\"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/rewriteRuleSets/',variables('geoeventServerRewriteRuleSetName'))]\"\n }\n }\n }\n urlPathMapArrayList.append(geoeventPathRule)\n if not any(x for x in urlPathMapArrayList if x['name'] == (geoeventServerContext + \"-\" + securityTagOption + \"WSGeoeventServerPathRule\")):\n wsGeoeventPathRule = {\n \"name\":\"[variables('wsGeoeventServerPathRuleName')]\",\n \"properties\":{\n \"paths\":[\n \"[concat('/', parameters('geoeventServerContext'), 'wss/*')]\"\n ],\n \"backendAddressPool\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/backendAddressPools/',variables('serverBackendPoolName'))]\"\n },\n \"backendHttpSettings\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/backendHttpSettingsCollection/', variables('wsGeoeventServerBackendHttpsSettingName'))]\"\n },\n \"rewriteRuleSet\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/rewriteRuleSets/',variables('wsGeoeventServerRewriteRuleSetName'))]\"\n }\n }\n }\n urlPathMapArrayList.append(wsGeoeventPathRule)\n\n if serverRole == \"MissionServer\":\n if not any(x for x in urlPathMapArrayList if x['name'] == (serverContext + \"-\" + securityTagOption + \"WSMissionServerPathRule\")):\n wsMissionPathRule = {\n \"name\":\"[variables('wsMissionServerPathRuleName')]\",\n \"properties\":{\n \"paths\":[\n \"[concat('/', parameters('serverContext'), 'wss/*')]\"\n ],\n \"backendAddressPool\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/backendAddressPools/',variables('serverBackendPoolName'))]\"\n },\n \"backendHttpSettings\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/backendHttpSettingsCollection/', variables('wsMissionServerBackendHttpsSettingName'))]\"\n },\n \"rewriteRuleSet\":{\n \"id\":\"[concat(resourceId(parameters('appGatewayResourceGroupName'),'Microsoft.Network/applicationGateways', parameters('appGatewayName')), '/rewriteRuleSets/',variables('wsMissionServerRewriteRuleSetName'))]\"\n }\n }\n }\n urlPathMapArrayList.append(wsMissionPathRule)\n\n ag['urlPathMaps'][0]['properties']['pathRules'] = urlPathMapArrayList\n \n probesArrayList = ag[\"probes\"]\n if not any(x for x in probesArrayList if x['name'] == (serverContext + \"-\" + securityTagOption + \"ServerProbeName\")):\n probe = {\n \"name\":\"[variables('serverBackendProbeName')]\",\n \"properties\":{\n \"protocol\":\"Https\",\n \"path\":\"/arcgis/rest/info/healthcheck\",\n \"interval\":30,\n \"timeout\":30,\n \"unhealthyThreshold\":3,\n \"pickHostNameFromBackendHttpSettings\":True,\n \"minServers\":0,\n \"match\":{\n \"statusCodes\":[\"200-404\"]\n }\n }\n }\n if serverRole == \"MissionServer\":\n probe.properties[\"port\"] = 20443\n probesArrayList.append(probe)\n\n if serverRole == \"GeoEventServer\":\n if not any(x for x in probesArrayList if x['name'] == (geoeventServerContext + \"-\" + securityTagOption + \"GeoeventServerProbeName\")):\n probe = {\n \"name\":\"[variables('geoeventServerProbeName')]\",\n \"properties\":{\n 
\"protocol\":\"Https\",\n \"path\":\"/geoevent/manager\",\n \"interval\":30,\n \"timeout\":30,\n \"unhealthyThreshold\":3,\n \"pickHostNameFromBackendHttpSettings\":True,\n \"minServers\":0,\n \"match\":{\n \"statusCodes\":[\"200-399\"]\n }\n }\n }\n probesArrayList.append(probe)\n ag['probes'] = probesArrayList\n \n rewriteRuleSetArrayList = ag['rewriteRuleSets']\n if not any(x for x in rewriteRuleSetArrayList if x['name'] == (serverContext + \"-\" + securityTagOption + \"ServerRewriteRuleSet\")):\n rewriteRuleSet = {\n \"name\":\"[variables('serverRewriteRuleSetName')]\",\n \"properties\":{\n \"rewriteRules\":[\n {\n \"ruleSequence\":50,\n \"name\":\"XForwardedHostRewrite\",\n \"conditions\":[],\n \"actionSet\":{\n \"requestHeaderConfigurations\":[\n {\n \"headerName\":\"X-Forwarded-Host\",\n \"headerValue\":\"{http_req_host}\"\n }\n ],\n \"responseHeaderConfigurations\":[]\n }\n },\n {\n \"ruleSequence\":100,\n \"name\":\"ServerRewriteRule\",\n \"conditions\":[{\n \"variable\" : \"http_resp_Location\",\n \"pattern\" : r\"[concat('(https?):\\/\\/[^\\/]+:11443\\/(?:arcgis|',parameters('serverContext'),')(.*)$')]\" if serverRole == \"NotebookServer\" else (\"[concat('(https?):\\/\\/[^\\/]+:20443\\/(?:arcgis|',parameters('serverContext'),')(.*)$')]\" if serverRole == \"MissionServer\" else r\"[concat('(https?):\\/\\/[^\\/]+:6443\\/(?:arcgis|',parameters('serverContext'),')(.*)$')]\" ),\n \"ignoreCase\" : True,\n \"negate\" : False\n }],\n \"actionSet\":{\n \"requestHeaderConfigurations\":[],\n \"responseHeaderConfigurations\":[\n {\n \"headerName\":\"RewriteLocationValue\",\n \"headerValue\":\"[concat('{http_resp_Location_1}://{http_req_host}/',parameters('serverContext'),'{http_resp_Location_2}')]\"\n },\n {\n \"headerName\":\"Location\",\n \"headerValue\":\"[concat('{http_resp_Location_1}://{http_req_host}/',parameters('serverContext'),'{http_resp_Location_2}')]\"\n }\n ]\n }\n }\n ]\n }\n }\n rewriteRuleSetArrayList.append(rewriteRuleSet)\n if serverRole == \"GeoEventServer\":\n if not any(x for x in rewriteRuleSetArrayList if x['name'] == (geoeventServerContext + \"-\" + securityTagOption + \"GeoeventServerRewriteRuleSet\")):\n geoeventRewriteRuleSet = {\n \"name\":\"[variables('geoeventServerRewriteRuleSetName')]\",\n \"properties\":{\n \"rewriteRules\":[\n {\n \"ruleSequence\":50,\n \"name\":\"XForwardedHostRewrite\",\n \"conditions\":[],\n \"actionSet\":{\n \"requestHeaderConfigurations\":[\n {\n \"headerName\":\"X-Forwarded-Host\",\n \"headerValue\":\"{http_req_host}\"\n }\n ],\n \"responseHeaderConfigurations\":[]\n }\n },\n {\n \"ruleSequence\":100,\n \"name\":\"geoeventServerRewriteRule\",\n \"conditions\":[{\n \"variable\" : \"http_resp_Location\",\n \"pattern\" : r\"[concat('(https?):\\/\\/[^\\/]+:6143\\/(?:geoevent|',parameters('geoeventServerContext'),')(.*)$')]\",\n \"ignoreCase\" : True,\n \"negate\" : False\n }],\n \"actionSet\":{\n \"requestHeaderConfigurations\":[],\n \"responseHeaderConfigurations\":[\n {\n \"headerName\":\"RewriteLocationValue\",\n \"headerValue\":\"[concat('{http_resp_Location_1}://{http_req_host}/',parameters('geoeventServerContext'),'{http_resp_Location_2}')]\"\n },\n {\n \"headerName\":\"Location\",\n \"headerValue\":\"[concat('{http_resp_Location_1}://{http_req_host}/',parameters('geoeventServerContext'),'{http_resp_Location_2}')]\"\n }\n ]\n }\n }\n ]\n }\n }\n rewriteRuleSetArrayList.append(geoeventRewriteRuleSet)\n if not any(x for x in rewriteRuleSetArrayList if x['name'] == (geoeventServerContext + \"-\" + securityTagOption + 
\"WSGeoeventServerRewriteRuleSet\")):\n wsGeoeventRewriteRuleSet = {\n \"name\":\"[variables('wsGeoeventServerRewriteRuleSetName')]\",\n \"properties\":{\n \"rewriteRules\":[\n {\n \"ruleSequence\":50,\n \"name\":\"XForwardedHostRewrite\",\n \"conditions\":[],\n \"actionSet\":{\n \"requestHeaderConfigurations\":[\n {\n \"headerName\":\"X-Forwarded-Host\",\n \"headerValue\":\"{http_req_host}\"\n }\n ],\n \"responseHeaderConfigurations\":[]\n }\n },\n {\n \"ruleSequence\":100,\n \"name\":\"WSGeoeventServerRewriteRule\",\n \"conditions\":[{\n \"variable\" : \"http_resp_Location\",\n \"pattern\" : r\"[concat('(wss?):\\/\\/[^\\/]+:6143\\/(?:arcgis|',parameters('geoeventServerContext'),')(.*)$')]\",\n \"ignoreCase\" : True,\n \"negate\" : False\n }],\n \"actionSet\":{\n \"requestHeaderConfigurations\":[],\n \"responseHeaderConfigurations\":[\n {\n \"headerName\":\"RewriteLocationValue\",\n \"headerValue\":\"[concat('{http_resp_Location_1}://{http_req_host}/',parameters('geoeventServerContext'),'{http_resp_Location_2}')]\"\n },\n {\n \"headerName\":\"Location\",\n \"headerValue\": \"[concat('{http_resp_Location_1}://{http_req_host}/',parameters('geoeventServerContext'),'wss','{http_resp_Location_2}')]\"\n }\n ]\n }\n }\n ]\n }\n }\n rewriteRuleSetArrayList.append(wsGeoeventRewriteRuleSet)\n if serverRole == \"MissionServer\":\n if not any(x for x in rewriteRuleSetArrayList if x['name'] == (serverContext + \"-\" + securityTagOption + \"WSMissionServerRewriteRuleSet\")):\n wsMissionRewriteRuleSet = {\n \"name\":\"[variables('wsMissionServerRewriteRuleSetName')]\",\n \"properties\":{\n \"rewriteRules\":[\n {\n \"ruleSequence\":50,\n \"name\":\"XForwardedHostRewrite\",\n \"conditions\":[],\n \"actionSet\":{\n \"requestHeaderConfigurations\":[\n {\n \"headerName\":\"X-Forwarded-Host\",\n \"headerValue\":\"{http_req_host}\"\n }\n ],\n \"responseHeaderConfigurations\":[]\n }\n },\n {\n \"ruleSequence\":100,\n \"name\":\"WSMissionServerRewriteRule\",\n \"conditions\":[{\n \"variable\" : \"http_resp_Location\",\n \"pattern\" : r\"[concat('(wss?):\\/\\/[^\\/]+:20301\\/(?:arcgis|',parameters('serverContext'),')(.*)$')]\",\n \"ignoreCase\" : True,\n \"negate\" : False\n }],\n \"actionSet\":{\n \"requestHeaderConfigurations\":[],\n \"responseHeaderConfigurations\":[\n {\n \"headerName\":\"RewriteLocationValue\",\n \"headerValue\":\"[concat('{http_resp_Location_1}://{http_req_host}/',parameters('serverContext'),'{http_resp_Location_2}')]\"\n },\n {\n \"headerName\":\"Location\",\n \"headerValue\": \"[concat('{http_resp_Location_1}://{http_req_host}/',parameters('serverContext'),'wss','{http_resp_Location_2}')]\"\n }\n ]\n }\n }\n ]\n }\n }\n rewriteRuleSetArrayList.append(wsMissionRewriteRuleSet)\n ag['rewriteRuleSets'] = rewriteRuleSetArrayList\n print(json.dumps(ag, indent=4))\nif __name__ == \"__main__\":\n sys.exit(_main(_arg_parser()))", "sub_path": "Releases/10.9.1/getAppGatewayObject.py", "file_name": "getAppGatewayObject.py", "file_ext": "py", "file_size_in_byte": 34016, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 19, "usage_type": "call"}, {"api_name": "json.load", "line_number": 39, "usage_type": "call"}, {"api_name": "json.load", "line_number": 40, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 569, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 571, "usage_type": "call"}]} +{"seq_id": "113454195", "text": "\"\"\":mod:`mulre.web` --- Mulre 
web\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\"\"\"\nfrom flask import Flask\n\nfrom . import stream, tag, user, yarn\nfrom .db import setup_session\nfrom .util import MethodRewriteMiddleware\n\n\ndef create_app(config):\n \"\"\"The application factory.\n\n :param config: The instance relative configuration file to use.\n :returns: A Mulre Flask app.\n :rtype: :class:`flask.Flask`\n\n \"\"\"\n app = Flask(__name__, instance_relative_config=True)\n app.wsgi_app = MethodRewriteMiddleware(app.wsgi_app)\n app.config.from_pyfile(config)\n setup_session(app)\n app.register_blueprint(stream.bp, url_prefix='/streams')\n app.register_blueprint(tag.bp, url_prefix='/tags')\n app.register_blueprint(user.bp)\n app.register_blueprint(yarn.bp, url_prefix='/yarns')\n return app\n", "sub_path": "mulre/web/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "flask.Flask", "line_number": 20, "usage_type": "call"}, {"api_name": "util.MethodRewriteMiddleware", "line_number": 21, "usage_type": "call"}, {"api_name": "db.setup_session", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "549207940", "text": "import matplotlib.pyplot as plt\nimport numpy as np\n\nplt.rcParams[\"font.family\"] = \"serif\"\ndata_path=\"H:/Dropbox/Research/Experiment/Results/Paper1/StrawLit.csv\"\ntypes = ['int','f8','int','f8']\ndata=np.genfromtxt(data_path,dtype=types,names=True, delimiter=',')\n\nplt.scatter(data['McCabePerp_year'],data['McCabePerp'])\nplt.scatter(data['McCabePara_year'],data['McCabePara'])\n\nplt.axis([1990,2010,0.02,0.2])\n\nplt.xlabel(\"Year\")\nplt.ylabel(\"k (W/mK)\")\n\nplt.suptitle(\"State-of-the-art in thermal conductivity measurement of Straw\")\nplt.show()\n", "sub_path": "SpyderPy/DataVisualisation/plot_StrawLiterature.py", "file_name": "plot_StrawLiterature.py", "file_ext": "py", "file_size_in_byte": 539, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "matplotlib.pyplot.rcParams", "line_number": 4, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 4, "usage_type": "name"}, {"api_name": "numpy.genfromtxt", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 12, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 12, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "482810431", "text": "from django.core import mail\nfrom rest_framework import permissions\nfrom 
rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom .models import Contact\nfrom realest_estate.setting.local_settings import EMAIL_HOST_USER\n\nclass ContactCreateView(APIView):\n permission_classes = (permissions.AllowAny, )\n def post(self, request, format=None):\n data = self.request.data\n try:\n email_message = (f\"Name: {data['name']}\\n\"\n f\"Email: {data['email']}\\n\\n\"\n f\"Message:\\n {data['message']}\")\n mail.send_mail(data['subject'],\n email_message,\n EMAIL_HOST_USER,\n ['lolopaw24@gmail.com'],\n fail_silently=False)\n contact = Contact(name=data['name'], email=data['email'],\n subject=data['subject'], message=data['message'])\n contact.save()\n return Response({'success': 'Message sent successfully'})\n except:\n return Response({'error': 'Message failed to send'})\n", "sub_path": "backend/contacts/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1155, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "rest_framework.views.APIView", "line_number": 8, "usage_type": "name"}, {"api_name": "rest_framework.permissions.AllowAny", "line_number": 9, "usage_type": "attribute"}, {"api_name": "rest_framework.permissions", "line_number": 9, "usage_type": "name"}, {"api_name": "django.core.mail.send_mail", "line_number": 16, "usage_type": "call"}, {"api_name": "realest_estate.setting.local_settings.EMAIL_HOST_USER", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.core.mail", "line_number": 16, "usage_type": "name"}, {"api_name": "models.Contact", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "217312073", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 23 14:29:08 2020\n\n@author: Chris\n\"\"\"\n\nimport networkx as nx\n\nG = nx.Graph()\n\nlocation_list = [('FB', 1), ('PB', 2), ('VEB', 3), ('BB', 4), ('CB', 5), ('VOB', 6), \n ('RFK', 7), ('IK', 8), ('CK', 9), ('HK', 10), ('SK', 11), ('RMK', 12)]\n\nfor loc in location_list:\n G.add_node(loc[0],location=loc[1])\n\npermlist = [('IK', 'CK'), ('IK', 'HK'), ('HK', 'RFK'),('RMK', 'HK')]\nG.add_edges_from(permlist)", "sub_path": "Baseline/Network.py", "file_name": "Network.py", "file_ext": "py", "file_size_in_byte": 460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "networkx.Graph", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "529693015", "text": "# TODO: Replace from...import... 
with import...\n\nimport enum\nimport random\n\nimport constants\nimport fsm\nimport mcast_receive_handler\nimport mcast_send_handler\nimport neighbor\nimport offer\nimport utils\nimport timer\n# TODO: Change this import\nfrom packet_common import create_packet_header, encode_protocol_packet, decode_protocol_packet\n\nimport common.constants\nimport common.ttypes\n# TODO: Change this import\nfrom encoding.ttypes import NodeCapabilities, LIEPacket\nimport encoding.ttypes\n\n# TODO: LIEs arriving with a TTL larger than 1 MUST be ignored.\n\n# TODO: Implement configuration of POD numbers\n\n# TODO: Send LIE packets with network control precedence.\n\n# TODO: Add IPv6 support\n\n# TODO: Have a mechanism to detect that an interface comes into / goes out of existence\n\n# TODO: Have a mechanism to detect IPv4 or IPv6 address changes on an interface\n\nclass Interface:\n\n UNDEFINED_OR_ANY_POD = 0\n\n def generate_advertised_name(self):\n return self._node.name + '-' + self._interface_name\n\n def get_mtu(self):\n # TODO: Find a portable (or even non-portable) way to get the interface MTU\n # TODO: Find a way to be informed whenever the interface MTU changes\n #!!! mtu = 1500\n mtu = 1400\n return mtu\n\n @staticmethod\n def generate_nonce():\n # 63 bits instead of 64 because nonce field is a signed i64\n nonce = random.getrandbits(63)\n return nonce\n\n class State(enum.Enum):\n ONE_WAY = 1\n TWO_WAY = 2\n THREE_WAY = 3\n\n class Event(enum.Enum):\n TIMER_TICK = 1\n LEVEL_CHANGED = 2\n HAL_CHANGED = 3\n HAT_CHANGED = 4\n HALS_CHANGED = 5\n LIE_RECEIVED = 6\n NEW_NEIGHBOR = 7\n VALID_REFLECTION = 8\n NEIGHBOR_DROPPED_REFLECTION = 9\n NEIGHBOR_CHANGED_LEVEL = 10\n NEIGHBOR_CHANGED_ADDRESS = 11\n NEIGHBOR_CHANGED_MINOR_FIELDS = 12\n UNACCEPTABLE_HEADER = 13\n HOLD_TIME_EXPIRED = 14\n MULTIPLE_NEIGHBORS = 15\n LIE_CORRUPT = 16\n SEND_LIE = 17\n\n verbose_events = [Event.TIMER_TICK, Event.LIE_RECEIVED, Event.SEND_LIE]\n\n def action_store_hal(self):\n # TODO: Need to implement ZTP state machine first\n pass\n\n def action_store_hat(self):\n # TODO: Need to implement ZTP state machine first\n pass\n\n def action_store_hals(self):\n # TODO: Need to implement ZTP state machine first\n pass\n\n def action_update_level(self):\n # TODO: Need to implement ZTP state machine and/or configuration first\n pass\n\n def send_protocol_packet(self, protocol_packet):\n if self._tx_fail:\n self.debug(self._tx_log, \"Failed send {}\".format(protocol_packet))\n else:\n encoded_protocol_packet = encode_protocol_packet(protocol_packet)\n self._mcast_send_handler.send_message(encoded_protocol_packet)\n self.debug(self._tx_log, \"Send {}\".format(protocol_packet))\n\n def action_send_lie(self):\n packet_header = create_packet_header(self._node)\n capabilities = NodeCapabilities(\n flood_reduction=True,\n leaf_indications=common.ttypes.LeafIndications.leaf_only_and_leaf_2_leaf_procedures)\n if self._neighbor:\n neighbor_system_id = self._neighbor.system_id\n neighbor_link_id = self._neighbor.local_id\n lie_neighbor = encoding.ttypes.Neighbor(neighbor_system_id, neighbor_link_id)\n else:\n neighbor_system_id = None\n lie_neighbor = None\n lie_packet = LIEPacket(\n name=self._advertised_name,\n local_id=self._local_id,\n flood_port=self._node.rx_tie_port,\n link_mtu_size=self._mtu,\n neighbor=lie_neighbor,\n pod=self._pod,\n nonce=Interface.generate_nonce(),\n capabilities=capabilities,\n holdtime=3,\n not_a_ztp_offer=self._node.send_not_a_ztp_offer_on_intf(self._interface_name),\n you_are_not_flood_repeater=False, # TODO: 
Set you_are_not_flood_repeater\n label=None)\n packet_content = encoding.ttypes.PacketContent(lie=lie_packet)\n protocol_packet = encoding.ttypes.ProtocolPacket(packet_header, packet_content)\n self.send_protocol_packet(protocol_packet)\n tx_offer = offer.TxOffer(\n self._interface_name,\n self._node.system_id,\n packet_header.level,\n lie_packet.not_a_ztp_offer,\n self._fsm.state)\n self._node.record_tx_offer(tx_offer)\n\n def action_cleanup(self):\n self._neighbor = None\n\n def check_reflection(self):\n # Does the received LIE packet (which is now stored in _neighbor) report us as the neighbor?\n if self._neighbor.neighbor_system_id != self._node.system_id:\n self.info(self._log,\n \"Neighbor does not report us as neighbor (system-id {:16x} instead of {:16x}\"\n .format(self._neighbor.neighbor_system_id, self._node.system_id))\n return False\n if self._neighbor.neighbor_link_id != self._local_id:\n self.info(self._log, \"Neighbor does not report us as neighbor (link-id {} instead of {}\"\n .format(self._neighbor.neighbor_link_id, self._local_id))\n return False\n return True\n\n def check_three_way(self):\n # Section B.1.5\n # CHANGE: This is a little bit different from the specification\n # (see comment [CheckThreeWay])\n if self._fsm.state == self.State.ONE_WAY:\n pass\n elif self._fsm.state == self.State.TWO_WAY:\n if self._neighbor.neighbor_system_id is None:\n pass\n elif self.check_reflection():\n self._fsm.push_event(self.Event.VALID_REFLECTION)\n else:\n self._fsm.push_event(self.Event.MULTIPLE_NEIGHBORS)\n else: # state is THREE_WAY\n if self._neighbor.neighbor_system_id is None:\n self._fsm.push_event(self.Event.NEIGHBOR_DROPPED_REFLECTION)\n elif self.check_reflection():\n pass\n else:\n self._fsm.push_event(self.Event.MULTIPLE_NEIGHBORS)\n\n def check_minor_change(self, new_neighbor):\n # TODO: what if link_mtu_size changes?\n # TODO: what if pod changes?\n # TODO: what if capabilities changes?\n # TODO: what if holdtime changes?\n # TODO: what if not_a_ztp_offer changes?\n # TODO: what if you_are_not_flood_repeater changes?\n # TODO: what if label changes?\n minor_change = False\n if new_neighbor.flood_port != self._neighbor.flood_port:\n msg = (\"Neighbor flood-port changed from {} to {}\"\n .format(self._neighbor.flood_port, new_neighbor.flood_port))\n minor_change = True\n elif new_neighbor.name != self._neighbor.name:\n msg = (\"Neighbor name changed from {} to {}\"\n .format(self._neighbor.name, new_neighbor.name))\n minor_change = True\n elif new_neighbor.local_id != self._neighbor.local_id:\n msg = (\"Neighbor local-id changed from {} to {}\"\n .format(self._neighbor.local_id, new_neighbor.local_id))\n minor_change = True\n if minor_change:\n self.info(self._log, msg)\n return minor_change\n\n def send_offer_to_ztp_fsm(self, offering_neighbor):\n offer_for_ztp = offer.RxOffer(\n self._interface_name,\n offering_neighbor.system_id,\n offering_neighbor.level,\n offering_neighbor.not_a_ztp_offer,\n self._fsm.state)\n self._node.fsm.push_event(self._node.Event.NEIGHBOR_OFFER, offer_for_ztp)\n\n def this_node_is_leaf(self):\n return self._node.level_value() == common.constants.leaf_level\n\n def hat_not_greater_remote_level(self, remote_level):\n if self._node.highest_adjacency_three_way is None:\n return True\n return self._node.highest_adjacency_three_way <= remote_level\n\n def both_nodes_support_leaf_2_leaf(self, protocol_packet):\n header = protocol_packet.header\n lie = protocol_packet.content.lie\n if not self.this_node_is_leaf():\n # This node is not a leaf\n return False\n if header.level != 0:\n # Remote node is not a leaf\n return False\n if not self._node.leaf_2_leaf:\n # This node does not support leaf-2-leaf\n return False\n if lie.capabilities is None:\n # Remote node does not support leaf-2-leaf\n return False\n if (lie.capabilities.leaf_indications !=\n common.ttypes.LeafIndications.leaf_only_and_leaf_2_leaf_procedures):\n # Remote node does not support leaf-2-leaf\n return False\n return True\n\n def neither_leaf_and_ldiff_one(self, protocol_packet):\n # Neither node is leaf and the level difference is at most one\n header = protocol_packet.header\n if self.this_node_is_leaf():\n # This node is leaf\n return False\n if header.level == 0:\n # Remote node is leaf\n return False\n assert self._node.level_value() is not None\n assert header.level is not None\n if abs(header.level - self._node.level_value()) > 1:\n # Level difference is greater than 1\n return False\n return True\n\n def is_received_lie_acceptable(self, protocol_packet):\n # Check whether a received LIE message is acceptable for the purpose of progressing towards\n # a 3-way adjacency. This implements the rules specified in sections B.1.4.1 and B.1.4.2 of\n # the specification.\n # DEV-4: This also implements the rules specified in the 8 bullet points in section 4.2.2,\n # some of which are also present in section B.1.4 and some of which are missing from section\n # B.1.4.\n # The return value of this function is (accept, rule, offer_to_ztp, warning) where\n # - accept: True if the received LIE message is acceptable, False if not\n # - rule: A short human-readable string describing the rule used to accept or reject the\n # LIE message\n # - offer_to_ztp: True if an offer should be sent to the ZTP FSM. Note that we even send\n # offers to the ZTP FSM for most rejected LIE messages, and the ZTP FSM stores these\n # as \"removed offers\" for debugging.\n # - warning: If True, log a warning message, if False, log an info message.\n #\n # TODO: Add counters for each of these conditions\n #\n header = protocol_packet.header\n lie = protocol_packet.content.lie\n if not header:\n return (False, \"Missing header\", False, True)\n if header.major_version != constants.RIFT_MAJOR_VERSION:\n # Section B.1.4.1 (1st OR clause) / section 4.2.2.2\n return (False, \"Different major protocol version\", False, True)\n if not self.is_valid_received_system_id(header.sender):\n # Section B.1.4.1 (3rd OR clause) / section 4.2.2.2\n return (False, \"Invalid system ID\", False, True)\n if self._node.system_id == header.sender:\n # Section 4.2.2.5 (DEV-4: rule is missing in section B.1.4)\n return (False, \"Remote system ID is same as local system ID (loop)\", False, False)\n if self._mtu != lie.link_mtu_size:\n # Section 4.2.2.6\n return (False, \"MTU mismatch\", False, True)\n if header.level is None:\n # Section B.1.4.2 (1st OR clause) / section 4.2.2.7\n return (False, \"Remote level (in received LIE) is undefined\", True, False)\n if self._node.level_value() is None:\n # Section B.1.4.2 (2nd OR clause) / section 4.2.2.7\n return (False, \"My level is undefined\", True, False)\n if ((self._pod != self.UNDEFINED_OR_ANY_POD) and\n (lie.pod != self.UNDEFINED_OR_ANY_POD) and\n (self._pod != lie.pod)):\n # Section 4.2.2.1 (DEV-4: rule is missing in section B.1.4)\n return (False, \"PoD mismatch\", True, True)\n if self._mtu != lie.link_mtu_size:\n # Section 4.2.2.6 (DEV-4: rule is missing in section B.1.4)\n return (False, \"MTU mismatch\", True, True)\n # DEV-4: The following rules are correctly specified in 4.2.2.8 and incorrectly in B.1.4.2\n if self.this_node_is_leaf() and self.hat_not_greater_remote_level(header.level):\n # Section 4.2.2.8 (1st OR clause) / DEV-4: Different in section B.1.4.2 (3rd OR clause)\n return (True, \"This node is leaf and HAT not greater than remote level\", True, False)\n if not self.this_node_is_leaf() and (header.level == 0):\n # Section 4.2.2.8 (2nd OR clause) / DEV-4: Missing in section B.1.4\n return (True, \"This node is not leaf and neighbor is leaf\", True, False)\n if self.both_nodes_support_leaf_2_leaf(protocol_packet):\n # Section 4.2.2.8 (3rd OR clause) / DEV-4: Missing in section B.1.4\n return (True, \"Both nodes are leaf and support leaf-2-leaf\", True, False)\n if self.neither_leaf_and_ldiff_one(protocol_packet):\n # Section 4.2.2.8 (4th OR clause) / DEV-4: Different in section B.1.4.3.2 (4th OR clause)\n return (True, \"Neither node is leaf and level difference is at most one\", True, False)\n return (False, \"Level mismatch\", True, True)\n\n def action_process_lie(self, event_data):\n (protocol_packet, (from_address, from_port)) = event_data\n # TODO: This is a simplistic way of implementing the hold timer. Use a real timer instead.\n self._time_ticks_since_lie_received = 0\n # Sections B.1.4.1 and B.1.4.2\n new_neighbor = neighbor.Neighbor(protocol_packet, from_address, from_port)\n (accept, rule, offer_to_ztp, warning) = self.is_received_lie_acceptable(protocol_packet)\n if not accept:\n self._lie_accept_or_reject = \"Rejected\"\n self._lie_accept_or_reject_rule = rule\n if warning:\n self.warning(self._rx_log, \"Received LIE packet rejected: {}\".format(rule))\n else:\n self.info(self._rx_log, \"Received LIE packet rejected: {}\".format(rule))\n self.action_cleanup()\n if offer_to_ztp:\n self.send_offer_to_ztp_fsm(new_neighbor)\n self._fsm.push_event(self.Event.UNACCEPTABLE_HEADER)\n return\n self._lie_accept_or_reject = \"Accepted\"\n self._lie_accept_or_reject_rule = rule\n # Section B.1.4.3\n # Note: We send an offer to the ZTP state machine directly from here instead of pushing an\n # UPDATE_ZTP_OFFER event (see deviation DEV-2 in doc/deviations)\n self.send_offer_to_ztp_fsm(new_neighbor)\n if not self._neighbor:\n self.info(self._log, \"New neighbor detected with system-id {}\"\n .format(utils.system_id_str(protocol_packet.header.sender)))\n self._neighbor = new_neighbor\n self._fsm.push_event(self.Event.NEW_NEIGHBOR)\n self.check_three_way()\n return\n # Section B.1.4.3.1\n if new_neighbor.system_id != self._neighbor.system_id:\n self.info(self._log, \"Neighbor system-id changed from {} to {}\"\n .format(utils.system_id_str(self._neighbor.system_id),\n utils.system_id_str(new_neighbor.system_id)))\n self._fsm.push_event(self.Event.MULTIPLE_NEIGHBORS)\n return\n # Section B.1.4.3.2\n if new_neighbor.level != self._neighbor.level:\n self.info(self._log, \"Neighbor level changed from {} to {}\"\n .format(self._neighbor.level, new_neighbor.level))\n self._fsm.push_event(self.Event.NEIGHBOR_CHANGED_LEVEL)\n return\n # Section B.1.4.3.3\n if new_neighbor.address != self._neighbor.address:\n self.info(self._log, \"Neighbor address changed from {} to {}\"\n .format(self._neighbor.address, new_neighbor.address))\n self._fsm.push_event(self.Event.NEIGHBOR_CHANGED_ADDRESS)\n return\n # Section B.1.4.3.4\n if self.check_minor_change(new_neighbor):\n self._fsm.push_event(self.Event.NEIGHBOR_CHANGED_MINOR_FIELDS)\n self._neighbor = new_neighbor # TODO: The draft does not specify this, but it is needed\n # Section B.1.4.3.5\n self.check_three_way()\n\n def action_check_hold_time_expired(self):\n # TODO: This is a (too) simplistic way of managing timers in the draft; use an explicit\n # timer.\n # If time_ticks_since_lie_received is None, it means the timer is not running\n if self._time_ticks_since_lie_received is None:\n return\n self._time_ticks_since_lie_received += 1\n if self._neighbor and self._neighbor.holdtime:\n holdtime = self._neighbor.holdtime\n else:\n holdtime = common.constants.default_holdtime\n if self._time_ticks_since_lie_received >= holdtime:\n self._fsm.push_event(self.Event.HOLD_TIME_EXPIRED)\n\n def action_hold_time_expired(self):\n self._node.expire_offer(self._interface_name)\n\n _state_one_way_transitions = {\n Event.TIMER_TICK: (None, [], [Event.SEND_LIE]),\n Event.LEVEL_CHANGED: (State.ONE_WAY, [action_update_level], [Event.SEND_LIE]),\n Event.HAL_CHANGED: (None, [action_store_hal]),\n Event.HAT_CHANGED: (None, [action_store_hat]),\n Event.HALS_CHANGED: (None, [action_store_hals]),\n Event.LIE_RECEIVED: (None, [action_process_lie]),\n Event.NEW_NEIGHBOR: (State.TWO_WAY, [], [Event.SEND_LIE]),\n Event.UNACCEPTABLE_HEADER: (State.ONE_WAY, []),\n Event.HOLD_TIME_EXPIRED: (None, [action_hold_time_expired]),\n Event.SEND_LIE: (None, [action_send_lie]),\n # Removed. See deviation DEV-2 in doc/deviations.md. TODO: remove line completely.\n # Event.UPDATE_ZTP_OFFER: (None, [action_send_offer_to_ztp_fsm])\n }\n\n _state_two_way_transitions = {\n Event.TIMER_TICK: (None, [action_check_hold_time_expired], [Event.SEND_LIE]),\n Event.LEVEL_CHANGED: (State.ONE_WAY, [action_update_level]),\n Event.HAL_CHANGED: (None, [action_store_hal]),\n Event.HAT_CHANGED: (None, [action_store_hat]),\n Event.HALS_CHANGED: (None, [action_store_hals]),\n Event.LIE_RECEIVED: (None, [action_process_lie]),\n Event.VALID_REFLECTION: (State.THREE_WAY, []),\n Event.NEIGHBOR_CHANGED_LEVEL: (State.ONE_WAY, []),\n Event.NEIGHBOR_CHANGED_ADDRESS: (State.ONE_WAY, []),\n Event.UNACCEPTABLE_HEADER: (State.ONE_WAY, []),\n Event.HOLD_TIME_EXPIRED: (State.ONE_WAY, [action_hold_time_expired]),\n Event.MULTIPLE_NEIGHBORS: (State.ONE_WAY, []),\n Event.LIE_CORRUPT: (State.ONE_WAY, []), # This transition is not in draft\n Event.SEND_LIE: (None, [action_send_lie])}\n\n _state_three_way_transitions = {\n Event.TIMER_TICK: (None, [action_check_hold_time_expired], [Event.SEND_LIE]),\n Event.LEVEL_CHANGED: (State.ONE_WAY, [action_update_level]),\n Event.HAL_CHANGED: (None, [action_store_hal]),\n Event.HAT_CHANGED: (None, [action_store_hat]),\n Event.HALS_CHANGED: (None, [action_store_hals]),\n Event.LIE_RECEIVED: (None, [action_process_lie]),\n Event.NEIGHBOR_DROPPED_REFLECTION: (State.TWO_WAY, []),\n Event.NEIGHBOR_CHANGED_LEVEL: (State.ONE_WAY, []),\n Event.NEIGHBOR_CHANGED_ADDRESS: (State.ONE_WAY, []),\n Event.UNACCEPTABLE_HEADER: (State.ONE_WAY, []),\n Event.HOLD_TIME_EXPIRED: (State.ONE_WAY, [action_hold_time_expired]),\n Event.MULTIPLE_NEIGHBORS: (State.ONE_WAY, []),\n Event.LIE_CORRUPT: (State.ONE_WAY, []), # This transition is not in draft\n Event.SEND_LIE: (None, [action_send_lie]),\n }\n\n _transitions = {\n State.ONE_WAY: _state_one_way_transitions,\n State.TWO_WAY: _state_two_way_transitions,\n State.THREE_WAY: _state_three_way_transitions\n }\n\n _state_entry_actions = {\n State.ONE_WAY: [action_cleanup, action_send_lie]\n }\n\n fsm_definition = fsm.FsmDefinition(\n state_enum=State,\n event_enum=Event,\n transitions=_transitions,\n state_entry_actions=_state_entry_actions,\n initial_state=State.ONE_WAY,\n verbose_events=verbose_events)\n\n def debug(self, logger, msg):\n logger.debug(\"[{}] {}\".format(self._log_id, msg))\n\n def info(self, logger, msg):\n logger.info(\"[{}] {}\".format(self._log_id, msg))\n\n def warning(self, logger, msg):\n logger.warning(\"[{}] {}\".format(self._log_id, msg))\n\n def __init__(self, node, config):\n # TODO: process bandwidth field in config\n self._node = node\n self._interface_name = config['name']\n # TODO: Make the default metric/bandwidth depend on the speed of the interface\n self._metric = self.get_config_attribute(config, 'metric',\n common.constants.default_bandwidth)\n self._advertised_name = self.generate_advertised_name()\n self._log_id = node.log_id + \"-{}\".format(self._interface_name)\n self._ipv4_address = utils.interface_ipv4_address(self._interface_name,\n self._node.engine.tx_src_address)\n self._rx_lie_ipv4_mcast_address = self.get_config_attribute(\n config, 'rx_lie_mcast_address', constants.DEFAULT_LIE_IPV4_MCAST_ADDRESS)\n self._tx_lie_ipv4_mcast_address = self.get_config_attribute(\n config, 'tx_lie_mcast_address', constants.DEFAULT_LIE_IPV4_MCAST_ADDRESS)\n self._rx_lie_ipv6_mcast_address = self.get_config_attribute(\n config, 'rx_lie_v6_mcast_address', constants.DEFAULT_LIE_IPV6_MCAST_ADDRESS)\n self._tx_lie_ipv6_mcast_address = self.get_config_attribute(\n config, 'tx_lie_v6_mcast_address', constants.DEFAULT_LIE_IPV6_MCAST_ADDRESS)\n self._rx_lie_port = self.get_config_attribute(config, 'rx_lie_port',\n constants.DEFAULT_LIE_PORT)\n self._tx_lie_port = self.get_config_attribute(config, 'tx_lie_port',\n constants.DEFAULT_LIE_PORT)\n self._rx_tie_port = self.get_config_attribute(config, 'rx_tie_port',\n constants.DEFAULT_TIE_PORT)\n self._rx_fail = False\n self._tx_fail = False\n self._log = node.log.getChild(\"if\")\n self.info(self._log, \"Create interface\")\n self._rx_log = self._log.getChild(\"rx\")\n self._tx_log = self._log.getChild(\"tx\")\n self._fsm_log = self._log.getChild(\"fsm\")\n self._local_id = node.allocate_interface_id()\n self._mtu = self.get_mtu()\n self._pod = self.UNDEFINED_OR_ANY_POD\n self._neighbor = None\n self._time_ticks_since_lie_received = None\n self._lie_accept_or_reject = \"No LIE Received\"\n self._lie_accept_or_reject_rule = \"-\"\n self._fsm = fsm.Fsm(\n definition=self.fsm_definition,\n action_handler=self,\n log=self._fsm_log,\n log_id=self._log_id)\n if self._node.running:\n self.run()\n self._fsm.start()\n\n def run(self):\n self._mcast_send_handler = mcast_send_handler.McastSendHandler(\n interface_name=self._interface_name,\n mcast_ipv4_address=self._tx_lie_ipv4_mcast_address,\n port=self._tx_lie_port,\n interface_ipv4_address=self._node.engine.tx_src_address,\n multicast_loopback=self._node.engine.multicast_loopback)\n # TODO: Use source address\n (_, source_port) = self._mcast_send_handler.source_address_and_port()\n self._lie_udp_source_port = source_port\n self._mcast_receive_handler = mcast_receive_handler.McastReceiveHandler(\n self._interface_name,\n self._rx_lie_ipv4_mcast_address,\n self._rx_lie_port,\n self.receive_mcast_message,\n self._node.engine.tx_src_address)\n self._one_second_timer = timer.Timer(1.0,\n lambda: self._fsm.push_event(self.Event.TIMER_TICK))\n\n def get_config_attribute(self, config, attribute, default):\n if attribute in config:\n return config[attribute]\n else:\n return default\n\n def is_valid_received_system_id(self, system_id):\n if system_id == 0:\n return False\n return True\n\n def receive_mcast_message(self, message, from_address_and_port):\n # TODO: Handle decoding errors (does decode_protocol_packet throw an exception in\n # that case? Try it...)\n protocol_packet = decode_protocol_packet(message)\n if self._rx_fail:\n self.debug(self._tx_log, \"Failed receive {}\".format(protocol_packet))\n return\n if protocol_packet.header.sender == self._node.system_id:\n self.debug(self._rx_log, \"Looped receive {}\".format(protocol_packet))\n return\n self.debug(self._rx_log, \"Receive {}\".format(protocol_packet))\n if not protocol_packet.content:\n self.warning(self._rx_log, \"Received packet without content\")\n return\n if protocol_packet.content.lie:\n event_data = (protocol_packet, from_address_and_port)\n self._fsm.push_event(self.Event.LIE_RECEIVED, event_data)\n if protocol_packet.content.tide:\n # TODO: process TIDE\n pass\n if protocol_packet.content.tire:\n # TODO: process TIRE\n pass\n if protocol_packet.content.tie:\n # TODO: process TIE\n pass\n\n def set_failure(self, tx_fail, rx_fail):\n self._tx_fail = tx_fail\n self._rx_fail = rx_fail\n\n def failure_str(self):\n if self._tx_fail:\n if self._rx_fail:\n return \"failed\"\n else:\n return \"tx-failed\"\n else:\n if self._rx_fail:\n return \"rx-failed\"\n else:\n return \"ok\"\n\n @staticmethod\n def cli_summary_headers():\n return [\n [\"Interface\", \"Name\"],\n [\"Neighbor\", \"Name\"],\n [\"Neighbor\", \"System ID\"],\n [\"Neighbor\", \"State\"]]\n\n def cli_summary_attributes(self):\n if self._neighbor:\n return [\n self._interface_name,\n self._neighbor.name,\n utils.system_id_str(self._neighbor.system_id),\n self._fsm.state.name]\n else:\n return [\n self._interface_name,\n \"\",\n \"\",\n self._fsm.state.name]\n\n def cli_detailed_attributes(self):\n return [\n [\"Interface Name\", self._interface_name],\n [\"Advertised Name\", self._advertised_name],\n [\"Interface IPv4 Address\", self._ipv4_address],\n [\"Metric\", self._metric],\n [\"Receive LIE IPv4 Multicast Address\", self._rx_lie_ipv4_mcast_address],\n [\"Transmit LIE IPv4 Multicast Address\", self._tx_lie_ipv4_mcast_address],\n [\"Receive LIE IPv6 Multicast Address\", self._rx_lie_ipv6_mcast_address],\n [\"Transmit LIE IPv6 Multicast Address\", self._tx_lie_ipv6_mcast_address],\n [\"Receive LIE Port\", self._rx_lie_port],\n [\"Transmit LIE Port\", self._tx_lie_port],\n [\"Receive TIE Port\", self._rx_tie_port],\n [\"System ID\", utils.system_id_str(self._node.system_id)],\n [\"Local ID\", self._local_id],\n [\"MTU\", self._mtu],\n [\"POD\", self._pod],\n [\"Failure\", self.failure_str()],\n [\"State\", self._fsm.state.name],\n [\"Received LIE Accepted or Rejected\", self._lie_accept_or_reject],\n [\"Received LIE Accept or Reject Reason\", self._lie_accept_or_reject_rule],\n [\"Neighbor\", \"True\" if self._neighbor else \"False\"]\n ]\n\n def cli_detailed_neighbor_attrs(self):\n if self._neighbor:\n return self._neighbor.cli_detailed_attributes()\n else:\n return None\n\n @property\n def fsm(self):\n return self._fsm\n", "sub_path": "rift/interface.py", "file_name": "interface.py", "file_ext": "py", "file_size_in_byte": 28655, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "random.getrandbits", "line_number": 52, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 55, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 60, "usage_type": "attribute"}, {"api_name": "packet_common.encode_protocol_packet", "line_number": 101, "usage_type": "call"}, {"api_name": 
"packet_common.create_packet_header", "line_number": 106, "usage_type": "call"}, {"api_name": "encoding.ttypes.NodeCapabilities", "line_number": 107, "usage_type": "call"}, {"api_name": "common.constants.ttypes", "line_number": 109, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 109, "usage_type": "name"}, {"api_name": "encoding.ttypes.ttypes.Neighbor", "line_number": 113, "usage_type": "call"}, {"api_name": "encoding.ttypes.ttypes", "line_number": 113, "usage_type": "attribute"}, {"api_name": "encoding.ttypes", "line_number": 113, "usage_type": "name"}, {"api_name": "encoding.ttypes.LIEPacket", "line_number": 117, "usage_type": "call"}, {"api_name": "encoding.ttypes.ttypes.PacketContent", "line_number": 130, "usage_type": "call"}, {"api_name": "encoding.ttypes.ttypes", "line_number": 130, "usage_type": "attribute"}, {"api_name": "encoding.ttypes", "line_number": 130, "usage_type": "name"}, {"api_name": "encoding.ttypes.ttypes.ProtocolPacket", "line_number": 131, "usage_type": "call"}, {"api_name": "encoding.ttypes.ttypes", "line_number": 131, "usage_type": "attribute"}, {"api_name": "encoding.ttypes", "line_number": 131, "usage_type": "name"}, {"api_name": "offer.TxOffer", "line_number": 133, "usage_type": "call"}, {"api_name": "offer.RxOffer", "line_number": 204, "usage_type": "call"}, {"api_name": "common.constants.constants", "line_number": 213, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 213, "usage_type": "name"}, {"api_name": "common.constants.ttypes", "line_number": 236, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 236, "usage_type": "name"}, {"api_name": "constants.RIFT_MAJOR_VERSION", "line_number": 279, "usage_type": "attribute"}, {"api_name": "neighbor.Neighbor", "line_number": 325, "usage_type": "call"}, {"api_name": "utils.system_id_str", "line_number": 347, "usage_type": "call"}, {"api_name": "utils.system_id_str", "line_number": 355, "usage_type": "call"}, {"api_name": "utils.system_id_str", "line_number": 356, "usage_type": "call"}, {"api_name": "common.constants.constants", "line_number": 388, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 388, "usage_type": "name"}, {"api_name": "fsm.FsmDefinition", "line_number": 454, "usage_type": "call"}, {"api_name": "common.constants.constants", "line_number": 477, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 477, "usage_type": "name"}, {"api_name": "utils.interface_ipv4_address", "line_number": 480, "usage_type": "call"}, {"api_name": "constants.DEFAULT_LIE_IPV4_MCAST_ADDRESS", "line_number": 483, "usage_type": "attribute"}, {"api_name": "constants.DEFAULT_LIE_IPV4_MCAST_ADDRESS", "line_number": 485, "usage_type": "attribute"}, {"api_name": "constants.DEFAULT_LIE_IPV6_MCAST_ADDRESS", "line_number": 487, "usage_type": "attribute"}, {"api_name": "constants.DEFAULT_LIE_IPV6_MCAST_ADDRESS", "line_number": 489, "usage_type": "attribute"}, {"api_name": "constants.DEFAULT_LIE_PORT", "line_number": 491, "usage_type": "attribute"}, {"api_name": "constants.DEFAULT_LIE_PORT", "line_number": 493, "usage_type": "attribute"}, {"api_name": "constants.DEFAULT_TIE_PORT", "line_number": 495, "usage_type": "attribute"}, {"api_name": "fsm.Fsm", "line_number": 510, "usage_type": "call"}, {"api_name": "mcast_send_handler.McastSendHandler", "line_number": 520, "usage_type": "call"}, {"api_name": "mcast_receive_handler.McastReceiveHandler", "line_number": 529, "usage_type": "call"}, {"api_name": 
"timer.Timer", "line_number": 535, "usage_type": "call"}, {"api_name": "packet_common.decode_protocol_packet", "line_number": 552, "usage_type": "call"}, {"api_name": "utils.system_id_str", "line_number": 605, "usage_type": "call"}, {"api_name": "utils.system_id_str", "line_number": 627, "usage_type": "call"}]} +{"seq_id": "290772080", "text": "import json\nimport argparse\nimport h5py as h5\nimport os\nimport numpy as np\n\nfrom models.img_ques_attention import ImgQuesAttentionNet\nfrom models.show_n_tell import ShowNTellNet\nfrom models.ques_attention import QuesAttentionShowNTellNet\nfrom models.conv_attention import ConvAttentionNet\nfrom models.time_dist_cnn import TimeDistributedCNNNet\nfrom datagen import DataGenerator\nfrom keras.preprocessing.image import load_img\nfrom keras.preprocessing.image import img_to_array\n\n\ndef write_predictions(filepath, y_pred, ix_to_ans, question_ids):\n answers = [ix_to_ans[str(ix)] for ix in y_pred]\n qa_pairs = []\n for ques, ans in zip(question_ids, answers):\n qa_pairs.append({'answer': ans, 'question_id': ques})\n\n with open(filepath + '.json', 'w') as pred_file:\n json.dump(qa_pairs, pred_file)\n\n\ndef main(args):\n lstm_dim = 512\n n_answers = 1001\n question_embed_dim = 256\n\n qa_data = h5.File(os.path.join(args.data_path, \"data_prepro.h5\"), \"r\")\n\n with open(os.path.join(args.data_path, \"data_prepro.json\"), \"r\") as file:\n prepro_data = json.load(file)\n\n if args.extracted:\n img_feat = h5.File(os.path.join(args.data_path, \"data_img.h5\"), \"r\")['images_test']\n else:\n print(\"Loading images\")\n img_feat = [\n img_to_array(load_img(os.path.join(args.data_path, image_filename), target_size=(224, 224)), dtype='uint8',\n data_format='channels_first')\n for image_filename in prepro_data['unique_img_test']]\n img_feat = np.array(img_feat, dtype=np.uint8)\n\n VOCAB_SIZE = len(prepro_data['ix_to_word'])\n MAX_QUESTION_LEN = qa_data['ques_test'].shape[1]\n SOS = VOCAB_SIZE + 1\n # Add 1 for SOS and 1 for '0' -> padding\n VOCAB_SIZE += 2\n\n # Add SOS char at the beginning for every question\n questions = np.zeros((qa_data['ques_test'].shape[0], MAX_QUESTION_LEN + 1))\n questions[:, 1:] = qa_data['ques_test']\n questions[:, 0] = SOS\n\n ques_to_img = np.array(qa_data['img_pos_test'])\n\n ix_to_ans = prepro_data['ix_to_ans']\n question_ids = np.array(qa_data['question_id_test']).tolist()\n n_test = len(question_ids)\n\n # Define appropriate model\n if args.model_type == 'img_ques_att':\n model = ImgQuesAttentionNet(lstm_dim=lstm_dim,\n n_answers=n_answers,\n model_path=os.path.basename(args.model_path),\n VOCAB_SIZE=VOCAB_SIZE,\n MAX_QUESTION_LEN=MAX_QUESTION_LEN,\n question_embed_dim=question_embed_dim,\n log_path=None)\n elif args.model_type == 'show_n_tell':\n model = ShowNTellNet(lstm_dim=lstm_dim,\n n_answers=n_answers,\n model_path=os.path.basename(args.model_path),\n VOCAB_SIZE=VOCAB_SIZE,\n MAX_QUESTION_LEN=MAX_QUESTION_LEN,\n question_embed_dim=question_embed_dim,\n log_path=None)\n elif args.model_type == 'ques_att':\n model = QuesAttentionShowNTellNet(lstm_dim=lstm_dim,\n n_answers=n_answers,\n model_path=os.path.basename(args.model_path),\n VOCAB_SIZE=VOCAB_SIZE,\n MAX_QUESTION_LEN=MAX_QUESTION_LEN,\n question_embed_dim=question_embed_dim,\n log_path=None)\n\n elif args.model_type == 'conv_attention':\n model = ConvAttentionNet(lstm_dim=lstm_dim,\n n_answers=n_answers,\n model_path=os.path.basename(args.model_path),\n VOCAB_SIZE=VOCAB_SIZE,\n MAX_QUESTION_LEN=MAX_QUESTION_LEN,\n question_embed_dim=question_embed_dim,\n 
log_path=None)\n\n elif args.model_type == 'time_dist_cnn':\n model = TimeDistributedCNNNet(lstm_dim=lstm_dim,\n n_answers=n_answers,\n model_path=os.path.basename(args.model_path),\n VOCAB_SIZE=VOCAB_SIZE,\n MAX_QUESTION_LEN=MAX_QUESTION_LEN,\n question_embed_dim=question_embed_dim,\n log_path=None)\n\n model.load_weights(weights_filename=args.model_path)\n\n chunk_size = 100000000\n y_pred = np.zeros(n_test, dtype=np.int)\n n_chunks = len(range(0, n_test, chunk_size))\n for i, batch in enumerate(range(0, n_test, chunk_size)):\n begin = batch\n end = min(n_test, batch + chunk_size)\n # Test data generator\n test_datagen = DataGenerator(img_feat=np.array(img_feat),\n questions=questions[begin: end],\n answers=[],\n ques_to_img=ques_to_img[begin: end],\n VOCAB_SIZE=VOCAB_SIZE,\n n_answers=n_answers,\n batch_size=args.batch_size,\n shuffle=False,\n split='test')\n y_pred_chunk = model.predict(test_data=test_datagen)\n if (i + 1) % 50 == 0:\n print(\"Completed testing on {}/{} chunks...\".format(i + 1, n_chunks))\n y_pred[begin: end] = y_pred_chunk\n\n write_predictions(filepath=args.dest_path,\n y_pred=y_pred,\n ix_to_ans=ix_to_ans,\n question_ids=question_ids)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_type', type=str, choices=['img_ques_att', 'show_n_tell',\n 'time_dist_cnn', 'ques_att',\n 'conv_attention'], help='type of model')\n parser.add_argument('--model_path', type=str, default='../models/model', help='path to model file')\n parser.add_argument('--data_path', type=str, default='../data/', help='path to input data')\n parser.add_argument('--dest_path', type=str, help='prediction file full path (without the file extension)')\n parser.add_argument('--batch_size', type=int, default=32, help='batch size to use for testing')\n parser.add_argument('--extracted', action='store_true', help='True for reading extracted features, False for reading raw images')\n main(parser.parse_args())", "sub_path": "src/predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 6847, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "json.dump", "line_number": 24, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 35, "usage_type": "call"}, {"api_name": "h5py.File", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 42, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 61, "usage_type": "call"}, {"api_name": "models.img_ques_attention.ImgQuesAttentionNet", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path", "line_number": 68, "usage_type": "attribute"}, {"api_name": "models.show_n_tell.ShowNTellNet", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "models.ques_attention.QuesAttentionShowNTellNet", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path", "line_number": 84, "usage_type": "attribute"}, {"api_name": "models.conv_attention.ConvAttentionNet", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 93, "usage_type": "call"}, {"api_name": "os.path", "line_number": 93, "usage_type": "attribute"}, {"api_name": "models.time_dist_cnn.TimeDistributedCNNNet", "line_number": 100, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 111, "usage_type": "attribute"}, {"api_name": "datagen.DataGenerator", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 117, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 138, "usage_type": "call"}]} +{"seq_id": "576539602", "text": "import bs4\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup as soup\nimport csv\n\nmy_url = \"http://www.dealsource.tech/\"\n\n# opening the connection, grabbing the page.\nclient = urlopen(my_url)\npage_html = client.read()\nclient.close()\n\n# HTML Parsing.\npage_soup = soup(page_html, \"html.parser\")\n\n# list of containers.\ncontainers = page_soup.findAll(\"div\", {\"class\":\"summary-item\"})\n\n# Opening a csv file.\nfilename = \"products.csv\"\nf = open(filename, \"w\")\nheaders = \"product_name, ex_price, new_price, year, month, day\\n\"\nf.write(headers)\n\n# grabbing title, ex, new price and date for each product, then save info to products.csv\nfor container in containers:\n product_name = container.div.a[\"data-title\"]\n ex_price = container.find(\"div\", {\"class\":\"summary-excerpt\"}).text.split(\" \")[1]\n new_price = container.find(\"div\", {\"class\":\"summary-excerpt\"}).text.split(\" \")[-1]\n date = container.find(\"div\", {\"class\":\"summary-content sqs-gallery-meta-container\"}).time[\"datetime\"].split(\"-\") \n f.write(product_name.replace(\",\", \"|\") + \",\" + ex_price + \",\" + new_price + \",\" + date[0] + \",\" + date[1] + \",\" + date[-1] + \"\\n\")\n# Closing...\nf.close()\n", "sub_path": "deal-src-webscrape.py", "file_name": "deal-src-webscrape.py", "file_ext": "py", "file_size_in_byte": 1169, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "urllib.request.urlopen", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 14, "usage_type": "call"}]} +{"seq_id": "261035302", "text": "\nimport numpy as np\n\ntry:\n import quippy as qp\n from quippy import descriptors\nexcept ImportError:\n raise ImportError(\"Quippy with GAP is required for using SOAP descriptors.\")\n\nfrom ase.data import atomic_numbers\n\nDEFAULT_SOAP_PARAMS = {\n 'cutoff' : 3.0,\n 'cutoff_transition_width' : 1.0,\n 'l_max' : 6, 'n_max' : 6,\n 'atom_sigma' : 0.4\n}\n\n# From https://github.com/tqdm/tqdm/issues/506#issuecomment-373126698\nimport sys\ntry:\n ipy_str = str(type(get_ipython()))\n if 'zmqshell' in ipy_str:\n from tqdm import tqdm_notebook as tqdm\n if 'terminal' in ipy_str:\n from tqdm import tqdm\nexcept:\n if sys.stderr.isatty():\n from tqdm import tqdm\n else:\n def tqdm(iterable, **kwargs):\n return iterable\n\nclass SOAP(object):\n \"\"\"Compute the SOAP vectors in a SiteNetwork\n\n :param SiteNetwork sn:\n :param int tracer_atomic_number = None: The type of tracer atom to add. If None,\n defaults to the type of the first mobile atom in the SiteNetwork.\n :param dict soap_params = {}: Any custom SOAP params.\n \"\"\"\n\n def __init__(self, sn, tracer_atomic_number = None, soap_params = {}, verbose = True):\n\n # Make a copy of the structure\n self._structure = qp.Atoms(sn.static_structure)\n # Add a tracer\n if tracer_atomic_number is None:\n tracer_atomic_number = sn.structure.get_atomic_numbers()[sn.mobile_mask][0]\n\n self.tracer_atomic_number = tracer_atomic_number\n\n self._structure.add_atoms((0.0, 0.0, 0.0), tracer_atomic_number)\n self._tracer_index = len(self._structure) - 1\n\n # Create the descriptor\n soap_opts = dict(DEFAULT_SOAP_PARAMS)\n soap_opts.update(soap_params)\n soap_cmd_line = [\"soap\"]\n # User options\n for opt in soap_opts:\n soap_cmd_line.append(\"{}={}\".format(opt, soap_opts[opt]))\n # Stuff that's the same no matter what\n soap_cmd_line.append(\"n_Z=1\") #always one tracer\n soap_cmd_line.append(\"Z={{{}}}\".format(self.tracer_atomic_number))\n\n self._soaper = descriptors.Descriptor(\" \".join(soap_cmd_line))\n\n self.verbose = verbose\n\n @property\n def n_dim(self):\n return self._soaper.n_dim\n\n def get_descriptors(self, pts, out = None):\n assert pts.ndim == 2 and pts.shape[1] == 3\n\n if out is None:\n out = np.empty(shape = (len(pts), self.n_dim), dtype = np.float)\n\n assert out.shape == (len(pts), self.n_dim)\n\n self._structure.set_cutoff(self._soaper.cutoff())\n\n for i, pt in enumerate(tqdm(pts, desc = \"SOAP\") if self.verbose else pts):\n # Move tracer\n self._structure.positions[self._tracer_index] = pt\n\n # SOAP requires connectivity data to be computed first\n self._structure.calc_connect()\n\n #There should only be one descriptor, since there should only be one Li\n out[i] = self._soaper.calc(self._structure)['descriptor'][0]\n\n return out\n\n def soaps_similar_for_points(self, pts, threshold = 0.95):\n \"\"\"Determine if all SOAPs for points are at least threshold similar.\"\"\"\n assert pts.ndim == 2 and pts.shape[1] == 3\n\n self._structure.set_cutoff(self._soaper.cutoff())\n\n initial_soap = None\n initial_soap_norm = None\n\n for i, pt in enumerate(tqdm(pts, desc = \"SOAP\") if self.verbose else pts):\n # Move tracer\n self._structure.positions[self._tracer_index] = pt\n\n # SOAP requires connectivity data to be computed first\n self._structure.calc_connect()\n\n #There should only be one descriptor, since there should only be one Li\n soap = self._soaper.calc(self._structure)['descriptor'][0]\n\n if initial_soap is None:\n initial_soap = soap.copy()\n initial_soap_norm = np.linalg.norm(initial_soap)\n else:\n similarity = np.dot(soap, initial_soap)\n similarity /= np.linalg.norm(soap)\n similarity /= initial_soap_norm\n\n if similarity < threshold:\n return False\n\n return True\n", "sub_path": "sitator/site_descriptors/SOAP.py", 
"file_name": "SOAP.py", "file_ext": "py", "file_size_in_byte": 4188, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "sys.stderr.isatty", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 28, "usage_type": "attribute"}, {"api_name": "quippy.Atoms", "line_number": 46, "usage_type": "call"}, {"api_name": "quippy.descriptors.Descriptor", "line_number": 67, "usage_type": "call"}, {"api_name": "quippy.descriptors", "line_number": 67, "usage_type": "name"}, {"api_name": "numpy.empty", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 79, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 85, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 121, "usage_type": "attribute"}]} +{"seq_id": "568764876", "text": "#! /usr/bin/env python\n\n# 2.12 Final Project\n# Phillip Daniel April 2021\n\nimport rospy\nimport numpy as np\nfrom nav_msgs.msg import Odometry\nfrom tf.transformations import euler_from_quaternion\nfrom geometry_msgs.msg import Point, Twist\nfrom math import atan2\n\n# These global variables are the pose of the mobile robot\nx= 0.0\ny=0.0\ntheta=0.0\n\ndef newOdom(msg):\n\tglobal x\n\tglobal y\n\tglobal theta\n\n\tx=msg.pose.pose.position.x\n\ty=msg.pose.pose.position.y\n\n\n\trot_q=msg.pose.pose.orientation\n\t(roll, pitch, theta)=euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])\n\n\n\ndef searchForAprilTags(x,y,theta):\n\t\n\tclass myClass(object):\n\t\tpass\n\t\n\t# Tag one\n\t# Visibility information\n\ttag1Vis=myClass()\n\ttag1Vis.label='tag1'\n\t# Tag Position Relative to Global (0,0)\n\ttag1Vis.x=-3.85\n\ttag1Vis.y=-1.79\n\t# Robot Position Bounding Box\n\ttag1Vis.xMin=tag1Vis.x\n\ttag1Vis.xMax=10\n\ttag1Vis.yMin=-100\n\ttag1Vis.yMax=100\n\n\t\n\t# Tag one\n\t# Visibility information\n\ttag2Vis=myClass()\n\ttag2Vis.label='tag2'\n\t# Tag Position Relative to Global (0,0)\n\ttag2Vis.x=-3.3\n\ttag2Vis.y=-7.0\n\t# Robot Position Bounding Box\n\ttag2Vis.xMin=-5.5\n\ttag2Vis.xMax=10\n\ttag2Vis.yMin=-100\n\ttag2Vis.yMax=100\n\n\t\n\t# Tag one\n\t# Visibility information\n\ttag3Vis=myClass()\n\ttag3Vis.label='tag3'\n\t# Tag Position Relative to Global (0,0)\n\ttag3Vis.x=-6.66\n\ttag3Vis.y=-6.72\n\t# Robot Position Bounding Box\n\ttag3Vis.xMin=tag3Vis.x\n\ttag3Vis.xMax=0\n\ttag3Vis.yMin=tag2Vis.y\n\ttag3Vis.yMax=-4.5\n\n\t\n\t# Tag one\n\t# Visibility information\n\ttag4Vis=myClass()\n\ttag4Vis.label='tag4'\n\t# Tag Position Relative to Global (0,0)\n\ttag4Vis.x=-6.23\n\ttag4Vis.y=-0.19\n\t# Robot Position Bounding Box\n\ttag4Vis.xMin=-10\n\ttag4Vis.xMax=-5.83\n\ttag4Vis.yMin=-100\n\ttag4Vis.yMax=100\n\n\t\n\t# Tag one\n\t# Visibility information\n\ttag5Vis=myClass()\n\ttag5Vis.label='tag5'\n\t# Tag Position Relative to Global (0,0)\n\ttag5Vis.x=-9\n\ttag5Vis.y=-4.88\n\t# Robot Position Bounding Box\n\ttag5Vis.xMin=-10\n\ttag5Vis.xMax=-5.83\n\ttag5Vis.yMin=-4.88\n\ttag5Vis.yMax=0.0\n\n\t\n\n\tdef computeLineOfSight(tagVis, x, y, theta): #Pass in the class that describes visibility informaiton of each of the April tag. 
This returns information from each tag that is visible.\n\t\t# Robot's blind spot half-angle (Rad)\n\t\tcameraFOV=1.12\n\t\tthetaBlind=(2*np.pi-cameraFOV)/2\n\t\t\n\t\t# Compute relative angle between the April Tag and the robot (reference notes)\n\t\tV=np.array([tagVis.x-x, tagVis.y-y])\n\t\txHat=np.array([np.cos(theta),np.sin(theta)])\n\t\tyHat=np.array([-np.sin(theta),np.cos(theta)])\n\t\talpha=atan2(np.dot(V,yHat), np.dot(V, xHat))\n\n\n\t\t# Check if the robot is pointed at the tag, and in the proper bounding box to see the tag\n\t\tclass relativePositionFromAprilTag(object):\n\t\t\tpass\n\n\t\trelPos=relativePositionFromAprilTag() \n\n\n\t\tif x>tagVis.xMin and xtagVis.yMin and y thetaBlind or alpha < -thetaBlind ):\n\t\t\trelPos.label=tagVis.label\n\t\t\trelPos.x=-V[0]\n\t\t\trelPos.y=-V[1]\n\t\telse:\n\t\t\trelPos.label='no tag'\n\t\treturn relPos\n\n\trelPos = myClass()\n\n\trelPos.relPos1=computeLineOfSight(tag1Vis, x, y, theta)\n\trelPos.relPos2=computeLineOfSight(tag2Vis, x, y, theta)\n\trelPos.relPos3=computeLineOfSight(tag3Vis, x, y, theta)\n\trelPos.relPos4=computeLineOfSight(tag4Vis, x, y, theta)\n\trelPos.relPos5=computeLineOfSight(tag5Vis, x, y, theta)\n\n\treturn relPos\n\n\t\ndef setSpeeds(xDot, yDot, thetaDot):\n\tdef clamp(num):\n\t\treturn max(min(num,1),-1)\n\n\txDot=clamp(xDot)\n\tyDot=clamp(yDot)\n\tthetaDot=clamp(thetaDot)\n\n\tmaxSpeed=1.4 #Maximum Linear Speed m/s\n\tmaxRot=.5 #Maximum Angular Velocit rad/s\n\n\tspeed.linear.x = xDot*maxSpeed\n\tspeed.linear.y = yDot*maxSpeed\n\tspeed.angular.z = thetaDot*maxRot\n\n\treturn speed\n\n\n\nrospy.init_node(\"speed_controller\")\n\nsub = rospy.Subscriber(\"/odom\", Odometry, newOdom)\npub = rospy.Publisher(\"/cmd_vel\", Twist, queue_size=1)\n\nspeed = Twist()\n\nr = rospy.Rate(4)\n\n\n# This is a way to specify a location with respect to the environment's coordinate system\ngoal = Point()\ngoal.x=-1\ngoal.y=-1\n\n# This is the main loop\nwhile not rospy.is_shutdown():\n\n\t# This is how to set the speed of the mobile robot. Choose a value between -1 < x < 1 for each speed. 
1 Corresponds to the maximum speed.\n\txDot=0\n\tyDot=0\n\tthetaDot=.75\n\tspeed=setSpeeds(xDot, yDot, thetaDot)\n\n\t# This function searches for April tags based on the global pose of the robot\n\ttagInfo = searchForAprilTags(x,y,theta)\n\n\n\tif tagInfo.relPos1.label != 'no tag':\n\t\t# The relative position of the mobile robot with respect to each April tag that it sees is given by 'tagInfo.relPos#.x/y'\n\t\tprint('Robot\\'s relative position: ',tagInfo.relPos1.x, tagInfo.relPos1.y)\n\n\t\t# If a tag is visible, this will return 'tag#' otherwise it returns 'no tag'\n\t\tprint('Tag label = ', tagInfo.relPos1.label)\n\n\t# You will also be able to compute the orientation of the mobil robot.\n\tprint('Robot orientation = ', theta)\n\n\t# This sends the speed command to the mobile robot\n\tpub.publish(speed)\n\n\t# This sets the loop rate\n\tr.sleep()\n", "sub_path": "simple_controller/src/controller.py", "file_name": "controller.py", "file_ext": "py", "file_size_in_byte": 4885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "tf.transformations.euler_from_quaternion", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 116, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 117, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 165, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 167, "usage_type": "call"}, {"api_name": "nav_msgs.msg.Odometry", "line_number": 167, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 168, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 168, "usage_type": "argument"}, {"api_name": "geometry_msgs.msg.Twist", "line_number": 170, "usage_type": "call"}, {"api_name": "rospy.Rate", "line_number": 172, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Point", "line_number": 176, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 181, "usage_type": "call"}]} +{"seq_id": "385726250", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 11 15:58:03 2019\n\n@author: Home\n\"\"\"\n\n#from base_FE2 import Mesh, Node, Element, Triangle,Segment\nfrom scipy.sparse import lil_matrix, csr_matrix\nfrom scipy.sparse.linalg import spsolve\nimport numpy as np\n\n\nclass FE_method :\n def __init__(this,mesh):\n this.mesh=mesh\n \n #default :\n this.coeff_d=1\n this.t=0\n this.dt=0 \n \"\"\"\n conditions\n \"\"\"\n def init_cond(this,coeff_d, dt, U0):\n this.coeff_d =coeff_d\n this.dt = dt\n this.U0=U0\n this.Uold=this.U0 #*np.ones(this.Ns)\n \n ' Condition dirichlet '\n for id_s in this.mesh.Nodes_bords[1]:\n this.Uold[id_s-1] = 10\n\n for id_s in this.mesh.Nodes_bords[2]:\n this.Uold[id_s-1] = 0\n \n def matrice_mass(this): #\"lumped mass matrix\"\n this.M = lil_matrix((this.mesh.Ns, this.mesh.Ns))\n \n for p in range(0, this.mesh.Nt):\n for i in range(0, 3):\n I = 
 for p in range(0, this.mesh.Nt):\r\n for i in range(0, 3):\r\n I = this.mesh.Triangles[p].sommets[i]\n for j in range(0, 3):\r\n J = this.mesh.Triangles[p].sommets[j]\r\n if i == j : # 2\r\n this.M[I-1,J-1] += this.mesh.aire_element(p+1)/6.0\r\n else: # 1\r\n this.M[I-1,J-1] += this.mesh.aire_element(p+1)/12.0\r\n \r\n #this.M=this.M.tocsr() \r\n return this.M\r\n\r\n def matrice_rigidite(this):\r\n this.D = lil_matrix((this.mesh.Ns, this.mesh.Ns))\r\n \r\n for p in range(0, this.mesh.Nt):\r\n B = this.mesh.matrice_B(p)\r\n bTb = np.dot(np.transpose(B),B)\r\n\r\n for i in range(0, 3):\r\n I = this.mesh.Triangles[p].sommets[i]\r\n for j in range(0, 3):\r\n J = this.mesh.Triangles[p].sommets[j]\r\n this.D[I-1,J-1] += (this.mesh.aire_element(p+1) ) * np.dot( np.transpose(this.mesh.grad_phi_ref[j]) ,np.dot(bTb, this.mesh.grad_phi_ref[i]))\r\n \r\n #this.D=this.D.tocsr()\r\n return this.D\r\n \r\n\r\n \"\"\"\r\n Matrix A's calculation: \r\n \"\"\"\r\n def matrice_A(this):\r\n # this.A = this.M + this.D\r\n #this.A = lil_matrix((this.mesh.Ns, this.mesh.Ns))#, dtype = np.complex)\r\n\r\n this.A= lil_matrix(this.M + this.coeff_d*this.dt*this.D)\r\n print(\"A[i,i]\",this.M[1,1]+ this.dt*this.D[1,1])\r\n print(\"A[i,j] actual\",this.A[1,1])\r\n \r\n ' Dirichlet boundary condition '\r\n for id_s in this.mesh.Nodes_bords[1]: # left\r\n this.A[int(id_s) -1,:] = 0\r\n this.A[int(id_s) -1,int(id_s) -1] = 1\r\n \r\n for id_s in this.mesh.Nodes_bords[2]: # right\r\n this.A[int(id_s) -1,:] = 0\r\n this.A[int(id_s) -1,int(id_s) -1] = 1\r\n\r\n return this.A\r\n \r\n \r\n def vector_b(this):\r\n \r\n this.b=np.dot(this.M.toarray(),this.Uold)\r\n\r\n ' Dirichlet condition '\r\n for id_s in this.mesh.Nodes_bords[1]: #left boundary\r\n this.b[id_s-1] = 10\r\n\r\n for id_s in this.mesh.Nodes_bords[2]: #right boundary\r\n this.b[id_s-1] = 0\r\n\r\n ' Neumann condition on the inner boundary, constant function'\r\n# for p in range(0,this.b_int_size):\r\n# taille=this.aire_seg(p+1,2)\r\n# p1=this.Bord_exts[p].sommets[0]\r\n# p2=this.Bord_exts[p].sommets[1]\r\n# this.b[p1]+=taille*this.u_inc()\r\n# this.b[p2]+=taille*this.u_inc()\r\n \r\n return this.b\r\n\r\n def vector_U(this):\r\n this.vector_b()\r\n this.U = spsolve(this.A.tocsr(), this.b)\r\n this.Uold=this.U\r\n \r\n return this.U\r\n \r\n def maj_matrices(this):\r\n this.matrice_mass()\r\n this.matrice_rigidite()\r\n this.matrice_A()\r\n this.vector_b()\r\n this.vector_U()\r\n return", "sub_path": "diff_instationnaire/convergence_test/matrices_FE.py", "file_name": "matrices_FE.py", "file_ext": "py", "file_size_in_byte": 3775, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "scipy.sparse.lil_matrix", "line_number": 39, "usage_type": "call"}, {"api_name": "scipy.sparse.lil_matrix", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 65, "usage_type": "call"}, {"api_name": "scipy.sparse.lil_matrix", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 96, "usage_type": "call"}, {"api_name": "scipy.sparse.linalg.spsolve", "line_number": 117, "usage_type": "call"}]}
+{"seq_id": "620893612", "text": "import time\nimport pickle\n\nimport numpy as np\nimport torch as th\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Categorical, Normal\nfrom stable_baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer\nfrom stable_baselines.common.vec_env import VecNormalize\n\nfrom environments.utils import makeEnv\n
from rl_baselines.base_classes import BaseRLObject\nfrom rl_baselines.utils import WrapFrameStack, CustomDummyVecEnv, loadRunningAverage, MultiprocessSRLModel\nfrom rl_baselines.models.sac_models import MLPPolicy, MLPQValueNetwork, MLPValueNetwork, NatureCNN\nfrom state_representation.episode_saver import LogRLStates\nfrom srl_zoo.utils import printYellow\n\n\ndef l2Loss(tensor):\n \"\"\"\n L2 loss given a tensor\n :param tensor: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n return (tensor.float() ** 2).mean()\n\n\ndef toTensor(arr, device):\n \"\"\"\n Returns a pytorch Tensor object from a numpy array\n :param arr: (numpy array)\n :param device: (th.device)\n :return: (Tensor)\n \"\"\"\n return th.from_numpy(arr).to(device)\n\n\ndef detachToNumpy(tensor):\n \"\"\"\n Gets a pytorch tensor and returns a numpy array\n Detach creates a new Tensor,\n detached from the current graph whose node will never require gradient.\n :param tensor: (th.Tensor)\n :return: (numpy float)\n \"\"\"\n return tensor.to(th.device('cpu')).detach().numpy()\n\n\ndef softUpdate(*, source, target, factor):\n \"\"\"\n Update (softly) the weights of target network towards the weights of a source network.\n The amount of change is regulated by a factor.\n :param source: (Pytorch Model)\n :param target: (Pytorch Model)\n :param factor: (float) soft update factor in [0, 1]\n \"\"\"\n for source_param, target_param in zip(source.parameters(), target.parameters()):\n target_param.data.copy_(\n source_param.data * factor + target_param.data * (1.0 - factor)\n )\n\n\ndef hardUpdate(*, source, target):\n \"\"\"\n Copy the weights from source network to target network\n :param source: (Pytorch Model)\n :param target: (Pytorch Model)\n \"\"\"\n for source_param, target_param in zip(source.parameters(), target.parameters()):\n target_param.data.copy_(source_param.data)\n\n\ndef channelFirst(tensor):\n \"\"\"\n Permute the dimension to match pytorch convention\n for images (BCHW: Batch x Channel x Height x Width).\n :param tensor: (th.Tensor)\n :return: (th.Tensor)\n \"\"\"\n return tensor.permute(0, 3, 1, 2)\n\n\nclass SACModel(BaseRLObject):\n \"\"\"\n Class containing an implementation of soft actor critic\n Note: the policy with CNN on raw pixels is currently slow (5 FPS)\n Also, one difference with the paper is that the policy for continuous actions\n is a gaussian and not a mixture of gaussians.\n \"\"\"\n\n def __init__(self):\n super(SACModel, self).__init__()\n self.device = None\n self.cuda = False\n self.policy_net, self.q_value_net, self.value_net, self.target_value_net = None, None, None, None\n self.deterministic = False # Only available during testing\n self.continuous_actions = False\n self.encoder_net = None\n self.using_images = False\n # Min and max value for the std of the gaussian policy\n self.log_std_min = -20\n self.log_std_max = 2\n\n def save(self, save_path, _locals=None):\n assert self.policy_net is not None, \"Error: must train or load model before use\"\n with open(save_path, \"wb\") as f:\n pickle.dump(self.__getstate__(), f)\n # Move networks back to the right device\n self.__setstate__(self.__dict__)\n\n @classmethod\n def load(cls, load_path, args=None):\n with open(load_path, \"rb\") as f:\n class_dict = pickle.load(f)\n loaded_model = SACModel()\n loaded_model.__dict__ = class_dict\n return loaded_model\n\n @classmethod\n def makeEnv(cls, args, env_kwargs=None, load_path_normalise=None):\n # Even though SAC is single core only, we need to use the pipe system to work\n if env_kwargs is not None and env_kwargs.get(\"use_srl\", False):
env_kwargs.get(\"use_srl\", False):\n srl_model = MultiprocessSRLModel(1, args.env, env_kwargs)\n env_kwargs[\"state_dim\"] = srl_model.state_dim\n env_kwargs[\"srl_pipe\"] = srl_model.pipe\n\n env = CustomDummyVecEnv([makeEnv(args.env, args.seed, 0, args.log_dir, env_kwargs=env_kwargs)])\n\n if args.srl_model != \"raw_pixels\":\n env = VecNormalize(env, norm_obs=True, norm_reward=False)\n env = loadRunningAverage(env, load_path_normalise=load_path_normalise)\n\n # Normalize only raw pixels\n # WARNING: when using framestacking, the memory used by the replay buffer can grow quickly\n return WrapFrameStack(env, args.num_stack, normalize=args.srl_model == \"raw_pixels\")\n\n def customArguments(self, parser):\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disable cuda for the neural network')\n parser.add_argument('--buffer-size', type=int, default=int(1e3), help=\"Replay buffer size\")\n parser.add_argument('-lr', '--learning-rate', type=float, default=3e-4, help=\"Learning rate\")\n parser.add_argument('--gamma', type=float, default=0.99, help=\"Discount factor\")\n parser.add_argument('--w-reg', type=float, default=1e-3, help=\"Weight for policy network regularization\")\n parser.add_argument('--soft-update-factor', type=float, default=1e-2,\n help=\"Rate for updating target net weights\")\n parser.add_argument('--print-freq', type=int, default=500, help=\"Print Frequency (every n steps)\")\n parser.add_argument('--batch-size', type=int, default=128, help=\"Minibatch size for each gradient update\")\n parser.add_argument('--gradient-steps', type=int, default=1, help=\"How many gradient update after each step\")\n parser.add_argument('--reward-scale', type=float, default=1.0,\n help=\"Scaling factor for raw reward. (entropy factor)\")\n parser.add_argument('--log-states', action='store_true', default=False,\n help='Log the states encountered during RL training (only valid with SRL models)')\n return parser\n\n def moveToDevice(self, device, d):\n \"\"\"\n Move the different networks to a given device (cpu|cuda)\n :param device: (th.device)\n :param d: (dict) the class dictionnary\n \"\"\"\n keys = ['value_net', 'target_value_net', 'q_value_net', 'policy_net']\n if self.using_images:\n keys += ['encoder_net']\n\n for key in keys:\n d[key] = d[key].to(device)\n\n # used to prevent pickling of pytorch device object, as they cannot be pickled\n def __getstate__(self):\n d = self.__dict__.copy()\n self.moveToDevice(th.device('cpu'), d)\n\n if 'device' in d:\n d['device'] = 'cpu'\n return d\n\n # restore torch device from a pickle using the same config, if cuda is available\n def __setstate__(self, d):\n if 'device' in d:\n d['device'] = th.device(\"cuda\" if th.cuda.is_available() and d['cuda'] else \"cpu\")\n\n self.moveToDevice(d['device'], d)\n self.__dict__.update(d)\n\n def sampleAction(self, obs):\n \"\"\"\n Sample action from Normal or Categorical distribution\n (continuous vs discrete actions) and return the log probability\n + policy parameters for regularization\n :param obs: (th.Tensor)\n :return: (tuple(th.Tensor))\n \"\"\"\n if self.continuous_actions:\n mean_policy, log_std = self.policy_net(obs)\n # Clip the value of the standard deviation\n log_std = th.clamp(log_std, self.log_std_min, self.log_std_max)\n std = th.exp(log_std)\n distribution = Normal(mean_policy, std)\n # Used only during testing\n if self.deterministic:\n pre_tanh_value = mean_policy\n else:\n pre_tanh_value = distribution.sample().detach()\n # Squash the value\n action = 
F.tanh(pre_tanh_value)\n # Correction to the log prob because of the squashing function\n epsilon = 1e-6\n log_pi = distribution.log_prob(pre_tanh_value) - th.log(1 - action ** 2 + epsilon)\n log_pi = log_pi.sum(-1, keepdim=True)\n else:\n mean_policy, log_std = self.policy_net(obs)\n # Here mean policy is the energy of each action\n distribution = Categorical(logits=mean_policy)\n if self.deterministic:\n action = th.argmax(F.softmax(mean_policy, dim=1), dim=1)\n else:\n action = distribution.sample().detach()\n # Only valid for continuous actions\n pre_tanh_value = action * 0.0\n log_std = log_std * 0.0\n log_pi = distribution.log_prob(action).unsqueeze(1)\n\n return action, log_pi, pre_tanh_value, mean_policy, log_std\n\n def encodeObservation(self, obs):\n \"\"\"\n Convert observation to pytorch tensor\n and encode it (extract features) if needed using a CNN\n :param obs:(numpy array)\n :return: (th.Tensor)\n \"\"\"\n obs = self.toFloatTensor(obs)\n if self.using_images:\n obs = self.encoder_net(channelFirst(obs))\n return obs\n\n def getActionProba(self, obs, dones=None):\n \"\"\"\n Returns the action probability for the given observation\n :param obs: (numpy array)\n :param dones: ([bool])\n :return: (numpy float) the action probability\n \"\"\"\n with th.no_grad():\n obs = self.encodeObservation(obs)\n mean_policy, _ = self.policy_net(obs)\n\n if self.continuous_actions:\n # In the case of continuous action\n # we return the mean of the gaussian policy\n # instead of probability\n action = mean_policy\n else:\n # In the case of discrete actions\n # mean_policy correspond to the energy|logits for each action\n # we need to apply a softmax in order to get a probability\n action = F.softmax(mean_policy, dim=-1)\n return detachToNumpy(action)\n\n def getAction(self, obs, dones=None):\n \"\"\"\n From an observation returns the associated action\n :param obs: (numpy array)\n :param dones: ([bool])\n :return: (numpy float)\n \"\"\"\n with th.no_grad():\n obs = self.encodeObservation(obs)\n action, _, _, _, _ = self.sampleAction(obs)\n\n return detachToNumpy(action)[0]\n\n def toFloatTensor(self, x):\n \"\"\"\n Convert a numpy array to a torch float tensor\n :param x: (np.array)\n :return: (th.Tensor)\n \"\"\"\n return toTensor(x, self.device).float()\n\n @classmethod\n def getOptParam(cls):\n return {\n \"learning_rate\": (float, (1e-2, 1e-5)),\n \"gamma\": (float, (0, 1)),\n \"w_reg\": (float, (0, 1)),\n \"soft_update_factor\": (float, (0, 1)),\n \"batch_size\": (int, (32, 256)),\n \"gradient_step\": (int, (1, 10)),\n \"reward_scale\": (float, (0, 100))\n }\n\n def train(self, args, callback, env_kwargs=None, train_kwargs=None):\n env = self.makeEnv(args, env_kwargs=env_kwargs)\n\n # set hyperparameters\n args.__dict__.update(train_kwargs)\n\n self.cuda = th.cuda.is_available() and not args.no_cuda\n self.device = th.device(\"cuda\" if self.cuda else \"cpu\")\n self.using_images = args.srl_model == \"raw_pixels\"\n\n assert not (args.log_states and self.using_images), \"SRL logger can only be used with SRL models\"\n\n if args.log_states:\n srl_logger = LogRLStates(args.log_dir)\n else:\n srl_logger = None\n\n self.continuous_actions = args.continuous_actions\n\n if args.continuous_actions:\n action_space = np.prod(env.action_space.shape)\n else:\n action_space = env.action_space.n\n\n if args.srl_model != \"raw_pixels\":\n input_dim = np.prod(env.observation_space.shape)\n else:\n n_channels = env.observation_space.shape[-1]\n # We use an additional CNN when using images\n # to extract 
features\n self.encoder_net = NatureCNN(n_channels).to(self.device)\n input_dim = 512 # output dim of the encoder net\n\n self.policy_net = MLPPolicy(input_dim, action_space).to(self.device)\n self.q_value_net = MLPQValueNetwork(input_dim, action_space, args.continuous_actions).to(self.device)\n self.value_net = MLPValueNetwork(input_dim).to(self.device)\n self.target_value_net = MLPValueNetwork(input_dim).to(self.device)\n\n # Make sure target net has the same weights as value_net\n hardUpdate(source=self.value_net, target=self.target_value_net)\n\n value_criterion = nn.MSELoss()\n q_value_criterion = nn.MSELoss()\n\n replay_buffer = ReplayBuffer(args.buffer_size)\n\n policy_optimizer = th.optim.Adam(self.policy_net.parameters(), lr=args.learning_rate)\n value_optimizer = th.optim.Adam(self.value_net.parameters(), lr=args.learning_rate)\n q_optimizer = th.optim.Adam(self.q_value_net.parameters(), lr=args.learning_rate)\n\n obs = env.reset()\n start_time = time.time()\n if srl_logger is not None:\n srl_logger.reset(obs, env.getOriginalObs())\n\n for step in range(args.num_timesteps):\n action = self.getAction(obs[None])\n new_obs, reward, done, info = env.step(action)\n # Log states\n if srl_logger is not None:\n srl_logger.step(new_obs, env.getOriginalObs(), action, reward, done)\n\n # Fill the replay buffer\n replay_buffer.add(obs, action, reward, new_obs, float(done))\n obs = new_obs\n\n # Callback for plotting and saving best model\n if callback is not None:\n callback(locals(), globals())\n\n if done:\n obs = env.reset()\n if srl_logger is not None:\n srl_logger.reset(obs, env.getOriginalObs())\n # Update the different networks\n for _ in range(args.gradient_steps):\n # Check that there is enough data in the buffer replay\n if step < args.batch_size:\n break\n\n # Sample a minibatch from the replay buffer\n batch_obs, actions, rewards, batch_next_obs, dones = map(lambda x: self.toFloatTensor(x),\n replay_buffer.sample(args.batch_size))\n\n if self.using_images:\n # Extract features from the images\n batch_obs = self.encoder_net(channelFirst(batch_obs))\n batch_next_obs = self.encoder_net(channelFirst(batch_next_obs))\n\n rewards = rewards.unsqueeze(1)\n dones = dones.unsqueeze(1)\n\n value_pred = self.value_net(batch_obs)\n q_value = self.q_value_net(batch_obs, actions)\n # Sample actions and retrieve log proba\n # pre_tanh_value, mean_policy and log_std are only used for regularization\n new_actions, log_pi, pre_tanh_value, mean_policy, log_std = self.sampleAction(batch_obs)\n\n # Q-Value function loss\n target_value_pred = self.target_value_net(batch_next_obs)\n # TD error with reward scaling\n next_q_value = args.reward_scale * rewards + (1 - dones) * args.gamma * target_value_pred.detach()\n loss_q_value = 0.5 * q_value_criterion(q_value, next_q_value.detach())\n\n # Value Function loss\n q_value_new_actions = self.q_value_net(batch_obs, new_actions)\n next_value = q_value_new_actions - log_pi\n loss_value = 0.5 * value_criterion(value_pred, next_value.detach())\n\n # Policy Loss\n # why not log_pi.exp_() ?\n loss_policy = (log_pi * (log_pi - q_value_new_actions + value_pred).detach()).mean()\n # Regularization\n if self.continuous_actions:\n loss_policy += args.w_reg * sum(map(l2Loss, [mean_policy, log_std]))\n\n q_optimizer.zero_grad()\n # Retain graph if we are using a CNN for extracting features\n loss_q_value.backward(retain_graph=self.using_images)\n q_optimizer.step()\n\n value_optimizer.zero_grad()\n loss_value.backward(retain_graph=self.using_images)\n 
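# Editor's aside (added, not in the original sac.py record): retain_graph is set
# above precisely when raw pixels are used, because batch_obs and batch_next_obs
# then come out of the shared encoder_net; the Q-value, value and policy losses
# all backpropagate through that same encoder graph, and the first backward()
# call would free it unless it is retained for the later ones.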
value_optimizer.step()\n\n policy_optimizer.zero_grad()\n loss_policy.backward()\n policy_optimizer.step()\n\n # Softly update target value_pred network\n softUpdate(source=self.value_net, target=self.target_value_net, factor=args.soft_update_factor)\n\n if (step + 1) % args.print_freq == 0:\n print(\"{} steps - {:.2f} FPS\".format(step, step / (time.time() - start_time)))\n", "sub_path": "rl_baselines/rl_algorithm/sac.py", "file_name": "sac.py", "file_ext": "py", "file_size_in_byte": 17398, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "torch.from_numpy", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 47, "usage_type": "call"}, {"api_name": "rl_baselines.base_classes.BaseRLObject", "line_number": 84, "usage_type": "name"}, {"api_name": "pickle.dump", "line_number": 108, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 115, "usage_type": "call"}, {"api_name": "rl_baselines.utils.MultiprocessSRLModel", "line_number": 124, "usage_type": "call"}, {"api_name": "rl_baselines.utils.CustomDummyVecEnv", "line_number": 128, "usage_type": "call"}, {"api_name": "environments.utils.makeEnv", "line_number": 128, "usage_type": "call"}, {"api_name": "stable_baselines.common.vec_env.VecNormalize", "line_number": 131, "usage_type": "call"}, {"api_name": "rl_baselines.utils.loadRunningAverage", "line_number": 132, "usage_type": "call"}, {"api_name": "rl_baselines.utils.WrapFrameStack", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 181, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 181, "usage_type": "attribute"}, {"api_name": "torch.clamp", "line_number": 197, "usage_type": "call"}, {"api_name": "torch.exp", "line_number": 198, "usage_type": "call"}, {"api_name": "torch.distributions.Normal", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.nn.functional.tanh", "line_number": 206, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 206, "usage_type": "name"}, {"api_name": "torch.log", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.distributions.Categorical", "line_number": 214, "usage_type": "call"}, {"api_name": "torch.argmax", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 216, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 245, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 258, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 268, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 300, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 300, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 301, "usage_type": "call"}, {"api_name": "state_representation.episode_saver.LogRLStates", "line_number": 307, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 314, "usage_type": "call"}, {"api_name": "numpy.prod", "line_number": 319, "usage_type": "call"}, {"api_name": "rl_baselines.models.sac_models.NatureCNN", "line_number": 324, "usage_type": "call"}, 
{"api_name": "rl_baselines.models.sac_models.MLPPolicy", "line_number": 327, "usage_type": "call"}, {"api_name": "rl_baselines.models.sac_models.MLPQValueNetwork", "line_number": 328, "usage_type": "call"}, {"api_name": "rl_baselines.models.sac_models.MLPValueNetwork", "line_number": 329, "usage_type": "call"}, {"api_name": "rl_baselines.models.sac_models.MLPValueNetwork", "line_number": 330, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 335, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 335, "usage_type": "name"}, {"api_name": "torch.nn.MSELoss", "line_number": 336, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 336, "usage_type": "name"}, {"api_name": "stable_baselines.deepq.replay_buffer.ReplayBuffer", "line_number": 338, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 340, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 340, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 341, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 341, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 342, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 342, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 345, "usage_type": "call"}, {"api_name": "time.time", "line_number": 427, "usage_type": "call"}]} +{"seq_id": "140765024", "text": "#!/usr/bin/env python\r\n# -*- coding: UTF-8 -*-\r\n\r\nfrom __future__ import print_function\r\n\r\nimport requests\r\nimport shelve\r\nimport time\r\nimport os\r\nimport re\r\n\r\nclass websitecrawler(object):\r\n \r\n tar_prefix=\"http://science.sciencemag.org\"\r\n target=\"http://science.sciencemag.org/content/sci/%s/%s/\"\r\n tar_content=\"http://science.sciencemag.org/content/%s/%s\"\r\n url_source_cur=\"http://science.sciencemag.org/content/current\"\r\n url_list=\"http://science.sciencemag.org/content/by/year/%d\"\r\n format_dict_volissue=r\">Vol (.*), Iss (.*)<\"\r\n format_dumb_filename=(\".full.\", \".\")\r\n format_url_vol_issue=\"content/([^/]*)/([^/]*)\"\r\n year_first=1880\r\n num_issue_per_vol=13\r\n num_issue_1stinvol_known=(6415,362)\r\n RESPONSE_FAILED=\"failed\"\r\n dir_dat='data\\\\'\r\n filename_dat='sciencemag'\r\n path_dat='data\\\\sciencemag' #auto created in __init__\r\n dict_volissue={}\r\n \r\n def __init__(self):\r\n self.path_dat=self.dir_dat+self.filename_dat\r\n self.load_data()\r\n \r\n def __enter__(self):\r\n self.__init__()\r\n return self\r\n \r\n def __exit__(self, type, value, trace):\r\n self.save_data()\r\n \r\n #function\r\n def save_data(self):\r\n if(not os.path.exists(self.dir_dat)):\r\n os.makedirs(os.path.dirname(self.dir_dat)) \r\n dbase = shelve.open(self.path_dat)\r\n dbase['dict_volissue']=self.dict_volissue\r\n dbase.close()\r\n \r\n def load_data(self):\r\n if(os.path.exists(self.path_dat+'.dat')):\r\n dbase = shelve.open(self.path_dat) \r\n #len(dbase)\r\n self.dict_volissue=dbase['dict_volissue']\r\n \r\n def get_year_cur(self):\r\n return int(time.strftime('%Y',time.localtime()))\r\n \r\n def get_filename_webpath(self,text):\r\n index=text.rindex('/')+1\r\n return text[index:len(text)]\r\n \r\n def filename_cln(self,text):\r\n return text.replace(self.format_dumb_filename[0], self.format_dumb_filename[1])\r\n \r\n def get_vol_issue(self,url):\r\n result=re.findall(self.format_url_vol_issue,url)\r\n if(len(result)==0):\r\n return (-1,-1)\r\n (vol,issue)=result[len(result)-1]\r\n return (vol,issue)\r\n \r\n def 
get_vol_issue_cur(self):\r\n source_url=requests.get(self.url_source_cur).url\r\n return self.get_vol_issue(source_url)\r\n \r\n def get_text(self,url):\r\n source=requests.get(url)\r\n if(source.status_code == requests.codes.ok):\r\n return source.text\r\n else:\r\n return self.RESPONSE_FAILED\r\n \r\n def get_text_volissue(self,vol,issue):\r\n source_url=self.tar_content%(str(vol),str(issue))\r\n return self.get_text(source_url)\r\n \r\n def get_text_cur(self):\r\n return self.get_text(self.url_source_cur)\r\n\r\n #outdated\r\n def get_tarloc_for_page(self,vol,issue,local):\r\n tar_page=self.target%(str(vol),str(issue))+\"%d.full.pdf\"\r\n loc_page=local%(str(vol),str(issue))+\"%d.pdf\"\r\n return (tar_page,loc_page)\r\n \r\n #main download module\r\n #returns string\r\n def down_direct(self,url,dest):\r\n if(os.path.exists(dest)):\r\n return 'already exist'\r\n if(not os.path.exists(os.path.dirname(dest))):\r\n os.makedirs(os.path.dirname(dest)) \r\n try:\r\n r=requests.get(url,stream=True, timeout=(10,20))\r\n # timer = Timer(interval, time_out)\r\n if(r.status_code == requests.codes.ok):\r\n# timer.start()\r\n# res = func(*args, **kwargs)\r\n# timer.cancel()\r\n with open(dest,\"wb\") as f:\r\n f.write(r.content)\r\n else:\r\n return str(r.status_code)\r\n except (requests.exceptions.ReadTimeout,\r\n requests.exceptions.ConnectionError,\r\n requests.exceptions.ChunkedEncodingError\r\n ) as e:\r\n if(os.path.exists(dest)):\r\n os.remove(dest)\r\n return str(e)\r\n else:\r\n pass\r\n return 'finished'\r\n \r\n def down_page(self,tar_page,loc_page,page):\r\n dest=loc_page%(page)\r\n if(os.path.exists(dest)):\r\n return True\r\n url=tar_page%(page)\r\n return self.down_direct(url,dest)\r\n \r\n def down_searchbywebcontent_findnext(self,text,index):\r\n temp=text.find('title=\"PDF\"',index)\r\n if(temp==-1): \r\n return (False,'',-1)\r\n left=text.rfind(' loading checkpoint '{}'\".format(self.args.ckpt_latest_path))\n s_epoch, global_step, best_score, optim_dict, model_dict = load_checkpoint(self.args, is_distributed=True)\n self.global_step = global_step\n self.optimizer.load_state_dict(optim_dict)\n self.model.load_state_dict(model_dict, strict=True)\n del optim_dict\n del model_dict\n print(\"=> loaded checkpoint '{}'\".format(self.args.ckpt_latest_path))\n for epoch in range(s_epoch, self.args.max_epochs):\n self.train(epoch)\n mIoU = self.validate(epoch)\n self.lr_scheduler.step(mIoU)\n if dist.get_rank() == 0:\n checkpoint = {\n 'epoch': epoch + 1,\n 'global_step': self.global_step + 1,\n 'best_score': best_score,\n 'model_dict': self.model.state_dict(),\n 'optim_dict': self.optimizer.state_dict(),\n }\n torch.save(checkpoint, self.args.ckpt_latest_path)\n if mIoU > best_score:\n best_score = mIoU\n torch.save(self.model.state_dict(), self.args.ckpt_best_path)\n\n def train(self, epoch):\n self.intersection_meter.reset()\n self.union_meter.reset()\n self.target_meter.reset()\n self.model.train()\n self.train_sampler.set_epoch(epoch)\n max_iter = self.args.max_epochs * len(self.train_loader)\n for step, batch in enumerate(self.train_loader):\n input, target = batch[0].cuda(), batch[1].cuda()\n if self.args.zoom_factor != 8:\n h = int((target.size()[1] - 1) / 8 * self.args.zoom_factor + 1)\n w = int((target.size()[2] - 1) / 8 * self.args.zoom_factor + 1)\n # 'nearest' mode doesn't support align_corners mode and 'bilinear' mode is fine for downsampling\n target = F.interpolate(target.unsqueeze(1).float(), size=(h, w), mode='bilinear', align_corners=True).squeeze(1).long()\n output, aux 
= self.model(input)\n main_loss = self.criterion(output, target)\n aux_loss = self.criterion(aux, target)\n loss = main_loss + self.args.aux_weight * aux_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n current_iter = epoch * len(self.train_loader) + step + 1\n current_lr = poly_learning_rate(self.args.base_lr, current_iter, max_iter, power=self.args.power)\n for idx in range(self.index_split):\n self.optimizer.param_groups[idx]['lr'] = current_lr\n for idx in range(self.index_split, len(self.optimizer.param_groups)):\n self.optimizer.param_groups[idx]['lr'] = current_lr * 10\n\n batch_size = input.size(0)\n main_loss, aux_loss, loss = [l * batch_size for l in [main_loss, aux_loss, loss]]\n count = target.new_tensor([batch_size], dtype=torch.long)\n dist.all_reduce(main_loss), dist.all_reduce(aux_loss), dist.all_reduce(loss), dist.all_reduce(count)\n main_loss, aux_loss, loss = main_loss / count.item(), aux_loss / count.item(), loss / count.item()\n\n output = torch.argmax(output, dim=1)\n intersection, union, target = intersectionAndUnionGPU(output, target, self.args.num_classes, self.args.ignore_label)\n dist.all_reduce(intersection), dist.all_reduce(union), dist.all_reduce(target)\n intersection, union, target = [t.cpu().numpy() for t in [intersection, union, target]]\n self.intersection_meter.update(intersection)\n self.union_meter.update(union)\n self.target_meter.update(target)\n\n accuracy = sum(intersection) / (sum(target) + 1e-10)\n main_loss, aux_loss, loss = [l.cpu().detach().item() for l in [main_loss, aux_loss, loss]]\n if dist.get_rank() == 0 and self.global_step % self.args.print_freq == 0:\n print(f'Train Epoch: [{epoch}][{step}/{len(self.train_loader)}] MainLoss {main_loss:.4f} AuxLoss {aux_loss:.4f} Loss {loss:.4f} Accuracy {accuracy:.4f}')\n self.writer.add_scalar('train/loss', loss, self.global_step)\n self.writer.add_scalar('train/mIoU', np.mean(intersection / (union + 1e-10)), self.global_step)\n self.writer.add_scalar('train/mAcc', np.mean(intersection / (target + 1e-10)), self.global_step)\n self.writer.add_scalar('train/aAcc', accuracy, self.global_step)\n self.writer.add_scalar('train/lr', current_lr, self.global_step)\n \n self.global_step += 1\n\n iou_class = self.intersection_meter.sum / (self.union_meter.sum + 1e-10)\n accuracy_class = self.intersection_meter.sum / (self.target_meter.sum + 1e-10)\n mIoU = np.mean(iou_class)\n mAcc = np.mean(accuracy_class)\n aAcc = sum(self.intersection_meter.sum) / (sum(self.target_meter.sum) + 1e-10)\n if dist.get_rank() == 0:\n print(f'Train result at Epoch [{epoch}]: mIoU {mIoU:.4f} mAcc {mAcc:.4f} aAcc {aAcc:.4f}')\n\n def validate(self, epoch):\n self.model.eval()\n self.loss_meter.reset()\n self.intersection_meter.reset()\n self.union_meter.reset()\n self.target_meter.reset()\n with torch.no_grad():\n for _, batch in enumerate(self.val_loader):\n input = batch[0].cuda(non_blocking=True)\n target = batch[1].cuda(non_blocking=True)\n output = self.model(input)\n if self.args.zoom_factor != 8:\n output = F.interpolate(output, size=target.size()[1:], mode='bilinear', align_corners=True)\n loss = self.criterion(output, target)\n\n batch_size = input.size(0)\n loss *= batch_size\n count = target.new_tensor([batch_size], dtype=torch.long)\n dist.all_reduce(loss), dist.all_reduce(count)\n loss = loss / count.item()\n self.loss_meter.update(loss.cpu().detach().item())\n\n output = torch.argmax(output, dim=1)
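# Editor's aside (added, not in the original train_dist.py record): the call just
# below returns per-class intersection and union pixel counts; accumulated over
# batches, per-class IoU is intersection/union and mIoU is the mean over classes.
# For example, intersection [6, 2] with union [10, 4] gives per-class IoU
# [0.6, 0.5] and mIoU 0.55.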
 intersection, union, target = intersectionAndUnionGPU(output, target, self.args.num_classes, self.args.ignore_label)\n dist.all_reduce(intersection), dist.all_reduce(union), dist.all_reduce(target)\n self.intersection_meter.update(intersection.cpu().numpy())\n self.union_meter.update(union.cpu().numpy())\n self.target_meter.update(target.cpu().numpy())\n\n loss = self.loss_meter.compute()\n iou_class = self.intersection_meter.sum / (self.union_meter.sum + 1e-10)\n accuracy_class = self.intersection_meter.sum / (self.target_meter.sum + 1e-10)\n mIoU = np.mean(iou_class)\n mAcc = np.mean(accuracy_class)\n aAcc = sum(self.intersection_meter.sum) / (sum(self.target_meter.sum) + 1e-10)\n if dist.get_rank() == 0:\n print(f'Validation result at Epoch [{epoch}]: mIoU {mIoU:.4f} mAcc {mAcc:.4f} aAcc {aAcc:.4f} Loss {loss:.4f}')\n self.writer.add_scalar('val/loss', loss, epoch)\n self.writer.add_scalar('val/mIoU', mIoU, epoch)\n self.writer.add_scalar('val/mAcc', mAcc, epoch)\n self.writer.add_scalar('val/aAcc', aAcc, epoch)\n\n return mIoU\n\ndef main_worker(local_rank, gpu_list, args):\n os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list[local_rank]\n dist.init_process_group(backend=DIST_BACKEND, init_method=DIST_INIT_METHOD, world_size=len(gpu_list), rank=local_rank)\n trainer = Trainer(args)\n trainer.run()\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')\n parser.add_argument('--config', type=str, default='configs/cityscapes_pspnet101.yaml')\n parser.add_argument('-r', '--resume', action='store_true', help='resume training')\n args = parser.parse_args()\n yaml.add_constructor('!join', lambda loader, node: os.path.join(*loader.construct_sequence(node)))\n with open(args.config, 'r') as fr:\n cfg = yaml.load(fr, Loader=yaml.Loader)\n cfg['resume'] = args.resume\n args = Namespace(**cfg)\n print(args)\n check_makedirs(args.ckpt_dir)\n check_makedirs(args.log_dir)\n \n gpu_list = list(map(str, range(torch.cuda.device_count())))\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n gpu_list = os.environ['CUDA_VISIBLE_DEVICES'].split(',')\n\n mp.spawn(main_worker, args=(gpu_list, args,), nprocs=len(gpu_list))", "sub_path": "train_dist.py", "file_name": "train_dist.py", "file_ext": "py", "file_size_in_byte": 12937, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "tensorboardX.SummaryWriter", "line_number": 48, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 49, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 49, "usage_type": "name"}, {"api_name": "models.pspnet.PSPNet", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.optim.lr_scheduler.ReduceLROnPlateau", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.nn.SyncBatchNorm", "line_number": 62, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 62, "usage_type": "name"}, {"api_name": "torch.nn.parallel.DistributedDataParallel", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.parallel", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 63, "usage_type": "name"}, {"api_name": "lib.transform.Compose", "line_number": 67, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 67, "usage_type": "name"}, {"api_name": 
"lib.transform.RandScale", "line_number": 68, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 68, "usage_type": "name"}, {"api_name": "lib.transform.RandRotate", "line_number": 69, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 69, "usage_type": "name"}, {"api_name": "lib.transform.RandomGaussianBlur", "line_number": 70, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 70, "usage_type": "name"}, {"api_name": "lib.transform.RandomHorizontalFlip", "line_number": 71, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 71, "usage_type": "name"}, {"api_name": "lib.transform.Crop", "line_number": 72, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 72, "usage_type": "name"}, {"api_name": "lib.transform.ToTensor", "line_number": 73, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 73, "usage_type": "name"}, {"api_name": "lib.transform.Normalize", "line_number": 74, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 74, "usage_type": "name"}, {"api_name": "lib.dataset.SegmentationDataset", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.utils.data.distributed.DistributedSampler", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 78, "usage_type": "call"}, {"api_name": "lib.transform.Compose", "line_number": 80, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 80, "usage_type": "name"}, {"api_name": "lib.transform.Crop", "line_number": 81, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 81, "usage_type": "name"}, {"api_name": "lib.transform.ToTensor", "line_number": 82, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 82, "usage_type": "name"}, {"api_name": "lib.transform.Normalize", "line_number": 83, "usage_type": "call"}, {"api_name": "lib.transform", "line_number": 83, "usage_type": "name"}, {"api_name": "lib.dataset.SegmentationDataset", "line_number": 85, "usage_type": "call"}, {"api_name": "torch.utils.data.distributed.DistributedSampler", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.distributed.get_rank", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.save", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.long", "line_number": 155, "usage_type": "attribute"}, {"api_name": "torch.distributed.all_reduce", "line_number": 156, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 156, "usage_type": "name"}, {"api_name": "torch.argmax", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.distributed.all_reduce", "line_number": 161, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.distributed.get_rank", "line_number": 169, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 169, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 172, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 181, 
"usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.distributed.get_rank", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 184, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.nn.functional.interpolate", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 199, "usage_type": "name"}, {"api_name": "torch.long", "line_number": 204, "usage_type": "attribute"}, {"api_name": "torch.distributed.all_reduce", "line_number": 205, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 205, "usage_type": "name"}, {"api_name": "torch.argmax", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.distributed.all_reduce", "line_number": 211, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 211, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 219, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 220, "usage_type": "call"}, {"api_name": "torch.distributed.get_rank", "line_number": 222, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 222, "usage_type": "name"}, {"api_name": "os.environ", "line_number": 232, "usage_type": "attribute"}, {"api_name": "torch.distributed.init_process_group", "line_number": 233, "usage_type": "call"}, {"api_name": "torch.distributed", "line_number": 233, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 238, "usage_type": "call"}, {"api_name": "yaml.add_constructor", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 242, "usage_type": "call"}, {"api_name": "os.path", "line_number": 242, "usage_type": "attribute"}, {"api_name": "yaml.load", "line_number": 244, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 246, "usage_type": "call"}, {"api_name": "torch.cuda.device_count", "line_number": 251, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 251, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 252, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 253, "usage_type": "attribute"}, {"api_name": "torch.multiprocessing.spawn", "line_number": 255, "usage_type": "call"}, {"api_name": "torch.multiprocessing", "line_number": 255, "usage_type": "name"}]} +{"seq_id": "632858265", "text": "import pygame\r\nimport random\r\nimport math\r\nimport time\r\n#import numpy as np\r\nimport os\r\nx = 0\r\ny = 60\r\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (x,y)\r\n\r\npygame.init()\r\nwin = pygame.display.set_mode((1200,800))\r\n\r\npygame.display.set_caption(\"Self driving car\")\r\n\r\nx = 20\r\ny = 20\r\nwidth = 10\r\nheight = 10\r\nvel = 5\r\n\r\ndef map1(win):\r\n width = 40\r\n #pygame.draw.rect(win, (255,255,255),(0,0,1500,20))\r\n #pygame.draw.polygon (win, (255,255,255), [(0, 200), (200, 800), (0, 1550)])\r\n #pygame.draw.polygon (win, (255,255,255), [(200, 0), (500, 500), (900, 0)])\r\n #pygame.draw.polygon (win, (255,255,255), [(900, 800), (1550, 500), (1550, 800)])\r\n\r\n map = [[0,0,0,1,1,1,1,1,1,1,0,0],\r\n [0,0,0,1,1,1,1,1,1,1,0,0],\r\n [0,0,0,1,1,1,1,1,1,1,0,0],\r\n [1,0,0,0,1,1,1,1,1,0,0,0],\r\n [1,1,0,0,0,1,1,1,1,0,0,0],\r\n [1,1,1,0,0,0,1,1,1,0,0,0],\r\n [1,1,1,1,0,0,0,0,1,0,0,0],\r\n [1,1,1,1,1,0,0,0,0,0,0,0]]\r\n\r\n map_full = []\r\n for yy,i in enumerate(map):\r\n temp = []\r\n for xx,j in enumerate(i):\r\n 
if j == 1:\r\n pygame.draw.rect(win,(255,255,255),(xx*100,yy*100,100,100))\r\n for i in range(100):\r\n temp.append(1)\r\n else:\r\n pygame.draw.rect(win,(0,0,0),(xx*100,yy*100,100,100))\r\n for i in range(100):\r\n temp.append(0)\r\n\r\n for i in range(100):\r\n map_full.append(temp)\r\n \r\n return map_full\r\n \r\n\r\nclass Car:\r\n \r\n def __init__(self, x, y):\r\n self.pos_x = x\r\n self.pos_y = y\r\n self.init_x = x\r\n self.init_y = y\r\n self.velocity = 5\r\n self.size = 10\r\n self.fitness = 0\r\n self.alive = True\r\n self.color = (60, 130, 240)\r\n self.survivor = False\r\n self.win = False\r\n self.seq_counter = 0\r\n self.move_sequence = []\r\n '''\r\n sequence\r\n 1 = up\r\n 2 = down\r\n 3 = left\r\n 4 = right\r\n 5 = up left\r\n 6 = up right\r\n 7 = down left\r\n 8 = down right\r\n '''\r\n \r\n def up(self):\r\n self.pos_y -= self.velocity\r\n def down(self):\r\n self.pos_y += self.velocity\r\n def right(self):\r\n self.pos_x += self.velocity\r\n def left(self):\r\n self.pos_x -= self.velocity\r\n def set_survivor(self,s):\r\n self.survivor = s\r\n self.color = (0, 183, 180)\r\n def get_survivor(self):\r\n return self.survivor\r\n def add_fitness(self):\r\n self.fitness += 1\r\n def deduct_fitness(self):\r\n self.fitness -= 1\r\n def get_fitness(self):\r\n return self.fitness\r\n def update(self,win):\r\n # Seq is empty\r\n if self.alive == True and len(self.move_sequence)==0:\r\n seq = random.randint(1,8)\r\n if seq == 1:\r\n self.up()\r\n elif seq == 2:\r\n self.down()\r\n elif seq == 3:\r\n self.left()\r\n elif seq == 4:\r\n self.right()\r\n elif seq == 5:\r\n self.up()\r\n self.left()\r\n elif seq == 6:\r\n self.up()\r\n self.right()\r\n elif seq == 7:\r\n self.down()\r\n self.left()\r\n elif seq == 8:\r\n self.down()\r\n self.right()\r\n self.move_sequence.append(seq)\r\n self.seq_counter += 1\r\n # Run out of sequence\r\n elif self.alive == True and len(self.move_sequence) == self.seq_counter:\r\n seq = self.move_sequence[-1]\r\n for rep in range(2):\r\n if seq == 1:\r\n self.up()\r\n elif seq == 2:\r\n self.down()\r\n elif seq == 3:\r\n self.left()\r\n elif seq == 4:\r\n self.right()\r\n elif seq == 5:\r\n self.up()\r\n self.left()\r\n elif seq == 6:\r\n self.up()\r\n self.right()\r\n elif seq == 7:\r\n self.down()\r\n self.left()\r\n elif seq == 8:\r\n self.down()\r\n self.right()\r\n\r\n self.move_sequence.append(seq)\r\n self.seq_counter += 1\r\n # Going through sequence\r\n elif self.alive == True and len(self.move_sequence) > self.seq_counter:\r\n seq = self.move_sequence[self.seq_counter]\r\n for rep in range(2):\r\n if seq == 1:\r\n self.up()\r\n elif seq == 2:\r\n self.down()\r\n elif seq == 3:\r\n self.left()\r\n elif seq == 4:\r\n self.right()\r\n elif seq == 5:\r\n self.up()\r\n self.left()\r\n elif seq == 6:\r\n self.up()\r\n self.right()\r\n elif seq == 7:\r\n self.down()\r\n self.left()\r\n elif seq == 8:\r\n self.down()\r\n self.right()\r\n \r\n self.seq_counter += 1\r\n\r\n elif self.alive == True:\r\n self.seq_counter -= 1\r\n\r\n\r\n pygame.draw.rect(win, self.color, (self.pos_x,self.pos_y,self.size,self.size))\r\n\r\n\r\n def dead(self):\r\n self.alive = False\r\n self.color = (200,0,0)\r\n def edo_tensei(self):\r\n self.alive = True\r\n self.color = (60, 130, 240)\r\n self.seq_counter = 0\r\n temp = [1,2,3,4,5,6,7,8]\r\n try:\r\n last_move = self.move_sequence[-1]\r\n except:\r\n last_move = random.choice(temp)\r\n if last_move == 1:\r\n temp.remove(2)\r\n elif last_move == 2:\r\n temp.remove(1)\r\n elif last_move == 3:\r\n temp.remove(4)\r\n elif 
last_move == 4:\r\n temp.remove(3)\r\n elif last_move == 5:\r\n temp.remove(8)\r\n elif last_move == 6:\r\n temp.remove(7)\r\n elif last_move == 7:\r\n temp.remove(6)\r\n elif last_move == 8:\r\n temp.remove(5)\r\n choice = random.choice(temp)\r\n self.move_sequence.append(choice)\r\n self.move_sequence.append(choice)\r\n self.move_sequence.append(choice)\r\n self.move_sequence.append(choice)\r\n self.move_sequence.append(choice)\r\n def x(self):\r\n return self.pos_x\r\n def y(self):\r\n return self.pos_y\r\n def get_seq(self):\r\n return self.move_sequence\r\n def get_alive(self):\r\n return self.alive\r\n def set_seq(self,new_seq):\r\n self.move_sequence = new_seq\r\n self.pos_x = self.init_x\r\n self.pos_y = self.init_y\r\n def set_win(self,s):\r\n self.win = s\r\n def get_distance(self):\r\n distance = math.sqrt((self.pos_x-self.init_x)**2+(self.pos_y-self.init_y)**2)\r\n return distance\r\n def get_distance_to_goal(self,x1,y1):\r\n distance = math.sqrt((self.pos_x-x1)**2+(self.pos_y-y1)**2)\r\n return distance\r\n\r\ndef selection(cars,number_of_survivor):\r\n temp = []\r\n for car in cars:\r\n car.set_survivor(False)\r\n temp.append([car,car.get_fitness()])\r\n\r\n temp.sort(key = lambda temp: temp[1], reverse = True)\r\n temp2 = []\r\n for i in temp:\r\n temp2.append(i[0])\r\n fittest = temp2[0:4]\r\n for i in fittest:\r\n i.set_survivor(True)\r\n return fittest\r\n\r\ndef crossover(survivors, cars):\r\n # Get survivors sequence\r\n seqs = []\r\n for survivor in survivors:\r\n seqs.append(survivor.get_seq())\r\n\r\n number_of_survivor = len(survivors)\r\n c1 = number_of_survivor-1\r\n c2 = 0\r\n for i,car in enumerate(cars):\r\n car.set_seq(seqs[c2])\r\n c2 += 1\r\n if c2==c1:\r\n c2 = 0\r\n \r\n return cars\r\n\r\n\r\ndef mutation(cars):\r\n # Take only 80% of the sequence\r\n for car in cars:\r\n temp = car.get_seq()\r\n last_move = temp[-1]\r\n count_last_move = 0\r\n for i in temp[::-1]:\r\n if i == last_move:\r\n count_last_move += 1\r\n \r\n \r\n try:\r\n car.set_seq(temp[0:-5])\r\n except:\r\n car.set_seq(temp[0:-1])\r\n car.edo_tensei()\r\n return cars\r\n\r\ndef normalization(cars,pattern):\r\n for car in cars:\r\n temp = car.get_seq()\r\n new = []\r\n count_x = 0\r\n count_y = 0\r\n for j,i in enumerate(temp):\r\n if i == 1:\r\n count_y -= 1\r\n elif i == 2:\r\n count_y += 1\r\n elif i == 3:\r\n count_x -= 1\r\n elif i == 4:\r\n count_x += 1\r\n if j%pattern == 0:\r\n for a in range(abs(count_x)+abs(count_y)):\r\n if count_x < 0:\r\n new.append(3)\r\n count_x += 1\r\n elif count_x > 0:\r\n new.append(4)\r\n count_x -= 1\r\n if count_y < 0:\r\n new.append(1)\r\n count_y += 1\r\n elif count_y > 0:\r\n new.append(2)\r\n count_y -= 1\r\n car.set_seq(new)\r\n return cars\r\n\r\ndef normalization2(cars,pattern):\r\n for car in cars:\r\n temp = car.get_seq()\r\n new = []\r\n count_x = 0\r\n count_y = 0\r\n last_move = 0\r\n count = 0\r\n for i in temp:\r\n if i == last_move:\r\n count += 1\r\n else:\r\n last_move = i\r\n count = 0\r\n if count == pattern:\r\n for j in range(pattern):\r\n new.append(last_move)\r\n\r\n\r\n car.set_seq(new)\r\n return cars\r\n\r\ndef find_shortest_route(cars):\r\n shortest = []\r\n min_len = 9999999999999\r\n for car in cars:\r\n if len(car.get_seq()) < min_len:\r\n min_len = len(car.get_seq())\r\n for car in cars:\r\n if len(car.get_seq()) == min_len:\r\n shortest = car.get_seq()\r\n return shortest\r\n \r\n\r\npopulation = 20\r\nnumber_of_survivor = 5\r\n# Create population\r\ncars = []\r\nfor i in range(population):\r\n car = Car(x,y)\r\n 
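# Editor's aside (added, not in the original record): the cars created here are
# evolved each time the whole population dies: selection() keeps the four
# highest-fitness cars, crossover() copies their move sequences across the
# population, and mutation() trims the last five moves before edo_tensei()
# appends a fresh randomly chosen run of five moves.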
cars.append(car)\r\n\r\npause = False\r\nrun = True\r\ngen = 1\r\nprint(\"Generation:\", gen)\r\nwhile run:\r\n pygame.time.delay(10)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n run = False\r\n\r\n win.fill((0,0,0))\r\n\r\n\r\n #print(\"position:\",x,y)\r\n map = map1(win)\r\n\r\n # Count winners\r\n winners = []\r\n # Check if they are alive\r\n for car in cars:\r\n # Out of window\r\n if car.y()>800-30 or car.y()<0+10 or car.x()<0+10 or car.x()>1200-30:\r\n car.dead()\r\n # Reach goal\r\n if car.x()>=1080 and car.x()<=1080+50 and car.y()>=15 and car.y()<=15+50:\r\n car.set_win(True)\r\n winners.append(car)\r\n car.dead()\r\n # If hit wall\r\n try:\r\n if map[int(car.y())][int(car.x())] == 1:\r\n car.dead()\r\n except:\r\n car.dead() \r\n car.update(win)\r\n\r\n # Count dead cars\r\n dead_cars = 0\r\n for car in cars:\r\n if car.get_alive() == False:\r\n dead_cars += 1\r\n #print(\"dead cars:\",dead_cars)\r\n if dead_cars == population:\r\n if len(winners) > 0:\r\n shortest_seq = find_shortest_route(winners)\r\n file1 = open(\"3.txt\",\"w\")\r\n for i in shortest_seq:\r\n file1.write(str(i))\r\n\r\n run = False\r\n else:\r\n \r\n # Find who went farthest\r\n car_and_distance = []\r\n for car in cars:\r\n car_and_distance.append([car,car.get_distance()])\r\n car_and_distance.sort(key = lambda car_and_distance: car_and_distance[1], reverse = True)\r\n '''for i in car_and_distance[0:3]:\r\n print(int(i[1]),end=\":\")\r\n print(i[0].get_seq()[0],end=\" \")\r\n print()'''\r\n for car in cars:\r\n car.set_survivor(False)\r\n for i in range(number_of_survivor):\r\n car_and_distance[i][0].add_fitness()\r\n j = -1*(i+1)\r\n car_and_distance[j][0].deduct_fitness()\r\n \r\n new_cars = []\r\n for i,j in car_and_distance:\r\n new_cars.append(i)\r\n '''\r\n print(\"Farthest pos:\",new_cars[0].x(),new_cars[0].y())\r\n pygame.draw.rect(win,(255,0,255),(new_cars[0].x(),new_cars[0].y(),50,50))\r\n print(\"Farthest pos:\",new_cars[1].x(),new_cars[1].y())\r\n pygame.draw.rect(win,(255,50,255),(new_cars[1].x(),new_cars[1].y(),50,50))\r\n print(\"Farthest pos:\",new_cars[2].x(),new_cars[2].y())\r\n pygame.draw.rect(win,(255,100,255),(new_cars[2].x(),new_cars[2].y(),50,50))\r\n '''\r\n # Find who went nearest to the goal\r\n near_to_goal = []\r\n for car in new_cars:\r\n near_to_goal.append([car,car.get_distance_to_goal(1080,15)])\r\n near_to_goal.sort(key = lambda near_to_goal: near_to_goal[1])\r\n '''for i in near_to_goal[0:3]:\r\n print(int(i[1]),end=\":\")\r\n print(i[0].get_seq()[0],end=\" \")\r\n print()'''\r\n for i in range(number_of_survivor):\r\n near_to_goal[i][0].add_fitness()\r\n j = -1*(i+1)\r\n near_to_goal[j][0].deduct_fitness()\r\n new_cars = []\r\n for i,j in near_to_goal:\r\n new_cars.append(i)\r\n \r\n new_cars = cars\r\n # Evolution\r\n fittest = selection(new_cars, number_of_survivor)\r\n print(\"Fittest num:\",len(fittest))\r\n cars = []\r\n \r\n '''print(\"Fittest pos:\",fittest[0].x(),fittest[0].y())\r\n pygame.draw.rect(win,(255,255,(0*100)),(fittest[0].x(),fittest[0].y(),50,50))\r\n print(\"Fittest pos:\",fittest[1].x(),fittest[1].y())\r\n pygame.draw.rect(win,(255,255,(1*100)),(fittest[1].x(),fittest[1].y(),50,50))\r\n print(\"Fittest pos:\",fittest[2].x(),fittest[2].y())\r\n pygame.draw.rect(win,(255,255,(2*100)),(fittest[2].x(),fittest[2].y(),50,50))'''\r\n pause = True\r\n \r\n cars = crossover(fittest,new_cars)\r\n #print(cars[0].get_seq())\r\n cars = mutation(cars)\r\n #print(cars[0].get_seq())\r\n #cars = normalization2(cars,1000)\r\n gen += 
1\r\n print(\"Generation:\", gen)\r\n if dead_cars == population-number_of_survivor:\r\n for car in cars:\r\n if car.get_alive()==True:\r\n #car.add_fitness()\r\n pass\r\n\r\n # Draw goal\r\n pygame.draw.rect(win, (0,250,0), (1080,15,50,50))\r\n \r\n \r\n\r\n\r\n pygame.display.update()\r\n if False:\r\n time.sleep(1)\r\n pause = False\r\n if run == False:\r\n print(\"Game over\")\r\n\r\nn = \"\\n\"*5\r\nprint(n)\r\npygame.quit()", "sub_path": "Main2.py", "file_name": "Main2.py", "file_ext": "py", "file_size_in_byte": 15264, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "69", "api": [{"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 47, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 47, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 107, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 188, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 188, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 202, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 219, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 240, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 243, "usage_type": "call"}, {"api_name": "pygame.time.delay", "line_number": 376, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 376, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 378, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 378, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 379, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 496, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 496, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 501, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 501, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 503, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 510, "usage_type": "call"}]}