'\n        d = 0\n        for i in range(50):\n            if d < 5:\n                table += '<td>%s</td>' % (data)\n                d += 1\n            else:\n                d = 0\n                table += '</tr><tr>'\n        table += '</table>'\n        return table\n"},"avg_line_length":{"kind":"number","value":23.3389830508,"string":"23.338983"},"max_line_length":{"kind":"number","value":166,"string":"166"},"alphanum_fraction":{"kind":"number","value":0.5541031227,"string":"0.554103"}}},{"rowIdx":46212,"cells":{"hexsha":{"kind":"string","value":"b4f5d2c23339f19e3ae93bc4428eea9de9f2fc0e"},"size":{"kind":"number","value":927,"string":"927"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"photocapture.py"},"max_stars_repo_name":{"kind":"string","value":"Torreshan/Nao"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8ad8eb9b92ce303ad79892efa2a812a1d971e058"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"photocapture.py"},"max_issues_repo_name":{"kind":"string","value":"Torreshan/Nao"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8ad8eb9b92ce303ad79892efa2a812a1d971e058"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"photocapture.py"},"max_forks_repo_name":{"kind":"string","value":"Torreshan/Nao"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8ad8eb9b92ce303ad79892efa2a812a1d971e058"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from naoqi import ALProxy\nimport qi\nimport os\nimport sys\nimport time\nimport math\nimport argparse\nIP = \"10.0.29.2\"\nPORT = 9559\n# try:\n# \tphotoCaptureProxy = ALProxy(\"ALPhotoCapture\",IP,PORT)\n# except Exception, e:\n# \tprint(str(e))\n# \texit(1)\n# photoCaptureProxy.setResolution(1)\n# photoCaptureProxy.setPictureFormat(\"jpg\")\n# photoCaptureProxy.setCameraID(0)\n# photoCaptureProxy.takePicture(\"/home/fengze/imagge/\", \"image\")\n\n#tts = ALProxy(\"ALTextToSpeech\", \"10.0.29.2\",9559)\n#tts.say(\"Fuck you Chen Zhong\")\nmotion = ALProxy(\"ALMotion\",\"10.0.29.2\",9559)\nmotion.setStiffnesses(\"Body\",1.0)\ntts = ALProxy(\"ALTextToSpeech\",\"10.0.29.2\",9559)\n#session = qi.Session()\n#session.connect(IP+str(PORT))\n#motion_service = session.service(\"ALMotion\")\n#motion_service.wakeUp()\neffectorName = \"Head\"\nisEnable = False\nmotion.wbEnableEffectorControl(effectorName,isEnable)\nmotion.moveInit()\nmotion.moveTo(0.5,0,0)\ntts.say(\"I detecting a 
circle\")\n"},"avg_line_length":{"kind":"number","value":26.4857142857,"string":"26.485714"},"max_line_length":{"kind":"number","value":64,"string":"64"},"alphanum_fraction":{"kind":"number","value":0.7464940669,"string":"0.746494"}}},{"rowIdx":46213,"cells":{"hexsha":{"kind":"string","value":"370c93002c073ba2292ee088f98b1ea7827c0053"},"size":{"kind":"number","value":3901,"string":"3,901"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"test/test_npu/test_grid_sampler.py"},"max_stars_repo_name":{"kind":"string","value":"Ascend/pytorch"},"max_stars_repo_head_hexsha":{"kind":"string","value":"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-12-02T03:07:35.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-12-02T03:07:35.000Z"},"max_issues_repo_path":{"kind":"string","value":"test/test_npu/test_grid_sampler.py"},"max_issues_repo_name":{"kind":"string","value":"Ascend/pytorch"},"max_issues_repo_head_hexsha":{"kind":"string","value":"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":1,"string":"1"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-11-12T07:23:03.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-11-12T08:28:13.000Z"},"max_forks_repo_path":{"kind":"string","value":"test/test_npu/test_grid_sampler.py"},"max_forks_repo_name":{"kind":"string","value":"Ascend/pytorch"},"max_forks_repo_head_hexsha":{"kind":"string","value":"39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Copyright (c) 2020 Huawei Technologies Co., Ltd\n# Copyright (c) 2019, Facebook CORPORATION. 
\n# All rights reserved.\n#\n# Licensed under the BSD 3-Clause License (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://opensource.org/licenses/BSD-3-Clause\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport numpy as np\nimport sys\nimport copy\nfrom common_utils import TestCase, run_tests\nfrom common_device_type import dtypes, instantiate_device_type_tests\nfrom util_test import create_common_tensor\n\nclass TestGridSampler(TestCase):\n def cpu_op_exec(self, input1, input2, interpolation_mode = 0, padding_mode = 0, align_corners = True):\n output = torch.grid_sampler(input1, input2, interpolation_mode, padding_mode, align_corners)\n output = output.numpy()\n return output\n\n def npu_op_exec(self, input1, input2, interpolation_mode = 0, padding_mode = 0, align_corners = True):\n output = torch.grid_sampler(input1, input2, interpolation_mode, padding_mode, align_corners)\n output = output.to(\"cpu\")\n output = output.numpy()\n return output\n\n def cpu_op_fp16_exec(self, input1, input2, interpolation_mode = 0, padding_mode = 0, align_corners = True):\n input1 = input1.to(torch.float32)\n input2 = input2.to(torch.float32)\n output = torch.grid_sampler(input1, input2, interpolation_mode, padding_mode, align_corners)\n output = output.numpy()\n output = output.astype(np.float16)\n return output\n\n def test_grid_sampler(self, device):\n shape_format = [\n [[[np.float32, -1, (100, 1, 28, 28)],[np.float32, -1, (100, 1, 1, 2)]],\n [[np.float32, -1, (100, 64, 32, 28)],[np.float32, -1, (100, 1, 1, 2)]],\n [[np.float32, -1, (2000, 1, 28, 28)],[np.float32, -1, (2000, 1, 1, 2)]]],\n [[[np.float16, -1, (1, 1, 3, 3)],[np.float16, -1, (1, 2, 2, 2)]],\n [[np.float16, -1, (1, 2, 3, 4)],[np.float16, -1, (1, 2, 2, 2)]]]\n ]\n for item in shape_format[0]:\n cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 100)\n cpu_input2, npu_input2 = create_common_tensor(item[1], -1, 1)\n cpu_output1 = self.cpu_op_exec(cpu_input1,cpu_input2)\n npu_output1 = self.npu_op_exec(npu_input1,npu_input2)\n cpu_output2 = self.cpu_op_exec(cpu_input1,cpu_input2, 0, 1, True)\n npu_output2 = self.npu_op_exec(npu_input1,npu_input2, 0, 1, True)\n cpu_output3 = self.cpu_op_exec(cpu_input1,cpu_input2, 0, 0, False)\n npu_output3 = self.npu_op_exec(npu_input1,npu_input2, 0, 0, False)\n cpu_output4 = self.cpu_op_exec(cpu_input1,cpu_input2, 1, 0, False)\n npu_output4 = self.npu_op_exec(npu_input1,npu_input2, 1, 0, False)\n self.assertRtolEqual(cpu_output1, npu_output1)\n self.assertRtolEqual(cpu_output2, npu_output2)\n self.assertRtolEqual(cpu_output3, npu_output3)\n self.assertRtolEqual(cpu_output4, npu_output4)\n\n for item in shape_format[1]:\n cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 10)\n cpu_input2, npu_input2 = create_common_tensor(item[1], -1, 1)\n cpu_output = self.cpu_op_fp16_exec(cpu_input1,cpu_input2)\n npu_output = self.npu_op_exec(npu_input1,npu_input2)\n self.assertRtolEqual(cpu_output, npu_output)\n\ninstantiate_device_type_tests(TestGridSampler, globals(), except_for=\"cpu\")\nif __name__ == \"__main__\":\n torch.npu.set_device(\"npu:2\")\n 
run_tests()\n"},"avg_line_length":{"kind":"number","value":48.7625,"string":"48.7625"},"max_line_length":{"kind":"number","value":111,"string":"111"},"alphanum_fraction":{"kind":"number","value":0.6744424507,"string":"0.674442"}}},{"rowIdx":46214,"cells":{"hexsha":{"kind":"string","value":"25dccf9239f14d6781ec3852503a2e46d418763d"},"size":{"kind":"number","value":2955,"string":"2,955"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"lbry/tests/integration/test_account_commands.py"},"max_stars_repo_name":{"kind":"string","value":"Nykseli/lbry-sdk"},"max_stars_repo_head_hexsha":{"kind":"string","value":"07afc0aa0a1e6c0ef6aa284fb47513af940440c1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"lbry/tests/integration/test_account_commands.py"},"max_issues_repo_name":{"kind":"string","value":"Nykseli/lbry-sdk"},"max_issues_repo_head_hexsha":{"kind":"string","value":"07afc0aa0a1e6c0ef6aa284fb47513af940440c1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":4,"string":"4"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-10-27T21:53:05.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-11T03:10:54.000Z"},"max_forks_repo_path":{"kind":"string","value":"lbry/tests/integration/test_account_commands.py"},"max_forks_repo_name":{"kind":"string","value":"braveheart12/lbry-sdk"},"max_forks_repo_head_hexsha":{"kind":"string","value":"dc709b468f9dce60d206161785def5c7ace2b763"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from lbry.testcase import CommandTestCase\n\n\nclass AccountManagement(CommandTestCase):\n\n async def test_account_list_set_create_remove_add(self):\n # check initial account\n response = await self.daemon.jsonrpc_account_list()\n self.assertEqual(len(response['lbc_regtest']), 1)\n\n # change account name and gap\n account_id = response['lbc_regtest'][0]['id']\n self.daemon.jsonrpc_account_set(\n account_id=account_id, new_name='test account',\n receiving_gap=95, receiving_max_uses=96,\n change_gap=97, change_max_uses=98\n )\n response = (await self.daemon.jsonrpc_account_list())['lbc_regtest'][0]\n self.assertEqual(response['name'], 'test account')\n self.assertEqual(\n response['address_generator']['receiving'],\n {'gap': 95, 'maximum_uses_per_address': 96}\n )\n self.assertEqual(\n response['address_generator']['change'],\n {'gap': 97, 'maximum_uses_per_address': 98}\n )\n\n # create another account\n await self.daemon.jsonrpc_account_create('second account')\n response = await self.daemon.jsonrpc_account_list()\n self.assertEqual(len(response['lbc_regtest']), 2)\n self.assertEqual(response['lbc_regtest'][1]['name'], 'second account')\n account_id2 = response['lbc_regtest'][1]['id']\n\n # make new account the default\n self.daemon.jsonrpc_account_set(account_id=account_id2, default=True)\n response = await self.daemon.jsonrpc_account_list(show_seed=True)\n 
self.assertEqual(response['lbc_regtest'][0]['name'], 'second account')\n\n account_seed = response['lbc_regtest'][1]['seed']\n\n # remove account\n self.daemon.jsonrpc_account_remove(response['lbc_regtest'][1]['id'])\n response = await self.daemon.jsonrpc_account_list()\n self.assertEqual(len(response['lbc_regtest']), 1)\n\n # add account\n await self.daemon.jsonrpc_account_add('recreated account', seed=account_seed)\n response = await self.daemon.jsonrpc_account_list()\n self.assertEqual(len(response['lbc_regtest']), 2)\n self.assertEqual(response['lbc_regtest'][1]['name'], 'recreated account')\n\n # list specific account\n response = await self.daemon.jsonrpc_account_list(account_id, include_claims=True)\n self.assertEqual(response['name'], 'recreated account')\n\n async def test_wallet_migration(self):\n # null certificates should get deleted\n await self.channel_create('@foo1')\n await self.channel_create('@foo2')\n await self.channel_create('@foo3')\n keys = list(self.account.channel_keys.keys())\n self.account.channel_keys[keys[0]] = None\n self.account.channel_keys[keys[1]] = \"some invalid junk\"\n await self.account.maybe_migrate_certificates()\n self.assertEqual(list(self.account.channel_keys.keys()), [keys[2]])\n"},"avg_line_length":{"kind":"number","value":43.4558823529,"string":"43.455882"},"max_line_length":{"kind":"number","value":90,"string":"90"},"alphanum_fraction":{"kind":"number","value":0.6663282572,"string":"0.666328"}}},{"rowIdx":46215,"cells":{"hexsha":{"kind":"string","value":"d3652967a0a44ada75b91bd2621fd4ee20045949"},"size":{"kind":"number","value":7471,"string":"7,471"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Packs/WindowsForensics/Scripts/RegistryParse/RegistryParse.py"},"max_stars_repo_name":{"kind":"string","value":"diCagri/content"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":799,"string":"799"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-08-02T06:43:14.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T11:10:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"Packs/WindowsForensics/Scripts/RegistryParse/RegistryParse.py"},"max_issues_repo_name":{"kind":"string","value":"diCagri/content"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":9317,"string":"9,317"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-08-07T19:00:51.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T21:56:04.000Z"},"max_forks_repo_path":{"kind":"string","value":"Packs/WindowsForensics/Scripts/RegistryParse/RegistryParse.py"},"max_forks_repo_name":{"kind":"string","value":"diCagri/content"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1297,"string":"1,297"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-08-04T13:59:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-31T23:43:06.000Z"},"content":{"kind":"string","value":"import codecs\nimport configparser\n\nimport demistomock as demisto # noqa: F401\nfrom CommonServerPython import * # noqa: F401\n\nCUSTOM_REG_TYPE = 'Custom'\n\nREGISTRY_TYPE_TO_KEY = {\n 'Users': [r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList'],\n 'MachineStartup': [r'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\Run'],\n 'UserStartup': [r'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run'],\n 'MachineRunOnce': [r'HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce'],\n 'UserRunOnce': [r'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\RunOnce'],\n 'Services': [\"HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\"],\n 'DelayedServices': [r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\ShellServiceObjectDelayLoad'],\n 'UserRecentApps': [r'HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Search\\RecentApps'],\n 'Timezone': [r'HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\TimeZoneInformation'],\n 'Networks': [r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\WindowsNT\\CurrentVersion\\NetworkList\\Signatures\\Unmanaged',\n r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\WindowsNT\\CurrentVersion\\NetworkList\\Signatures\\Managed',\n r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\WindowsNT\\CurrentVersion\\NetworkList\\Nla\\Cache'],\n 'USB': [r'HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Enum\\USBSTOR', r'HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Enum\\USB'],\n 'LastLoggedOnUser': [r'HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Authentication\\LogonUI']\n}\n\nREGISTRY_SUB_FOLDER = {\n 'Users': 'SID'\n}\n\n\ndef parse_reg_value(value):\n value = value.strip('\"')\n try:\n if value.startswith(\"hex\"):\n value2 = \"\".join([ch for ch in value[value.find(\":\") + 1:].split(\",\") if len(ch) == 2 and ch != '00'])\n return bytearray.fromhex(value2).decode()\n if value.startswith(\"dword\"):\n return str(int(\"0x\" + value[value.find(\":\") + 1:], 16))\n return value\n except Exception:\n return value\n\n\ndef get_registry(entry_id):\n res = demisto.getFilePath(entry_id)\n path = res['path']\n with codecs.open(path, encoding='utf-16') as myfile:\n data = myfile.read()\n\n cfg = configparser.ConfigParser(strict=False, allow_no_value=True)\n cfg.optionxform = str # type: ignore[assignment, assignment]\n cfg.read_string(data[data.find(\"[\"):], )\n reg = {} # type: ignore[var-annotated]\n for section in cfg.sections():\n try:\n if section not in reg:\n reg[section] = {}\n items = cfg.items(section)\n reg[section].update(dict(items))\n except Exception:\n reg[section] = {}\n continue\n\n return reg\n\n\ndef get_sub_keys(reg, key, folder_output_key):\n all_folders = {k for k in reg if k.startswith(key)}\n users = []\n records = []\n for folder in all_folders:\n new_key = folder[len(key):].strip(\"\\\\\")\n if new_key:\n user = reg[folder]\n user = {k.strip('\"'): parse_reg_value(v) for k, v in user.items()}\n user[folder_output_key] = new_key\n for registry_key, registry_value in user.items():\n record = {\n 'Type': 'Services',\n 'RegistryPath': folder,\n 'RegistryKey': registry_key,\n 'RegistryValue': registry_value\n }\n 
records.append(record)\n users.append(user)\n return records, users\n\n\ndef get_reg_users(reg):\n key = REGISTRY_TYPE_TO_KEY['Users'][0]\n records, users = get_sub_keys(reg, key, 'Sid')\n return records, {'Users': users}\n\n\ndef get_reg_services(reg):\n key = REGISTRY_TYPE_TO_KEY['Services'][0]\n records, users = get_sub_keys(reg, key, 'Service')\n return records, {'Services': users}\n\n\ndef get_reg_results(reg, type_to_keys):\n records = [] # type: ignore[var-annotated]\n type_records = {} # type: ignore[var-annotated]\n for _type, keys in type_to_keys.items():\n if _type == 'Users':\n users_records, users_type_records = get_reg_users(reg)\n records += users_records\n type_records.update(users_type_records)\n elif _type == 'Services':\n\n services_records, services_type_records = get_reg_services(reg)\n records += services_records\n type_records.update(services_type_records)\n elif _type == 'LastLoggedOnUser':\n key = REGISTRY_TYPE_TO_KEY['LastLoggedOnUser'][0]\n values = reg.get(key, {})\n registry_value = values.get('\"LastLoggedOnUser\"')\n if registry_value:\n registry_value = parse_reg_value(registry_value)\n records.append({\n 'Type': 'LastLoggedOnUser',\n 'RegistryPath': key,\n 'RegistryKey': 'LastLoggedOnUser',\n 'RegistryValue': registry_value\n })\n type_records['LastLoggedOnUser'] = registry_value\n else:\n all_keys = [] # type: ignore[var-annotated]\n for key in keys:\n all_keys += [k for k in reg if k.startswith(key)]\n for key in all_keys:\n registry_keys_values = reg.get(key)\n dict_key = _type if _type != CUSTOM_REG_TYPE else key\n if dict_key not in type_records:\n type_records[dict_key] = []\n if registry_keys_values:\n registry_keys_values = {k.strip('\"'): parse_reg_value(v) for k, v in registry_keys_values.items()}\n type_records[dict_key].append(registry_keys_values)\n for registry_key, registry_value in registry_keys_values.items():\n record = {\n 'Type': _type,\n 'RegistryPath': key,\n 'RegistryKey': registry_key,\n 'RegistryValue': registry_value\n }\n records.append(record)\n return records, type_records\n\n\ndef main():\n args = demisto.args()\n reg = get_registry(args.get('entryID'))\n registry_data = args.get('registryData')\n if registry_data == 'All':\n registry_types = REGISTRY_TYPE_TO_KEY.keys()\n elif registry_data == 'None':\n registry_types = [] # type: ignore[assignment]\n else:\n registry_types = argToList(registry_data)\n registry_types = [x for x in registry_types if x in REGISTRY_TYPE_TO_KEY] # type: ignore[assignment]\n\n registry_types_to_keys = {k: REGISTRY_TYPE_TO_KEY[k] for k in registry_types}\n custom_reg_paths = args.get('customRegistryPaths')\n if custom_reg_paths:\n for reg_path in argToList(custom_reg_paths):\n reg_path = reg_path.strip()\n if reg_path:\n if CUSTOM_REG_TYPE not in registry_types_to_keys:\n registry_types_to_keys[CUSTOM_REG_TYPE] = []\n registry_types_to_keys[CUSTOM_REG_TYPE].append(reg_path)\n\n records, type_records = get_reg_results(reg, registry_types_to_keys)\n\n hr = tableToMarkdown(\"Registry Results\", records[:50])\n return_outputs(hr, {\"RegistryForensicDataRaw\": records, 'RegistryForensicData': type_records}, records)\n\n\nif __name__ in ['__main__', '__builtin__', 'builtins']:\n 
main()\n"},"avg_line_length":{"kind":"number","value":41.5055555556,"string":"41.505556"},"max_line_length":{"kind":"number","value":130,"string":"130"},"alphanum_fraction":{"kind":"number","value":0.6281622273,"string":"0.628162"}}},{"rowIdx":46216,"cells":{"hexsha":{"kind":"string","value":"9f44000ff388396daa8bc14c437e71a462a1e85b"},"size":{"kind":"number","value":13579,"string":"13,579"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"engine/model.py"},"max_stars_repo_name":{"kind":"string","value":"HyechurnJang/rdhcp"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6e9e983527a501b3f0d1bfcb796b62868b5b6292"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"engine/model.py"},"max_issues_repo_name":{"kind":"string","value":"HyechurnJang/rdhcp"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6e9e983527a501b3f0d1bfcb796b62868b5b6292"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"engine/model.py"},"max_forks_repo_name":{"kind":"string","value":"HyechurnJang/rdhcp"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6e9e983527a501b3f0d1bfcb796b62868b5b6292"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-10-07T00:24:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-10-07T00:24:45.000Z"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\r\n'''\r\nCreated on 2018. 3. 
30.\r\n@author: HyechurnJang\r\n'''\r\n\r\nimport os\r\nimport pygics\r\nfrom sql import *\r\nfrom ipaddress import ip_network\r\nfrom netifaces import ifaddresses, AF_INET, AF_LINK\r\n\r\nDEBUG = True\r\n\r\ndb = Sql(\r\n Mysql(\r\n os.environ.get('RDHCP_DATABASE', 'localhost'),\r\n 'root',\r\n os.environ.get('RDHCP_PASSWORD', 'rdhcp'),\r\n 'rdhcp'\r\n )\r\n)\r\n\r\ndef cli(cmd, force=False):\r\n ret = os.system(cmd)\r\n if ret > 0 and not force: raise Exception('CMD(%s) >> %d' % (cmd, ret))\r\n\r\n@model(db)\r\nclass NTP(Model):\r\n \r\n server = String(256)\r\n \r\n def __init__(self, server):\r\n self.server = server\r\n \r\n def toDict(self):\r\n return {'server' : self.server}\r\n\r\n@model(db)\r\nclass Interface(Model):\r\n \r\n name = String(32)\r\n ns_id = Integer()\r\n ns_name = String(32)\r\n mac = String(24)\r\n ip = String(16)\r\n net = String(16)\r\n mask = String(16)\r\n cidr = String(24)\r\n prefix = String(4)\r\n \r\n def __init__(self, name):\r\n self.name = name\r\n self.ns_id = 0\r\n self.ns_name = ''\r\n \r\n def __sync__(self):\r\n addrs = ifaddresses(self.name)\r\n try: self.mac = addrs[AF_LINK][0]['addr']\r\n except: self.mac = '00:00:00:00:00:00'\r\n try:\r\n ip_0 = addrs[AF_INET][0]\r\n self.ip = ip_0['addr']\r\n self.mask = ip_0['netmask']\r\n except:\r\n self.ip = '0.0.0.0'\r\n self.mask = '255.255.255.255'\r\n try:\r\n network = ip_network(unicode('%s/%s' % (self.ip, self.mask)), strict=False)\r\n self.net = str(network.network_address)\r\n self.prefix = str(network.prefixlen)\r\n except:\r\n self.net = '255.255.255.255'\r\n self.prefix = '32'\r\n self.cidr = '%s/%s' % (self.ip, self.prefix)\r\n \r\n def sync(self):\r\n if not self.ns_id:\r\n self.__sync__()\r\n self.update()\r\n return self\r\n \r\n def deploy(self):\r\n if self.ns_id: raise Exception('interface assigned to namespace')\r\n cli('ifconfig %s %s netmask %s up' % (self.name, self.ip, self.mask))\r\n return self\r\n \r\n def setIP(self, ip, mask):\r\n if self.ns_id: raise Exception('interface assigned to namespace')\r\n cli('ifconfig %s %s netmask %s up' % (self.name, ip, mask))\r\n self.ip = ip\r\n self.mask = mask\r\n try:\r\n network = ip_network(unicode('%s/%s' % (self.ip, self.mask)), strict=False)\r\n self.net = str(network.network_address)\r\n self.prefix = str(network.prefixlen)\r\n except:\r\n self.net = '255.255.255.255'\r\n self.prefix = '32'\r\n self.cidr = '%s/%s' % (self.ip, self.prefix)\r\n self.update()\r\n return self\r\n \r\n def createNameSpace(self, ns_name, range='', gw='', dns='', ntp=''):\r\n if self.ns_id: raise Exception('interface assigned to namespace')\r\n if self.ip == '0.0.0.0': raise Exception('interface ip is not assigned')\r\n if not gw: gw = self.ip\r\n if not dns: dns = self.ip\r\n # if not ntp: ntp = os.environ.get('RDHCP_IF_MGMT_IP')\r\n if not ntp: ntp = self.ip\r\n ns = NameSpace(self, ns_name, range, gw, dns, ntp).create()\r\n self.ns_id = ns.id\r\n self.ns_name = ns.name\r\n self.update()\r\n return ns\r\n \r\n def create(self):\r\n self.__sync__()\r\n return Model.create(self)\r\n \r\n def delete(self):\r\n if self.ns_id:\r\n ns = NameSpace.get(self.ns_id)\r\n if ns:\r\n ns.__delete_namespace__()\r\n hosts = Host.list(Host.ns_id==ns.id)\r\n for host in hosts: Model.delete(host)\r\n Model.delete(ns)\r\n return Model.delete(self)\r\n \r\n def toDict(self):\r\n return {\r\n 'id' : self.id,\r\n 'name' : self.name,\r\n 'ns_id' : self.ns_id,\r\n 'ns_name' : self.ns_name,\r\n 'mac' : self.mac,\r\n 'ip' : self.ip,\r\n 'net' : self.net,\r\n 'mask' : self.mask,\r\n 
'cidr' : self.cidr,\r\n 'prefix' : self.prefix\r\n }\r\n\r\n@model(db)\r\nclass NameSpace(Model):\r\n \r\n name = String(32)\r\n pid = Integer()\r\n if_id = Integer()\r\n if_name = String(32)\r\n if_mac = String(24)\r\n if_ip = String(16)\r\n net = String(16)\r\n mask = String(16)\r\n range = String(32)\r\n gw = String(16)\r\n dns = String(16)\r\n ntp = String(16)\r\n \r\n def __init__(self, intf, name, range, gw, dns, ntp):\r\n self.name = name\r\n self.pid = 0\r\n self.if_id = intf.id\r\n self.if_name = intf.name\r\n self.if_mac = intf.mac\r\n self.if_ip = intf.ip\r\n self.net = intf.net\r\n self.mask = intf.mask\r\n self.range = range\r\n self.gw = gw\r\n self.dns = dns\r\n self.ntp = ntp\r\n \r\n def __sync__(self):\r\n dummy_mac = 'aa:aa:aa' + self.if_mac[9:]\r\n route_ip = '192.254.254.%d' % self.if_id\r\n mgmt_ip = os.environ.get('RDHCP_IF_MGMT_IP')\r\n cli('ip netns add %s' % self.name)\r\n cli('ip netns exec %s ifconfig lo up' % self.name)\r\n cli('ip link set %s netns %s' % (self.if_name, self.name))\r\n cli('ip netns exec %s ifconfig %s %s netmask %s up' % (self.name, self.if_name, self.if_ip, self.mask))\r\n cli('ip link add nsve%d type veth peer name nsve%d netns %s' % (self.if_id, self.if_id, self.name))\r\n cli('ifconfig nsve%d 0.0.0.0 up' % self.if_id)\r\n cli('ip netns exec %s ifconfig nsve%d %s netmask 255.255.255.0 up' % (self.name, self.if_id, route_ip))\r\n cli('route add -host %s/32 dev nsve%d' % (route_ip, self.if_id))\r\n cli('iptables -A FORWARD -i nsve%d -j ACCEPT' % self.if_id)\r\n cli('ip netns exec %s route add -host %s/32 dev nsve%d' % (self.name, mgmt_ip, self.if_id))\r\n cli('ip netns exec %s route add default gw %s' % (self.name, mgmt_ip))\r\n cli('ip netns exec %s iptables -A FORWARD -i %s -j ACCEPT' % (self.name, self.if_name))\r\n cli('ip netns exec %s iptables -t nat -A PREROUTING -i %s -p udp -m udp --dport 69 -j DNAT --to-destination %s:69' % (self.name, self.if_name, mgmt_ip))\r\n cli('ip netns exec %s iptables -t nat -A PREROUTING -i %s -p udp -m udp --dport 123 -j DNAT --to-destination %s:123' % (self.name, self.if_name, mgmt_ip))\r\n cli('ip netns exec %s iptables -t nat -A PREROUTING -i %s -p tcp -m tcp --dport 20 -j DNAT --to-destination %s:20' % (self.name, self.if_name, mgmt_ip))\r\n cli('ip netns exec %s iptables -t nat -A PREROUTING -i %s -p tcp -m tcp --dport 21 -j DNAT --to-destination %s:21' % (self.name, self.if_name, mgmt_ip))\r\n cli('ip netns exec %s iptables -t nat -A PREROUTING -i %s -p tcp -m tcp --dport 80 -j DNAT --to-destination %s:80' % (self.name, self.if_name, mgmt_ip))\r\n cli('ip netns exec %s iptables -t nat -A POSTROUTING -o nsve%d -j MASQUERADE' % (self.name, self.if_id))\r\n cli('mkdir -p /opt/rdhcp/%s' % self.name)\r\n if not os.path.exists('/opt/rdhcp/%s/hosts' % self.name):\r\n cli('touch /opt/rdhcp/%s/hosts' % self.name)\r\n if not os.path.exists('/opt/rdhcp/%s/dhcp' % self.name):\r\n if self.range: dhcp_file = 'dhcp-option=1,%s\\ndhcp-range=%s\\n' % (self.mask, self.range)\r\n else: dhcp_file = 'dhcp-option=1,%s\\ndhcp-range=%s,%s\\n' % (self.mask, self.net, self.net)\r\n dhcp_file += 'dhcp-option=3,%s\\n' % (self.gw)\r\n dhcp_file += 'dhcp-option=6,%s\\n' % (self.dns)\r\n dhcp_file += 'dhcp-option=42,%s\\n' % (self.ntp)\r\n with open('/opt/rdhcp/%s/dhcp' % self.name, 'w') as fd: fd.write(dhcp_file)\r\n if self.pid: cli('ip netns exec %s kill -9 %d' % (self.name, self.pid), force=True)\r\n cli('ip netns exec %s /usr/sbin/dnsmasq --no-poll --no-hosts --log-facility=/opt/rdhcp/%s/log 
--dhcp-leasefile=/opt/rdhcp/%s/lease --pid-file=/opt/rdhcp/%s/pid --conf-file=/opt/rdhcp/%s/dhcp --addn-hosts=/opt/rdhcp/%s/hosts' % (self.name, self.name, self.name, self.name, self.name, self.name))\r\n with open('/opt/rdhcp/%s/pid' % self.name, 'r') as fd: self.pid = int(fd.read())\r\n \r\n def __delete_namespace__(self):\r\n if self.pid: cli('ip netns exec %s kill -9 %d' % (self.name, self.pid), force=True)\r\n cli('ip link del nsve%d' % self.if_id, force=True)\r\n cli('ip netns del %s' % self.name, force=True)\r\n cli('rm -rf /opt/rdhcp/%s' % self.name, force=True)\r\n cli('iptables -D FORWARD -i nsve%d -j ACCEPT' % self.if_id, force=True)\r\n pygics.sleep(1)\r\n cli('ifconfig %s %s netmask %s up' % (self.if_name, self.if_ip, self.mask), force=True)\r\n \r\n def sync(self):\r\n try: self.__sync__()\r\n except: pass\r\n return self\r\n \r\n def create(self):\r\n try: self.__sync__()\r\n except Exception as e:\r\n self.__delete_namespace__()\r\n raise e\r\n return Model.create(self)\r\n \r\n def delete(self):\r\n self.__delete_namespace__()\r\n intf = Interface.get(self.if_id)\r\n intf.ns_id = 0\r\n intf.ns_name = ''\r\n intf.update()\r\n hosts = Host.list(Host.ns_id==self.id)\r\n for host in hosts: Model.delete(host)\r\n return Model.delete(self)\r\n \r\n def createHost(self, mac, ip, name=''):\r\n return Host(self, mac, ip, name).create()\r\n \r\n def toDict(self):\r\n return {\r\n 'id' : self.id,\r\n 'name' : self.name,\r\n 'pid' : self.pid,\r\n 'if_id' : self.if_id,\r\n 'if_name' : self.if_name,\r\n 'if_mac' : self.if_mac,\r\n 'if_ip' : self.if_ip,\r\n 'net' : self.net,\r\n 'mask' : self.mask,\r\n 'range' : self.range,\r\n 'gw' : self.gw,\r\n 'dns' : self.dns,\r\n 'ntp' : self.ntp\r\n }\r\n\r\n@model(db)\r\nclass Host(Model):\r\n \r\n name = String(256)\r\n ns_id = Integer()\r\n mac = String(24)\r\n ip = String(16)\r\n \r\n def __init__(self, ns, mac, ip, name):\r\n self.ns_id = ns.id\r\n self.ns_name = ns.name\r\n self.mac = mac\r\n self.ip = ip\r\n self.name = name\r\n \r\n def create(self):\r\n ns = NameSpace.get(self.ns_id)\r\n hosts = Host.list(Host.ns_id==self.ns_id)\r\n if ns.range: dhcp_file = 'dhcp-option=1,%s\\ndhcp-range=%s\\n' % (ns.mask, ns.range) \r\n else: dhcp_file = 'dhcp-option=1,%s\\ndhcp-range=%s,%s\\n' % (ns.mask, ns.net, ns.net)\r\n hosts_file = ''\r\n dhcp_file += 'dhcp-option=3,%s\\n' % (ns.gw)\r\n dhcp_file += 'dhcp-option=6,%s\\n' % (ns.dns)\r\n dhcp_file += 'dhcp-option=42,%s\\n' % (ns.ntp)\r\n for host in hosts:\r\n dhcp_file += 'dhcp-host=%s,%s\\n' % (host.mac, host.ip)\r\n if host.name: hosts_file += '%s %s\\n' % (host.ip, host.name)\r\n dhcp_file += 'dhcp-host=%s,%s\\n' % (self.mac, self.ip)\r\n if self.name: hosts_file += '%s %s\\n' % (self.ip, self.name)\r\n with open('/opt/rdhcp/%s/dhcp' % ns.name, 'w') as fd: fd.write(dhcp_file)\r\n if hosts_file:\r\n with open('/opt/rdhcp/%s/hosts' % ns.name, 'w') as fd: fd.write(hosts_file)\r\n cli('sed -i \"/%s/d\" /opt/rdhcp/%s/lease' % (self.mac, ns.name), force=True)\r\n if ns.pid: cli('ip netns exec %s kill -9 %d' % (ns.name, ns.pid), force=True)\r\n cli('ip netns exec %s /usr/sbin/dnsmasq --no-poll --no-hosts --log-facility=/opt/rdhcp/%s/log --dhcp-leasefile=/opt/rdhcp/%s/lease --pid-file=/opt/rdhcp/%s/pid --conf-file=/opt/rdhcp/%s/dhcp --addn-hosts=/opt/rdhcp/%s/hosts' % (ns.name, ns.name, ns.name, ns.name, ns.name, ns.name))\r\n with open('/opt/rdhcp/%s/pid' % ns.name, 'r') as fd:\r\n ns.pid = int(fd.read())\r\n ns.update()\r\n return Model.create(self)\r\n \r\n def delete(self):\r\n 
Model.delete(self)\r\n ns = NameSpace.get(self.ns_id)\r\n hosts = Host.list(Host.ns_id==self.ns_id)\r\n if ns.range: dhcp_file = 'dhcp-option=1,%s\\ndhcp-range=%s\\n' % (ns.mask, ns.range) \r\n else: dhcp_file = 'dhcp-option=1,%s\\ndhcp-range=%s,%s\\n' % (ns.mask, ns.net, ns.net)\r\n hosts_file = ''\r\n dhcp_file += 'dhcp-option=3,%s\\n' % (ns.gw)\r\n dhcp_file += 'dhcp-option=6,%s\\n' % (ns.dns)\r\n dhcp_file += 'dhcp-option=42,%s\\n' % (ns.ntp)\r\n for host in hosts:\r\n dhcp_file += 'dhcp-host=%s,%s\\n' % (host.mac, host.ip)\r\n if host.name: hosts_file += '%s %s\\n' % (host.ip, host.name)\r\n with open('/opt/rdhcp/%s/dhcp' % ns.name, 'w') as fd: fd.write(dhcp_file)\r\n if hosts_file:\r\n with open('/opt/rdhcp/%s/hosts' % ns.name, 'w') as fd: fd.write(hosts_file)\r\n cli('sed -i \"/%s/d\" /opt/rdhcp/%s/lease' % (self.mac, ns.name), force=True)\r\n if ns.pid: cli('ip netns exec %s kill -9 %d' % (ns.name, ns.pid), force=True)\r\n cli('ip netns exec %s /usr/sbin/dnsmasq --no-poll --no-hosts --log-facility=/opt/rdhcp/%s/log --dhcp-leasefile=/opt/rdhcp/%s/lease --pid-file=/opt/rdhcp/%s/pid --conf-file=/opt/rdhcp/%s/dhcp --addn-hosts=/opt/rdhcp/%s/hosts' % (ns.name, ns.name, ns.name, ns.name, ns.name, ns.name))\r\n with open('/opt/rdhcp/%s/pid' % ns.name, 'r') as fd:\r\n ns.pid = int(fd.read())\r\n ns.update()\r\n return self\r\n \r\n def toDict(self):\r\n return {\r\n 'id' : self.id,\r\n 'ns_id' : self.ns_id,\r\n 'mac' : self.mac,\r\n 'ip' : self.ip,\r\n 'name' : self.name\r\n }\r\n"},"avg_line_length":{"kind":"number","value":40.6556886228,"string":"40.655689"},"max_line_length":{"kind":"number","value":303,"string":"303"},"alphanum_fraction":{"kind":"number","value":0.5351645924,"string":"0.535165"}}},{"rowIdx":46217,"cells":{"hexsha":{"kind":"string","value":"9fa33ee059bdfc8ff76392d80ea7fb6a0fd4a242"},"size":{"kind":"number","value":941,"string":"941"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"app/api/v1/session.py"},"max_stars_repo_name":{"kind":"string","value":"zucc-acm-devteam/zuccacm-sso"},"max_stars_repo_head_hexsha":{"kind":"string","value":"9c7d2f0b9cc069962f32b555152732a98bf2e94a"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"app/api/v1/session.py"},"max_issues_repo_name":{"kind":"string","value":"zucc-acm-devteam/zuccacm-sso"},"max_issues_repo_head_hexsha":{"kind":"string","value":"9c7d2f0b9cc069962f32b555152732a98bf2e94a"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"app/api/v1/session.py"},"max_forks_repo_name":{"kind":"string","value":"zucc-acm-devteam/zuccacm-sso"},"max_forks_repo_head_hexsha":{"kind":"string","value":"9c7d2f0b9cc069962f32b555152732a98bf2e94a"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from flask_login import current_user, 
login_required, login_user, logout_user\n\nfrom app.libs.error_code import Success, NotFound, Forbidden, DeleteSuccess\nfrom app.libs.red_print import RedPrint\nfrom app.validators.user import LoginForm\nfrom app.models.user import User\nfrom app.libs.helper import renew_ticket\n\napi = RedPrint('session')\n\n\n@api.route('', methods=['GET'])\n@login_required\ndef get_session_api():\n return Success(data=current_user)\n\n\n@api.route('', methods=['POST'])\ndef create_session_api():\n form = LoginForm().validate_for_api().data_\n user = User.get_by_id(form['username'])\n if not user:\n raise NotFound(msg='用户不存在')\n if not user.check_password(form['password']):\n raise Forbidden(msg='密码错误')\n renew_ticket(user)\n login_user(user)\n return Success(msg='登录成功', data=user)\n\n\n@api.route('', methods=['DELETE'])\ndef delete_session_api():\n logout_user()\n return DeleteSuccess('登出成功')\n"},"avg_line_length":{"kind":"number","value":26.8857142857,"string":"26.885714"},"max_line_length":{"kind":"number","value":77,"string":"77"},"alphanum_fraction":{"kind":"number","value":0.7268862912,"string":"0.726886"}}},{"rowIdx":46218,"cells":{"hexsha":{"kind":"string","value":"4cdd5c18859a82abe8befbf8af033e0d5aedce09"},"size":{"kind":"number","value":575,"string":"575"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"musterloesungen/6_warm_ups/dictionaries.py"},"max_stars_repo_name":{"kind":"string","value":"giu/appe6-uzh-hs2018"},"max_stars_repo_head_hexsha":{"kind":"string","value":"204dea36be1e53594124b606cdfa044368e54726"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"musterloesungen/6_warm_ups/dictionaries.py"},"max_issues_repo_name":{"kind":"string","value":"giu/appe6-uzh-hs2018"},"max_issues_repo_head_hexsha":{"kind":"string","value":"204dea36be1e53594124b606cdfa044368e54726"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"musterloesungen/6_warm_ups/dictionaries.py"},"max_forks_repo_name":{"kind":"string","value":"giu/appe6-uzh-hs2018"},"max_forks_repo_head_hexsha":{"kind":"string","value":"204dea36be1e53594124b606cdfa044368e54726"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Kurs: Python: Grundlagen der Programmierung für Nicht-Informatiker\n# Semester: Herbstsemester 2018\n# Homepage: http://accaputo.ch/kurs/python-uzh-hs-2018/\n# Author: Giuseppe Accaputo\n# Aufgabe: Warm-Up: Dictionaries besser kennenlernen\n\ndef preisliste_ausgeben(menu): \n print(\"Unsere Preisliste:\")\n \n for (gericht, preis) in menu.items():\n print(\" *\", gericht, \",\", preis, \"CHF\")\n\nmenu = {\n \"Burger\": 10.5,\n \"Pommes\": 4.0,\n \"Chicken Nuggets\": 
8.25\n}\n\npreisliste_ausgeben(menu)"},"avg_line_length":{"kind":"number","value":28.75,"string":"28.75"},"max_line_length":{"kind":"number","value":76,"string":"76"},"alphanum_fraction":{"kind":"number","value":0.6139130435,"string":"0.613913"}}},{"rowIdx":46219,"cells":{"hexsha":{"kind":"string","value":"1a6660ef66e470505b2f6489a4c6757325e9e737"},"size":{"kind":"number","value":17442,"string":"17,442"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"backend/apps/test_all_url_endpoints.py"},"max_stars_repo_name":{"kind":"string","value":"n-hackert/match4healthcare"},"max_stars_repo_head_hexsha":{"kind":"string","value":"761248c27b49e568c545c643a72eac9a040649d7"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"backend/apps/test_all_url_endpoints.py"},"max_issues_repo_name":{"kind":"string","value":"n-hackert/match4healthcare"},"max_issues_repo_head_hexsha":{"kind":"string","value":"761248c27b49e568c545c643a72eac9a040649d7"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"backend/apps/test_all_url_endpoints.py"},"max_forks_repo_name":{"kind":"string","value":"n-hackert/match4healthcare"},"max_forks_repo_head_hexsha":{"kind":"string","value":"761248c27b49e568c545c643a72eac9a040649d7"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from django.test import TestCase, Client\nfrom apps.iamstudent.models import Student, AUSBILDUNGS_TYPEN_COLUMNS\nfrom apps.ineedstudent.models import Hospital\nfrom apps.accounts.models import User\nfrom django.contrib import auth\n\nimport numpy as np\n\ndef generate_random_student(countrycode=\"DE\", plz=\"14482\", i=0, validated_email=False):\n m = str(i) + \"student@email.de\"\n pwd = User.objects.make_random_password()\n kwd = dict(zip(AUSBILDUNGS_TYPEN_COLUMNS,np.random.choice([True,False],size=len(AUSBILDUNGS_TYPEN_COLUMNS))))\n\n u = User.objects.create(username=m, email=m, is_student=True, validated_email=validated_email)\n u.set_password(pwd)\n s = Student.objects.create(user=u,\n countrycode=countrycode,\n plz=plz,\n availability_start='{}-{:02d}-{:02d}'.format(2020,3,23),\n **kwd\n )\n u.save()\n s.save()\n return m, pwd, s.uuid\n\ndef generate_random_hospital(countrycode=\"DE\", plz=\"14482\", i=0, datenschutz_zugestimmt=True, validated_email=False):\n m = str(i) + \"hospital@email.de\"\n pwd = User.objects.make_random_password()\n u = User.objects.create(username=m, email=m, is_hospital=True, validated_email=validated_email)\n u.set_password(pwd)\n s = Hospital.objects.create(user=u,\n countrycode=countrycode,\n plz=plz,\n ansprechpartner='XY',\n sonstige_infos='yeaah',\n datenschutz_zugestimmt=datenschutz_zugestimmt, \n einwilligung_datenweitergabe=True,\n )\n u.save()\n s.save()\n return m, pwd, s.uuid\n\ndef generate_staff_user(i=0):\n m = str(i) + \"staff@email.de\"\n 
pwd = User.objects.make_random_password()\n u = User.objects.create_superuser(username=m, email=m)\n u.set_password(pwd)\n u.save()\n return m, pwd\n\nclass UrlEndpointTestCase(TestCase):\n\n def setUp(self):\n self.client = Client(HTTP_USER_AGENT='Mozilla/5.0')\n\n def test_http_get_endpoints(self):\n assert self.client.get('/', {}).status_code == 200\n assert self.client.get('/about/', {}).status_code == 200\n assert self.client.get('/impressum/', {}).status_code == 200\n assert self.client.get('/dataprotection/', {}).status_code == 200\n assert self.client.get('/legal-questions/', {}).status_code == 200\n\n # Mapview\n assert self.client.get('/mapview/', {}).status_code == 200\n\n # Accounts\n assert self.client.get('/accounts/signup_student', {}).status_code == 200\n assert self.client.get('/accounts/signup_hospital', {}).status_code == 200\n assert self.client.get('/accounts/password_reset/', {}).status_code == 200\n assert self.client.get('/accounts/login/', {}).status_code == 200\n\n def test_count_url(self):\n generate_random_student(validated_email=True)\n response = self.client.get('/accounts/count', {})\n assert response.status_code == 200\n self.assertJSONEqual(\n str(response.content, encoding='utf8'),\n {'facility_count': 0, 'user_count': 1}\n )\n\n generate_random_hospital(validated_email=True)\n response = self.client.get('/accounts/count', {})\n assert response.status_code == 200\n self.assertJSONEqual(\n str(response.content, encoding='utf8'),\n {'facility_count': 1, 'user_count': 1}\n )\n\n\n\n def test_student(self):\n student_email, student_password, _ = generate_random_student()\n assert self.client.post('/accounts/logout/', {}).status_code == 200\n\n response = self.client.post('/accounts/password_reset', {\n \"email\": student_email\n }, follow=True)\n #print(response.redirect_chain)\n assert response.status_code == 200\n #TODO why does this not redirect to /accounts/password_reset/done\n\n response = self.client.post('/accounts/validate_email', {\n \"email\": student_email\n }, follow=True)\n assert \"/accounts/login\" in response.redirect_chain[0][0]\n assert response.status_code == 200\n\n\n response = self.client.post('/accounts/login/', {\n \"username\": student_email,\n \"password\": student_password,\n }, follow=True)\n assert auth.get_user(self.client).username == student_email\n\n assert Student.objects.get(user__email=student_email).user.validated_email == False\n response = self.client.post('/accounts/validate_email', {\n \"email\": student_email\n }, follow=True)\n assert response.status_code == 200\n assert Student.objects.get(user__email=student_email).user.validated_email\n\n\n response = self.client.post('/accounts/password_change', {\n \"email\": student_email,\n \"new_password1\": student_password,\n \"new_password2\": student_password\n }, follow=True)\n #print(response.redirect_chain)\n assert response.status_code == 200\n #TODO why does this not redirect to /accounts/password_change/done\n\n assert self.client.get('/mapview/', {}).status_code == 200\n\n response = self.client.get('/accounts/profile_redirect', follow=True)\n assert \"profile_student\" in response.redirect_chain[0][0]\n assert self.client.get('/accounts/profile_student', {}).status_code == 200\n\n assert self.client.get('/accounts/logout/', {}).status_code == 200\n assert auth.get_user(self.client).is_anonymous\n\n response = self.client.post('/accounts/login/', {\n \"username\": student_email,\n \"password\": student_password,\n }, follow=True)\n assert 
auth.get_user(self.client).username == student_email\n\n # Test view list of studens without being logged in as student. Should redirect!\n response = self.client.get(\"/ineedstudent/students/DE/14482/0\", follow=True)\n assert \"login\" in response.redirect_chain[0][0]\n assert response.status_code == 200\n\n # Test admin view when logged in as student. Should redirect\n response = self.client.get(\"/accounts/approve_hospitals\", follow=True)\n assert \"login\" in response.redirect_chain[0][0]\n assert response.status_code == 200\n\n m1, p1, uuid1 = generate_random_hospital(\"DE\", \"14482\", 1337)\n m2, p2, uuid2 = generate_random_hospital(\"DE\", \"10115\", 1234)\n m3, p3, uuid3 = generate_random_hospital(\"AT\", \"4020\", 420)\n response = self.client.get('/ineedstudent/hospital_view/' + str(uuid1) + \"/\")\n assert response.status_code == 200\n\n response = self.client.get('/ineedstudent/hospitals/DE/14482')\n assert response.status_code == 200\n\n assert self.client.get('/accounts/delete_me_ask', {}).status_code == 200\n assert self.client.get('/accounts/delete_me', {}).status_code == 200\n\n\n response = self.client.post('/accounts/login/', {\n \"username\": student_email,\n \"password\": student_password,\n }, follow=True)\n assert auth.get_user(self.client).is_anonymous\n\n # Only available to logged in users, should redirect\n response = self.client.get('/ineedstudent/hospital_view/' + str(uuid1) + \"/\", follow=True)\n assert \"login\" in response.redirect_chain[0][0]\n assert response.status_code == 200\n\n # Only available to logged in users, should redirect\n response = self.client.get('/ineedstudent/hospitals/DE/14482', follow=True)\n assert \"login\" in response.redirect_chain[0][0]\n assert response.status_code == 200\n\n def test_hospital(self):\n hospital_email, hospital_password, uuid = generate_random_hospital()\n\n assert self.client.post('/accounts/logout/', {}).status_code == 200\n\n response = self.client.post('/accounts/password_reset', {\n \"email\": hospital_email\n }, follow=True)\n #print(response.redirect_chain)\n assert response.status_code == 200\n #TODO why does this not redirect to /accounts/password_reset/done\n\n response = self.client.post('/accounts/validate_email', {\n \"email\": hospital_email\n }, follow=True)\n assert \"/accounts/login\" in response.redirect_chain[0][0]\n assert response.status_code == 200\n\n\n response = self.client.post('/accounts/login/', {\n \"username\": hospital_email,\n \"password\": hospital_password,\n }, follow=True)\n assert auth.get_user(self.client).username == hospital_email\n\n assert Hospital.objects.get(user__email=hospital_email).user.validated_email == False\n response = self.client.post('/accounts/validate_email', {\n \"email\": hospital_email\n }, follow=True)\n assert response.status_code == 200\n assert Hospital.objects.get(user__email=hospital_email).user.validated_email\n\n\n response = self.client.post('/accounts/password_change', {\n \"email\": hospital_email,\n \"new_password1\": hospital_password,\n \"new_password2\": hospital_password\n }, follow=True)\n #print(response.redirect_chain)\n assert response.status_code == 200\n #TODO why does this not redirect to /accounts/password_change/done\n\n assert self.client.get('/mapview/', {}).status_code == 200\n #TODO Test Detailansicht for a hospital!\n\n response = self.client.get('/accounts/profile_redirect', follow=True)\n assert response.status_code == 200\n assert \"profile_hospital\" in response.redirect_chain[0][0]\n assert 
self.client.get('/accounts/profile_hospital', {}).status_code == 200\n\n assert self.client.get('/accounts/logout/', {}).status_code == 200\n assert auth.get_user(self.client).is_anonymous\n\n response = self.client.post('/accounts/login/', {\n \"username\": hospital_email,\n \"password\": hospital_password,\n }, follow=True)\n assert auth.get_user(self.client).username == hospital_email\n\n # Test view list of students with being logged in as hospital. Should work!\n response = self.client.get(\"/ineedstudent/students/DE/14482/0\", follow=True)\n assert response.status_code == 200\n assert len(response.redirect_chain) == 0\n\n\n # Test admin view when logged in as hospital. Should redirect\n response = self.client.get(\"/accounts/approve_hospitals\", follow=True)\n assert \"login\" in response.redirect_chain[0][0]\n assert response.status_code == 200\n\n\n response = self.client.get('/ineedstudent/hospital_view/' + str(uuid) + \"/\")\n assert response.status_code == 200\n\n response = self.client.get('/ineedstudent/hospitals/DE/14482')\n assert response.status_code == 200\n\n m1, p1, uuid1 = generate_random_student(\"DE\", \"14482\", 1337, validated_email=True)\n m2, p2, uuid2 = generate_random_student(\"DE\", \"10115\", 1234, validated_email=True)\n m3, p3, uuid3 = generate_random_student(\"DE\", \"10115\", 12345, validated_email=False)\n m4, p4, uuid4 = generate_random_student(\"AT\", \"4020\", 420, validated_email=True)\n response = self.client.get('/ineedstudent/students/DE/14482/0')\n\n assert \"1 Helfer*innen\" in str(response.content)\n assert response.status_code == 200\n\n response = self.client.get('/ineedstudent/students/DE/14482/50')\n assert \"2 Helfer*innen\" in str(response.content)\n assert response.status_code == 200\n\n assert self.client.get('/accounts/delete_me_ask', {}).status_code == 200\n assert self.client.get('/accounts/delete_me', {}).status_code == 200\n\n response = self.client.post('/accounts/login/', {\n \"username\": hospital_email,\n \"password\": hospital_password,\n }, follow=True)\n assert auth.get_user(self.client).is_anonymous\n\n # Test view list of studens without being logged in. Should redirect!\n response = self.client.get(\"/ineedstudent/students/DE/14482/0\", follow=True)\n assert \"login\" in response.redirect_chain[0][0]\n assert response.status_code == 200\n\n # Test admin view as logged out user. 
Should redirect\n response = self.client.get(\"/accounts/approve_hospitals\", follow=True)\n assert \"login\" in response.redirect_chain[0][0]\n assert response.status_code == 200\n\n hospital_email, hospital_password, uuid = generate_random_hospital(datenschutz_zugestimmt=False, i=9999)\n response = self.client.post('/accounts/login/', {\n \"username\": hospital_email,\n \"password\": hospital_password,\n }, follow=True)\n assert Hospital.objects.get(user__email=hospital_email).datenschutz_zugestimmt == False\n assert \"zustimmung\" in response.redirect_chain[1][0]\n assert auth.get_user(self.client).username == hospital_email\n\n response = self.client.post('/ineedstudent/zustimmung', {\n \"datenschutz_zugestimmt\": True,\n \"einwilligung_datenweitergabe\": True,\n }, follow=True)\n assert response.status_code == 200\n assert \"login_redirect\" in response.redirect_chain[0][0]\n assert Hospital.objects.get(user__email=hospital_email).datenschutz_zugestimmt == True\n\n def test_sudent_individual_view(self):\n staff_email, staff_password = generate_staff_user()\n hospital_email, hospital_password, hospital_uuid = generate_random_hospital()\n student_email, student_password, student_uuid = generate_random_student()\n\n response = self.client.post('/accounts/login/', {\n \"username\": student_email,\n \"password\": student_password,\n }, follow=True)\n response = self.client.get('/iamstudent/view_student/' + str(student_uuid), follow=True)\n assert response.status_code == 200\n assert \"/accounts/profile_student\" in response.redirect_chain[0][0]\n\n # TOOD: test which emails can be seen here!\n response = self.client.post('/accounts/login/', {\n \"username\": staff_email,\n \"password\": staff_password,\n }, follow=True)\n response = self.client.get('/iamstudent/view_student/' + str(student_uuid))\n assert response.status_code == 200\n\n # TOOD: test which emails can be seen here!\n response = self.client.post('/accounts/login/', {\n \"username\": hospital_email,\n \"password\": hospital_password,\n }, follow=True)\n response = self.client.get('/iamstudent/view_student/' + str(student_uuid))\n assert response.status_code == 200\n\n\n\n\n def test_admin(self):\n staff_email, staff_password = generate_staff_user()\n\n assert self.client.post('/accounts/logout/', {}).status_code == 200\n\n response = self.client.post('/accounts/password_reset', {\n \"email\": staff_email\n }, follow=True)\n #print(response.redirect_chain)\n assert response.status_code == 200\n #TODO why does this not redirect to /accounts/password_reset/done\n\n response = self.client.post('/accounts/login/', {\n \"username\": staff_email,\n \"password\": staff_password,\n }, follow=True)\n assert auth.get_user(self.client).username == staff_email\n\n response = self.client.post('/accounts/password_change', {\n \"email\": staff_email,\n \"new_password1\": staff_password,\n \"new_password2\": staff_password\n }, follow=True)\n #print(response.redirect_chain)\n assert response.status_code == 200\n #TODO why does this not redirect to /accounts/password_change/done\n\n assert self.client.get('/mapview/', {}).status_code == 200\n #TODO Test Detailansicht for a hospital!\n\n response = self.client.get('/accounts/profile_redirect', follow=True)\n assert response.status_code == 200\n assert \"approve_hospitals\" in response.redirect_chain[0][0]\n\n response = self.client.get('/accounts/approve_hospitals', follow=True)\n assert response.status_code == 200\n\n assert self.client.get('/accounts/logout/', {}).status_code == 200\n assert 
auth.get_user(self.client).is_anonymous\n\n        response = self.client.post('/accounts/login/', {\n            \"username\": staff_email,\n            \"password\": staff_password,\n        }, follow=True)\n        assert auth.get_user(self.client).username == staff_email\n\n        # Test view list of students while being logged in as staff user\n        # Current behavior: Should redirect!\n        # TODO: discuss what the behavior of this should be!\n        response = self.client.get(\"/ineedstudent/students/DE/14482/0\", follow=True)\n        assert \"login\" in response.redirect_chain[0][0]\n        assert response.status_code == 200\n\n\n        assert self.client.get('/accounts/delete_me_ask', {}).status_code == 200\n        assert self.client.get('/accounts/delete_me', {}).status_code == 200\n\n        response = self.client.post('/accounts/login/', {\n            \"username\": staff_email,\n            \"password\": staff_password,\n        }, follow=True)\n        assert auth.get_user(self.client).is_anonymous\n\n        response = self.client.get(\"/ineedstudent/students/DE/14482/0\", follow=True)\n"},"avg_line_length":{"kind":"number","value":42.75,"string":"42.75"},"max_line_length":{"kind":"number","value":117,"string":"117"},"alphanum_fraction":{"kind":"number","value":0.6420708634,"string":"0.642071"}}},{"rowIdx":46220,"cells":{"hexsha":{"kind":"string","value":"6465aca9b1ad8ec2565cf8881bc61c342672a493"},"size":{"kind":"number","value":87,"string":"87"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"python/redengine.py"},"max_stars_repo_name":{"kind":"string","value":"DubskySteam/Facharbeit_Schlitten"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4ce8764a13ea41120b1b17f9f77d516aa958442b"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"python/redengine.py"},"max_issues_repo_name":{"kind":"string","value":"DubskySteam/Facharbeit_Schlitten"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4ce8764a13ea41120b1b17f9f77d516aa958442b"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"python/redengine.py"},"max_forks_repo_name":{"kind":"string","value":"DubskySteam/Facharbeit_Schlitten"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4ce8764a13ea41120b1b17f9f77d516aa958442b"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\nimport tinkerforge\nimport os\nimport sys\nimport math\nimport 
time"},"avg_line_length":{"kind":"number","value":12.4285714286,"string":"12.428571"},"max_line_length":{"kind":"number","value":22,"string":"22"},"alphanum_fraction":{"kind":"number","value":0.8045977011,"string":"0.804598"}}},{"rowIdx":46221,"cells":{"hexsha":{"kind":"string","value":"64c2a8d445544ec9378b4ce1bd8ef90a4f884577"},"size":{"kind":"number","value":192,"string":"192"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"exercises/zh/exc_01_08_02.py"},"max_stars_repo_name":{"kind":"string","value":"Jette16/spacy-course"},"max_stars_repo_head_hexsha":{"kind":"string","value":"32df0c8f6192de6c9daba89740a28c0537e4d6a0"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2085,"string":"2,085"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-04-17T13:10:40.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T21:51:46.000Z"},"max_issues_repo_path":{"kind":"string","value":"exercises/zh/exc_01_08_02.py"},"max_issues_repo_name":{"kind":"string","value":"Jette16/spacy-course"},"max_issues_repo_head_hexsha":{"kind":"string","value":"32df0c8f6192de6c9daba89740a28c0537e4d6a0"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":79,"string":"79"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-04-18T14:42:55.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-07T08:15:43.000Z"},"max_forks_repo_path":{"kind":"string","value":"exercises/zh/exc_01_08_02.py"},"max_forks_repo_name":{"kind":"string","value":"Jette16/spacy-course"},"max_forks_repo_head_hexsha":{"kind":"string","value":"32df0c8f6192de6c9daba89740a28c0537e4d6a0"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":361,"string":"361"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-04-17T13:34:32.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-28T04:42:45.000Z"},"content":{"kind":"string","value":"import spacy\n\nnlp = spacy.load(\"zh_core_web_sm\")\n\ntext = \"写入历史了:苹果是美国第一家市值超过一万亿美元的上市公司。\"\n\n# 处理文本\ndoc = ____\n\n# 对识别出的实体进行遍历\nfor ent in ____.____:\n # 打印实体文本及标注\n print(ent.____, ____.____)\n"},"avg_line_length":{"kind":"number","value":13.7142857143,"string":"13.714286"},"max_line_length":{"kind":"number","value":38,"string":"38"},"alphanum_fraction":{"kind":"number","value":0.703125,"string":"0.703125"}}},{"rowIdx":46222,"cells":{"hexsha":{"kind":"string","value":"64cb07f975fd7cee6091cd828bd81bd845457ce2"},"size":{"kind":"number","value":39156,"string":"39,156"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/server_health.py"},"max_stars_repo_name":{"kind":"string","value":"opencomputeproject/Rack-Manager"},"max_stars_repo_head_hexsha":{"kind":"string","value":"e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-11-11T07:57:26.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-28T08:26:53.000Z"},"max_issues_repo_path":{"kind":"string","value":"Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/server_health.py"},"max_issues_repo_name":{"kind":"string","value":"opencomputeproject/Rack-Manager"},"max_issues_repo_head_hexsha":{"kind":"string","value":"e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-09-05T21:47:07.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2019-09-17T18:10:45.000Z"},"max_forks_repo_path":{"kind":"string","value":"Contrib-Microsoft/Olympus_rack_manager/python-ocs/commonapi/controls/server_health.py"},"max_forks_repo_name":{"kind":"string","value":"opencomputeproject/Rack-Manager"},"max_forks_repo_head_hexsha":{"kind":"string","value":"e1a61d3eeeba0ff655fe9c1301e8b510d9b2122a"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":11,"string":"11"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-07-20T00:16:32.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-01-11T14:17:48.000Z"},"content":{"kind":"string","value":"# Copyright (C) Microsoft Corporation. All rights reserved.\n\n# This program is free software; you can redistribute it\n# and/or modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom ipmicmd_library import *\nfrom bladeinfo_lib import *\nfrom bladethermal_lib import fan_sub_parser\n\ndef get_server_health(serverid):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n ipmi_cmd = 'ocsoem redfish health' \n cmdinterface = interface + ' ' + ipmi_cmd\n \n output = call_ipmi(cmdinterface, \"Server health\") \n \n healthrsp = {}\n \n if completion_code.cc_key in output:\n healthrsp[completion_code.cc_key] = completion_code.failure\n healthrsp[completion_code.desc] = \"get server health ipmi call error \"\n return healthrsp \n \n if(output['status_code'] == 0) or (output['stdout']): \n healthrsp = parse_health_response(output['stdout']) \n return healthrsp\n \n else: \n errorData = output['stderr'].split('\\n') \n errorData = filter(None, errorData) \n healthrsp[completion_code.cc_key] = completion_code.failure\n \n for data in errorData:\n if \"Error\" in data:\n healthrsp[completion_code.desc] = data.split(\":\")[-1].strip()\n elif completion_code.cc_key in data:\n healthrsp[completion_code.ipmi_code] = data.split(\":\")[-1].strip()\n else:\n healthrsp[completion_code.desc] = data.strip() \n break \n \n if healthrsp[completion_code.desc] == \"\":\n healthrsp[completion_code.desc] = errorData.strip()\n \n return healthrsp\n \n except Exception,e:\n #log.exception(\"Exception error is: %s \" %e)\n healthrsp[completion_code.cc_key] = completion_code.failure\n healthrsp[completion_code.desc] = 
\"Get server health, Exception: \", e\n return healthrsp \n \ndef parse_health_response(output):\n try: \n completionstate = True\n healthrsp = {}\n healthrsp[\" Server Information\"] = {}\n healthrsp[\"CPU Information\"] = {}\n healthrsp[\"Memory Information\"] = {} \n healthrsp[\"PCIE Information\"] = {} \n healthrsp[\"Temperature Information\"] = {}\n healthrsp[\"FRU Information\"] = {}\n healthrsp[\"Fan Information\"] = {}\n healthrsp[\"Sensor Information\"] = {} \n \n #populating temperatures data\n health = output.split('$')\n\n healthdata = filter(None, health) #Remove empty data\n \n if len(healthdata) == 0:\n healthrsp[completion_code.cc_key] = completion_code.failure\n healthrsp[completion_code.desc] = \"health data is empty\"\n return healthrsp \n else:\n for value in healthdata:\n object_data = value.split('\\n')\n object_value= filter(None, object_data)\n # Skipping empty lists if any\n if len(object_value) == 0:\n break\n else:\n if object_value[0].lower().strip('-').strip() == \"fru information\": \n fru_info = get_fru_info(object_value) \n if completion_code.cc_key in fru_info.keys():\n value = fru_info.pop(completion_code.cc_key,None) \n if value == completion_code.failure:\n completionstate &= False\n \n healthrsp[\"FRU Information\"] = fru_info\n \n elif object_value[0].lower().strip('-').strip() == \"cpu information\": \n cpu_info = get_cpu_info(object_value) \n if completion_code.cc_key in cpu_info.keys():\n value = cpu_info.pop(completion_code.cc_key,None) \n if value == completion_code.failure:\n completionstate &= False\n healthrsp[\"CPU Information\"] = cpu_info\n \n elif object_value[0].lower().strip('-').strip() == \"server information\": \n server_info = get_server_info(object_value) \n \n if completion_code.cc_key in server_info.keys():\n value = server_info.pop(completion_code.cc_key,None) \n if value == completion_code.failure:\n completionstate &= False\n \n healthrsp[\" Server Information\"] = server_info\n \n elif object_value[0].lower().strip('-').strip() == \"memory information\": \n memory_data = value.split('*') \n memory_value= filter(None, memory_data) \n mem_info = get_memory_health(memory_value) \n \n if completion_code.cc_key in mem_info.keys():\n value = mem_info.pop(completion_code.cc_key,None) \n if value == completion_code.failure:\n completionstate &= False\n \n healthrsp[\"Memory Information\"] = mem_info \n \n elif object_value[0].lower().strip('-').strip() == \"pcie information\": \n pcie_data = value.split('*') \n pcie_value= filter(None, pcie_data) \n pcie_info = get_pcie_info(pcie_value) \n \n if completion_code.cc_key in pcie_info.keys():\n value = pcie_info.pop(completion_code.cc_key,None) \n if value == completion_code.failure:\n completionstate &= False\n \n healthrsp[\"PCIE Information\"] = pcie_info \n \n elif object_value[0].lower().strip('-').strip() == \"fan information\": \n del object_value[0] # deleting first record which is \"-----Fan Information-------\" string \n fan_info = get_sensor_info(object_value, 'fan') \n \n if completion_code.cc_key in fan_info.keys():\n value = fan_info.pop(completion_code.cc_key,None) \n if value == completion_code.failure:\n completionstate &= False\n \n healthrsp[\"Fan Information\"] = fan_info\n \n elif object_value[0].lower().strip('-').strip() == \"temperature information\": \n del object_value[0] # deleting first record which is \"-----Temperature Information-------\" string \n temp_info = get_sensor_info(object_value) \n \n if completion_code.cc_key in temp_info.keys():\n value = 
temp_info.pop(completion_code.cc_key,None) \n if value == completion_code.failure:\n completionstate &= False\n \n healthrsp[\"Temperature Information\"] = temp_info\n \n elif object_value[0].lower().strip('-').strip() == \"sensor information\": \n del object_value[0] # deleting first record which is \"-----Sensor Information-------\" string \n sensor_info = get_sensor_info(object_value) \n \n if completion_code.cc_key in sensor_info.keys():\n value = sensor_info.pop(completion_code.cc_key,None) \n if value == completion_code.failure:\n completionstate &= False\n \n healthrsp[\"Sensor Information\"] = sensor_info\n \n except Exception,e:\n #log.exception(\"Exception error is: %s \" %e)\n healthrsp[completion_code.cc_key] = completion_code.failure\n healthrsp[completion_code.desc] = \"Get server health, Exception: \", e\n return healthrsp \n \n if completionstate:\n healthrsp[completion_code.cc_key] = completion_code.success\n else:\n healthrsp[completion_code.cc_key] = completion_code.failure\n return healthrsp\n\ndef get_memory_health(memory):\n try:\n completionstate = True\n mem_rsp = {}\n dimm_id = 1 \n \n for value in memory:\n dimm_data = value.split('\\n') \n dimm_data = filter(None, dimm_data) # Removes empty strings \n # Skipping empty lists if any \n if len(dimm_data) == 0:\n break \n if len(dimm_data) == 2:\n continue\n \n mem_rsp[dimm_id] = {} \n for value in dimm_data: \n if \"Completion Code:\" in value:\n completionstate &= False\n elif \"DimmId\" in value:\n mem_rsp[dimm_id][\"Dimm Id\"] = value.split(\":\")[-1].strip() \n elif \"Dimm Type\" in value:\n mem_rsp[dimm_id][\"Dimm Type\"] = value.split(\":\")[-1].strip()\n elif \"Dimm speed\" in value:\n mem_rsp[dimm_id][\"Dimm Speed\"] = value.split(\":\")[-1].strip() \n elif \"Dimm size\" in value:\n mem_rsp[dimm_id][\"Dimm Size\"] = value.split(\":\")[-1].strip() \n elif \"Dimm Status\" in value:\n mem_rsp[dimm_id][\"Dimm Status\"] = value.split(\":\")[-1].strip()\n elif \"Voltage\" in value:\n mem_rsp[dimm_id][\"Dimm Voltage\"] = value.split(\":\")[-1].strip()\n elif \"Running Speed\" in value:\n mem_rsp[dimm_id][\"Running Speed\"] = value.split(\":\")[-1].strip()\n dimm_id = dimm_id + 1\n \n except Exception,e:\n mem_rsp[completion_code.cc_key] = completion_code.failure\n mem_rsp[completion_code.desc] = \"Get memory health, Exception: \", e\n return mem_rsp \n \n if completionstate:\n mem_rsp[completion_code.cc_key] = completion_code.success\n else:\n mem_rsp[completion_code.cc_key] = completion_code.failure\n \n return mem_rsp \n\ndef get_pcie_info(pcie):\n try:\n completionstate = True\n pcie_rsp = {}\n pcie_id = 1 \n\n for value in pcie:\n pcie_data = value.split('\\n') \n \n pcie_data = filter(None, pcie_data) # Removes empty list \n # Skipping empty lists if any \n if len(pcie_data) == 0:\n break \n if len(pcie_data) == 2:\n continue\n \n pcie_rsp[pcie_id] = {} \n for value in pcie_data: \n if \"Completion Code:\" in value:\n completionstate &= False\n elif \"PCIe Id\" in value:\n pcie_rsp[pcie_id][\"PCIe Index\"] = value.split(\":\")[-1].strip() \n elif \"PCIe Status\" in value:\n pcie_rsp[pcie_id][\"PCIe Status\"] = value.split(\":\")[-1].strip() \n elif \"PCIe Device\" in value:\n pcie_rsp[pcie_id][\"State\"] = value.split(\":\")[-1].strip()\n elif \"Device Id\" in value:\n pcie_rsp[pcie_id][\"Device Id\"] = value.split(\":\")[-1].strip() \n elif \"Vendor Id\" in value:\n pcie_rsp[pcie_id][\"Vendor Id\"] = value.split(\":\")[-1].strip() \n elif \"SubSystem Id\" in value:\n pcie_rsp[pcie_id][\"SubSystem Id\"] = 
value.split(\":\")[-1].strip()\n elif \"SubSystem vendor Id\" in value:\n pcie_rsp[pcie_id][\"SubSystem vendor Id\"] = value.split(\":\")[-1].strip()\n \n pcie_id = pcie_id + 1\n \n except Exception,e:\n pcie_rsp[completion_code.cc_key] = completion_code.failure\n pcie_rsp[completion_code.desc] = \"Get PCIe health, Exception: \", e\n return pcie_rsp \n \n if completionstate:\n pcie_rsp[completion_code.cc_key] = completion_code.success\n else:\n pcie_rsp[completion_code.cc_key] = completion_code.failure\n \n return pcie_rsp \n \ndef get_server_info(server):\n try:\n completionstate = True \n server_rsp = {}\n \n for value in server: \n if \"Completion Code:\" in value:\n completionstate &= False\n elif \"Server communication type\" in value:\n server_type = value.split(\":\")[-1].strip()\n if server_type == \"IPMI\":\n server_rsp[\"Server Type\"] = \"C2010\"\n elif server_type == \"REST\":\n server_rsp[\"Server Type\"] = \"J2010\"\n else:\n server_rsp[\"Server Type\"] = \"Unknown\"\n completionstate &= False \n elif \"Slot Id\" in value:\n server_rsp[\"Server Slot ID\"] = value.split(\":\")[-1].strip()\n elif \"System Power State\" in value:\n server_rsp[\"Server State\"] = value.split(\":\")[-1].strip()\n \n except Exception,e:\n server_rsp[completion_code.cc_key] = completion_code.failure\n server_rsp[completion_code.desc] = \"Get Server Information, Exception: \", e\n return server_rsp \n \n if completionstate:\n server_rsp[completion_code.cc_key] = completion_code.success\n else:\n server_rsp[completion_code.cc_key] = completion_code.failure\n \n return server_rsp \n\ndef get_cpu_info(cpu):\n try:\n completionstate = True\n \n cpursp = {}\n cpursp[\"Processor-1\"] = {}\n cpursp[\"Processor-2\"] = {} \n \n for value in cpu: \n if \"Completion Code:\" in value:\n completionstate &= False\n elif \"Processor0 Type\" in value:\n cpursp[\"Processor-1\"][\"Processor Id\"] = 0\n cpursp[\"Processor-1\"][\"Processor Type\"] = value.split(\":\")[-1].strip() \n elif \"Processor0 Frequency\" in value:\n cpursp[\"Processor-1\"][\"Processor Frequency\"] = value.split(\":\")[-1].strip()\n elif \"Processor0 State\" in value:\n cpursp[\"Processor-1\"][\"ProcessorState\"] = value.split(\":\")[-1].strip()\n elif \"Processor1 Type\" in value:\n cpursp[\"Processor-2\"][\"Processor Id\"] = 1\n cpursp[\"Processor-2\"][\"Processor Type\"] = value.split(\":\")[-1].strip() \n elif \"Processor1 Frequency\" in value:\n cpursp[\"Processor-2\"][\"Processor Frequency\"] = value.split(\":\")[-1].strip()\n elif \"Processor1 State\" in value:\n cpursp[\"Processor-2\"][\"ProcessorState\"] = value.split(\":\")[-1].strip() \n \n except Exception,e:\n cpursp[completion_code.cc_key] = completion_code.failure\n cpursp[completion_code.desc] = \"Get CPU health, Exception: \", e\n return cpursp \n \n if completionstate:\n cpursp[completion_code.cc_key] = completion_code.success\n else:\n cpursp[completion_code.cc_key] = completion_code.failure\n \n return cpursp\n\ndef get_sensor_info(temp, sensortype = ''):\n try:\n completionstate = True \n temp_rsp = {}\n record_id = 1 \n for value in temp: \n if \"Completion Code:\" in value:\n completionstate &= False\n \n # Skipping empty lists if any \n if len(value) == 0:\n break \n \n val = value.split (\"|\")\n sensor = {}\n if sensortype == \"fan\":\n if \"pwm\" in val[0].lower().strip():\n continue\n else:\n sensor[\"Fan Name\"] = val[0].strip ()\n sensor[\"Fan Number\"] = val[1].strip ()\n sensor[\"Fan Status\"] = val[2].strip ()\n sensor[\"Fan MemberId\"] = val[3].strip ()\n 
sensor[\"Fan Reading\"] = val[4].strip ()\n else:\n sensor[\"Sensor Description\"] = val[0].strip ()\n sensor[\"Sensor Number\"] = val[1].strip ()\n sensor[\"Sensor Status\"] = val[2].strip ()\n sensor[\"Sensor Entity ID\"] = val[3].strip ()\n sensor[\"Sensor Reading\"] = val[4].strip ()\n \n \n temp_rsp[record_id] = sensor \n \n record_id = record_id + 1\n \n except Exception,e:\n temp_rsp[completion_code.cc_key] = completion_code.failure\n temp_rsp[completion_code.desc] = \"Get Sensor Information, Exception: \", e\n return temp_rsp \n \n if completionstate:\n temp_rsp[completion_code.cc_key] = completion_code.success\n else:\n temp_rsp[completion_code.cc_key] = completion_code.failure\n \n return temp_rsp \n\ndef show_memory_info(serverid):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n ipmi_cmd = 'ocsoem dimminfo' \n cmdinterface = interface + ' ' + ipmi_cmd\n \n get_memory = parse_memory(cmdinterface ,\"memory\") \n \n if get_memory is None or not get_memory: # Check empty or none\n return set_failure_dict(\"Empty memory info\", completion_code.failure) \n \n return get_memory \n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"get memory info Exception: \", e), completion_code.failure)\n \ndef show_pcie_health(serverid):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n ipmi_cmd = 'ocsoem getpcie' \n cmdinterface = interface + ' ' + ipmi_cmd\n \n get_pcie = parse_pcie(cmdinterface ,\"pcie\") \n \n if get_pcie is None or not get_pcie: # Check empty or none\n return set_failure_dict(\"Empty PCIe information\", completion_code.failure)\n \n return get_pcie\n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"get Pcie info Exception: \", e), completion_code.failure)\n \ndef show_cpu_health(serverid):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n if \"Failed:\" in interface:\n return set_failure_dict(interface, completion_code.failure)\n \n ipmi_cmd = 'ocsoem redfish cpu' \n cmdinterface = interface + ' ' + ipmi_cmd\n \n output = call_ipmi(cmdinterface, \"cpu\") \n \n if \"ErrorCode\" in output:\n return set_failure_dict(\"IPMI call error {0}\".format(output), completion_code.failure)\n \n cpursp = {}\n \n if(output['status_code'] == 0):\n cpu_data = output['stdout'].split('\\n')\n \n cpursp = get_cpu_info(cpu_data)\n else:\n error_data = output['stderr'].split('\\n') \n cpursp[completion_code.cc_key] = completion_code.failure\n \n for data in error_data:\n if \"Error\" in data:\n cpursp[completion_code.desc] = data.split(\":\")[-1]\n elif \"Completion Code\" in data:\n cpursp[completion_code.ipmi_code] = data.split(\":\")[-1] \n \n if cpursp is None or not cpursp: # Check empty or none\n return set_failure_dict(\"Empty cpu information\", completion_code.failure)\n \n return cpursp\n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"show cpu info Exception: \", e), completion_code.failure)\n\ndef show_temperature_health(serverid):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", 
completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n if \"Failed:\" in interface:\n return set_failure_dict(interface, completion_code.failure)\n \n ipmi_cmd = 'sdr type temperature' \n cmdinterface = interface + ' ' + ipmi_cmd\n \n output = call_ipmi(cmdinterface, \"Temperature\") \n \n if \"ErrorCode\" in output:\n return set_failure_dict(\"IPMI call error {0}\".format(output), completion_code.failure)\n \n temprsp = {}\n \n if(output['status_code'] == 0):\n temp_data = output['stdout'].split('\\n') \n temprsp = get_sensor_info(temp_data)\n else:\n error_data = output['stderr'].split('\\n') \n temprsp[completion_code.cc_key] = completion_code.failure\n \n for data in error_data:\n if \"Error\" in data:\n temprsp[completion_code.desc] = data.split(\":\")[-1]\n elif \"Completion Code\" in data:\n temprsp[completion_code.ipmi_code] = data.split(\":\")[-1] \n \n if temprsp is None or not temprsp: # Check empty or none\n return set_failure_dict(\"Empty temperature information\", completion_code.failure)\n \n return temprsp\n \n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"show temperature info Exception: \", e), completion_code.failure)\n\ndef show_fan_health(serverid):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n if \"Failed:\" in interface:\n return set_failure_dict(interface, completion_code.failure)\n \n ipmi_cmd = 'sdr type fan' \n cmdinterface = interface + ' ' + ipmi_cmd\n \n output = call_ipmi(cmdinterface, \"Fan\") \n \n if \"ErrorCode\" in output:\n return set_failure_dict(\"IPMI call error {0}\".format(output), completion_code.failure)\n \n fanrsp = {}\n \n if(output['status_code'] == 0):\n fan_data = output['stdout'].split('\\n') \n fanrsp = get_sensor_info(fan_data,'fan') \n else:\n error_data = output['stderr'].split('\\n') \n fanrsp[completion_code.cc_key] = completion_code.failure\n \n for data in error_data:\n if \"Error\" in data:\n fanrsp[completion_code.desc] = data.split(\":\")[-1]\n elif \"Completion Code\" in data:\n fanrsp[completion_code.ipmi_code] = data.split(\":\")[-1] \n \n if fanrsp is None or not fanrsp: # Check empty or none\n return set_failure_dict(\"Empty fan information\", completion_code.failure)\n \n return fanrsp\n \n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"show fan info Exception: \", e), completion_code.failure)\n\n# This method is using to get the show manager inventory or sh system health -s (server info)\ndef show_server_health(serverid, inventory = False):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n if \"Failed:\" in interface:\n return set_failure_dict(interface, completion_code.failure)\n \n ipmi_cmd = 'ocsoem redfish server' \n cmdinterface = interface + ' ' + ipmi_cmd\n \n output = call_ipmi(cmdinterface, \"server\") \n \n if \"ErrorCode\" in output:\n return set_failure_dict(\"IPMI call error {0}\".format(output), completion_code.failure)\n \n serverrsp = {}\n \n if(output['status_code'] == 0) or output['stdout']:\n server_data = output['stdout'].split('\\n') \n serverrsp = parse_server_details(server_data, inventory) \n else:\n error_data = output['stderr'].split('\\n') \n serverrsp[completion_code.cc_key] = completion_code.failure\n 
\n for data in error_data:\n if \"Error\" in data:\n serverrsp[completion_code.desc] = data.split(\":\")[-1]\n elif \"Completion Code\" in data:\n serverrsp[completion_code.ipmi_code] = data.split(\":\")[-1] \n \n if serverrsp is None or not serverrsp: # Check empty or none\n return set_failure_dict(\"Empty server information\", completion_code.failure)\n \n return serverrsp\n \n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"show server info Exception: \", e), completion_code.failure)\n\ndef parse_server_details(server, inventory):\n try:\n completionstate = True \n server_rsp = {}\n \n for value in server: \n if \"Completion Code\" in value:\n completionstate &= False\n elif \"Server communication type\" in value:\n server_type = value.split(\":\")[-1].strip()\n if server_type == \"IPMI\":\n server_rsp[\"Server Type\"] = \"C2010\"\n elif server_type == \"REST\":\n server_rsp[\"Server Type\"] = \"J2010\"\n else:\n server_rsp[\"Server Type\"] = \"Unknown\"\n completionstate &= False \n elif \"Slot Id\" in value:\n server_rsp[\"Server Slot ID\"] = value.split(\":\")[-1].strip()\n elif \"System Power State\" in value:\n server_rsp[\"Server State\"] = value.split(\":\")[-1].strip()\n elif inventory == True and \"GUID\" in value:\n guid = value.split(\":\")[-1].strip()\n if guid.lower().strip() == \"failure\":\n completionstate &= False \n server_rsp[\"UUID\"] = guid\n elif inventory == True and \"MAC1\" in value:\n mac1 = value.split(\":\")[-1].strip()\n if mac1.lower().strip() == \"failure\":\n completionstate &= False\n server_rsp[\"MAC1\"] = mac1\n \n except Exception,e:\n server_rsp[completion_code.cc_key] = completion_code.failure\n server_rsp[completion_code.desc] = \"Get Server Information, Exception: \", e\n return server_rsp \n \n if completionstate:\n server_rsp[completion_code.cc_key] = completion_code.success\n else:\n server_rsp[completion_code.cc_key] = completion_code.failure\n \n return server_rsp \n \ndef show_sensor_health(serverid):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n if \"Failed:\" in interface:\n return set_failure_dict(interface, completion_code.failure)\n \n ipmi_cmd = 'sdr elist' \n cmdinterface = interface + ' ' + ipmi_cmd\n \n output = call_ipmi(cmdinterface, \"sensor\") \n \n if \"ErrorCode\" in output:\n return set_failure_dict(\"IPMI call error {0}\".format(output), completion_code.failure)\n \n sensorrsp = {}\n \n if(output['status_code'] == 0):\n sensor_data = output['stdout'].split('\\n') \n sensorrsp = get_sensor_info(sensor_data)\n else:\n error_data = output['stderr'].split('\\n') \n sensorrsp[completion_code.cc_key] = completion_code.failure\n \n for data in error_data:\n if \"Error\" in data:\n sensorrsp[completion_code.desc] = data.split(\":\")[-1]\n elif \"Completion Code\" in data:\n sensorrsp[completion_code.ipmi_code] = data.split(\":\")[-1] \n \n if sensorrsp is None or not sensorrsp: # Check empty or none\n return set_failure_dict(\"Empty sensor information\", completion_code.failure)\n \n return sensorrsp\n \n except Exception, e:\n #Log_Error(\"Failed Exception:\",e)\n return set_failure_dict((\"show sensor info Exception: \", e), completion_code.failure)\n\ndef get_server_fru(serverid):\n \n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", completion_code.failure) \n else: \n interface = 
get_ipmi_interface(serverid)\n \n if \"Failed:\" in interface:\n return set_failure_dict(interface, completion_code.failure)\n \n # IPMI command to get FRU details\n cmdinterface = interface + ' ' + \"fru print\" \n \n fru_collection = parse_fru_data(cmdinterface, \"fru\")\n \n if fru_collection is None or not fru_collection: # Check empty or none\n return set_failure_dict(\"Empty Fru data\", completion_code.failure)\n \n except Exception, e:\n return set_failure_dict((\"Server fru Exception\",e), completion_code.failure)\n \n return fru_collection\n \ndef get_server_nicinfo(serverid):\n try:\n if serverid < 1 or serverid > 48:\n return set_failure_dict(\"Expected server-id between 1 to 48\", completion_code.failure) \n else: \n interface = get_ipmi_interface(serverid)\n \n if \"Failed:\" in interface:\n return set_failure_dict(interface, completion_code.failure)\n \n nic_collection = {}\n \n for i in range(1,3): \n ipmi_cmd = 'ocsoem nicinfo' + ' ' + str(i) # IPMI command to get server pcie details\n cmdinterface = interface + ' ' + ipmi_cmd \n get_nic = parse_nic(cmdinterface , \"nic\", str(i)) \n \n if get_nic is None or not get_nic: # Check empty or none\n nic_collection[completion_code.cc_key] = completion_code.failure\n \n nic_collection.update({i: get_nic})\n \n except Exception, e:\n return set_failure_dict((\"Server fru Exception\",e), completion_code.failure)\n \n nic_collection[completion_code.cc_key] = completion_code.success\n return nic_collection\n \ndef parse_fru_data(interface,command):\n try: \n output = call_ipmi(interface, command) \n \n if \"ErrorCode\" in output:\n return output\n \n fru_rsp = {}\n \n if(output['status_code'] == 0):\n sdata = output['stdout'].split('\\n') \n fru_rsp = get_fru_info(sdata) \n else:\n error_data = output['stderr'].split('\\n') \n fru_rsp[completion_code.cc_key] = completion_code.failure\n \n for data in error_data:\n if \"Error\" in data:\n fru_rsp[completion_code.desc] = data.split(\":\")[-1]\n elif \"Completion Code\" in data:\n fru_rsp[completion_code.ipmi_code] = data.split(\":\")[-1] \n \n except Exception, e:\n #log.exception(\"Exception error is: \",e)\n return set_failure_dict((\"parse_fru() Exception \",e), completion_code.failure) \n \n return fru_rsp\n\ndef get_fru_info(output): \n try:\n completionstate = True\n fru_rsp = {}\n \n for value in output:\n if \"Completion Code:\" in value:\n completionstate &= False\n elif \"Board Mfg Date\" in value:\n date = value.split(\":\")\n date.pop(0)\n fru_rsp[\"Board Mfg Date\"] = \":\".join(date)\n elif \"Board Mfg\" in value:\n fru_rsp[\"Board Mfg\"] = value.split(\":\")[-1].strip()\n elif \"Board Product\" in value:\n fru_rsp[\"Board Product\"] = value.split(\":\")[-1].strip()\n elif \"Board Serial\" in value:\n fru_rsp[\"Board Serial Number\"] = value.split(\":\")[-1].strip()\n elif \"Board Part Number\" in value:\n fru_rsp[\"Board Part Number\"] = value.split(\":\")[-1].strip()\n elif \"Product Asset Tag\" in value:\n fru_rsp[\"AssetTag\"] = value.split(\":\")[-1].strip()\n elif \"Product Manufacturer\" in value:\n fru_rsp[\"Manufacturer\"] = value.split(\":\")[-1].strip()\n elif \"Product Name\" in value:\n fru_rsp[\"Model\"] = value.split(\":\")[-1].strip()\n elif \"Product Part Number\" in value:\n fru_rsp[\"Product Part Number\"] = value.split(\":\")[-1].strip() \n elif \"Product Version\" in value:\n fru_rsp[\"Product Version\"] = value.split(\":\")[-1].strip()\n elif \"Product Serial\" in value: \n fru_rsp[\"Product Serial\"] = value.split(\":\")[-1].strip()\n \n if 
completionstate:\n            fru_rsp[completion_code.cc_key] = completion_code.success\n        else: \n            fru_rsp[completion_code.cc_key] = completion_code.failure\n        \n        return fru_rsp\n    except Exception,e:\n        #log.exception(\"Exception error is: %s \" %e)\n        fru_rsp[completion_code.cc_key] = completion_code.failure\n        fru_rsp[completion_code.desc] = \"Get fru info, Exception: \", e\n        return fru_rsp    \n    \ndef parse_memory(interface ,command):\n    try:    \n        output = call_ipmi(interface, command)    \n        \n        if \"ErrorCode\" in output:\n            return set_failure_dict(\"IPMI call error {0}\".format(output), completion_code.failure)\n        \n        memoryrsp = {}\n        \n        if(output['status_code'] == 0):\n            memory_data = output['stdout'].split('*')    \n            memory_value= filter(None, memory_data)    \n            memoryrsp = get_memory_health(memory_value)    \n            return memoryrsp\n        else:\n            error_data = output['stderr'].split('\\n')    \n            memoryrsp[completion_code.cc_key] = completion_code.failure\n            \n            for data in error_data:\n                if \"Error\" in data:\n                    memoryrsp[completion_code.desc] = data.split(\":\")[-1]\n                elif \"Completion Code\" in data:\n                    memoryrsp[completion_code.ipmi_code] = data.split(\":\")[-1]    \n        \n    except Exception, e:\n        #log.exception(\"Exception error is: \",e)\n        return set_failure_dict((\"parse_memory() Exception \",e), completion_code.failure)    \n    \ndef parse_pcie(interface ,command):\n    try:    \n        output = call_ipmi(interface, command)    \n        \n        if \"ErrorCode\" in output:\n            return output\n        pciersp = {}\n        \n        if(output['status_code'] == 0):\n            pcie_data = output['stdout'].split('*')    \n            pcie_value= filter(None, pcie_data)    \n            pciersp = get_pcie_info(pcie_value)    \n            return pciersp\n        else:\n            error_data = output['stderr'].split('\\n')    \n            pciersp[completion_code.cc_key] = completion_code.failure\n            \n            for data in error_data:\n                if \"Error\" in data:\n                    pciersp[completion_code.desc] = data.split(\":\")[-1]\n                elif \"Completion Code\" in data:\n                    pciersp[completion_code.ipmi_code] = data.split(\":\")[-1]    \n        \n    except Exception, e:\n        #log.exception(\"Exception error is: \",e)\n        return set_failure_dict((\"parse_pcie Exception \",e), completion_code.failure)    \n\ndef parse_nic(interface, command, nicid):\n    try:    \n        output = call_ipmi(interface, command+nicid)    \n        \n        if \"ErrorCode\" in output:\n            return output\n        \n        nicrsp = {}\n        \n        if(output['status_code'] == 0):\n            sdata = output['stdout'].strip()    \n            nicrsp[\"Device Id\"] = nicid\n            nicrsp[\"Mac Address\"] = sdata[:-1]\n        else:\n            error_data = output['stderr'].split('\\n')    \n            nicrsp[\"Device Id\"] = nicid    \n            nicrsp[completion_code.cc_key] = completion_code.failure\n            \n            for data in error_data:\n                if \"Error\" in data:\n                    nicrsp[completion_code.desc] = data.split(\":\")[-1]\n                elif \"Completion Code\" in data:\n                    nicrsp[completion_code.ipmi_code] = data.split(\":\")[-1]    \n        \n    except Exception, e:\n        #log.exception(\"Exception error is: \",e)\n        return set_failure_dict((\"parse_nic Exception \",e), completion_code.failure)    \n    \n    return 
nicrsp\n"},"avg_line_length":{"kind":"number","value":43.0759075908,"string":"43.075908"},"max_line_length":{"kind":"number","value":134,"string":"134"},"alphanum_fraction":{"kind":"number","value":0.5178261314,"string":"0.517826"}}},{"rowIdx":46223,"cells":{"hexsha":{"kind":"string","value":"b382ad15202e6d1f8333e684f4ffd5247e6fccf7"},"size":{"kind":"number","value":1658,"string":"1,658"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"research/cv/STGAN/eval.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":77,"string":"77"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:37.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T13:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"research/cv/STGAN/eval.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-30T14:44:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T06:57:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"research/cv/STGAN/eval.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":24,"string":"24"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-24T18:45:20.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\" Model Test \"\"\"\nimport tqdm\n\nfrom mindspore.common import set_seed\n\nfrom src.models import STGANModel\nfrom src.utils import get_args\nfrom src.dataset import CelebADataLoader\n\nset_seed(1)\n\ndef test():\n \"\"\" test function \"\"\"\n args = get_args(\"test\")\n print('\\n\\n=============== start testing ===============\\n\\n')\n data_loader = CelebADataLoader(args.dataroot,\n mode=args.phase,\n selected_attrs=args.attrs,\n batch_size=1,\n image_size=args.image_size)\n iter_per_epoch = len(data_loader)\n args.dataset_size = iter_per_epoch\n 
model = STGANModel(args)\n\n    for _ in tqdm.trange(iter_per_epoch, desc='Test Loop'):\n        data = next(data_loader.test_loader)\n        model.test(data, data_loader.test_set.get_current_filename())\n\n    print('\\n\\n=============== finish testing ===============\\n\\n')\n\n\nif __name__ == '__main__':\n    test()\n"},"avg_line_length":{"kind":"number","value":34.5416666667,"string":"34.541667"},"max_line_length":{"kind":"number","value":78,"string":"78"},"alphanum_fraction":{"kind":"number","value":0.6121833534,"string":"0.612183"}}},{"rowIdx":46224,"cells":{"hexsha":{"kind":"string","value":"b335e52a012fb8622b66b5023b129d0118cdc02f"},"size":{"kind":"number","value":922,"string":"922"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"LeetCode_problems/kth-smallest-element-in-a-bst/Solution.py"},"max_stars_repo_name":{"kind":"string","value":"gbrls/CompetitiveCode"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b6f1b817a655635c3c843d40bd05793406fea9c6"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":165,"string":"165"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-10-03T08:01:11.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T02:42:08.000Z"},"max_issues_repo_path":{"kind":"string","value":"LeetCode_problems/kth-smallest-element-in-a-bst/Solution.py"},"max_issues_repo_name":{"kind":"string","value":"gbrls/CompetitiveCode"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b6f1b817a655635c3c843d40bd05793406fea9c6"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":383,"string":"383"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-10-03T07:39:11.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-11-20T07:06:35.000Z"},"max_forks_repo_path":{"kind":"string","value":"LeetCode_problems/kth-smallest-element-in-a-bst/Solution.py"},"max_forks_repo_name":{"kind":"string","value":"gbrls/CompetitiveCode"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b6f1b817a655635c3c843d40bd05793406fea9c6"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":380,"string":"380"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-10-03T08:05:04.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-19T06:56:59.000Z"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 5 2020\n\n@author: Shrey1608\n\"\"\"\n# Approach: 1) Recursive inorder traversal, i.e. first do an inorder traversal (a form of DFS) and then just follow the recursion (time complexity = O(N); the recursion stack uses O(H) space, where H is the height of the tree)\n# It's a very straightforward approach with O(N) time complexity. The idea is to build an inorder traversal of the BST, which is an array sorted in ascending order. 
Now the answer is the (k-1)-th element of this array.\n\n\n# Solution: 1) Recursive inorder traversal\nclass Solution:\n    def inorder(self,root,output):\n        if root is None:\n            return\n        else:\n            self.inorder(root.left,output)\n            output.append(root.val)\n            self.inorder(root.right,output)\n    \n    def kthSmallest(self, root: TreeNode, k: int) -> int:\n        output=[]\n        self.inorder(root,output)\n        return output[k-1]"},"avg_line_length":{"kind":"number","value":38.4166666667,"string":"38.416667"},"max_line_length":{"kind":"number","value":234,"string":"234"},"alphanum_fraction":{"kind":"number","value":0.6561822126,"string":"0.656182"}}},{"rowIdx":46225,"cells":{"hexsha":{"kind":"string","value":"2fdddc7d0dd81d8a5e9af91722fed8c0a3fc930b"},"size":{"kind":"number","value":6443,"string":"6,443"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/main/python/qxy/rename.py"},"max_stars_repo_name":{"kind":"string","value":"gwdgithubnom/ox-patient"},"max_stars_repo_head_hexsha":{"kind":"string","value":"cddf4fe381cb4506db8e0d62803dd2044cf7ad92"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"src/main/python/qxy/rename.py"},"max_issues_repo_name":{"kind":"string","value":"gwdgithubnom/ox-patient"},"max_issues_repo_head_hexsha":{"kind":"string","value":"cddf4fe381cb4506db8e0d62803dd2044cf7ad92"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/main/python/qxy/rename.py"},"max_forks_repo_name":{"kind":"string","value":"gwdgithubnom/ox-patient"},"max_forks_repo_head_hexsha":{"kind":"string","value":"cddf4fe381cb4506db8e0d62803dd2044cf7ad92"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-04-14T00:45:38.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-04-14T00:45:38.000Z"},"content":{"kind":"string","value":"import os\r\nimport configparser\r\nfrom context import resource_manager\r\nfrom tools import file_manage\r\n\r\n\r\ndef rename_dir(url,reverse=True):\r\n    \"\"\"\r\n    Given the path of a directory, rename the sub-directories under it if the path exists\r\n    :param url: path of the directory given by the user\r\n    :param reverse: if reverse=True, restore the original names; if reverse=False, rename the sub-directories contained in the given directory;\r\n    :return:\r\n    \"\"\"\r\n    if _exist_(url)and reverse==False:\r\n        if not os.path.exists(url):\r\n            url = os.path.abspath(url)\r\n        a = 1\r\n        list_sto=_random_name(url,'D')\r\n        while(True):\r\n            if str(a) in list_sto:\r\n                a=a+1\r\n            else:\r\n                break\r\n        conf_sto = configparser.ConfigParser()\r\n        conf_sto.read('conf'+resource_manager.getSeparator()+'directory.ini')\r\n        for ob in list_sto:\r\n            try:\r\n                files=conf_sto.get(url,ob)\r\n                old=os.path.join(url,files);\r\n                filetype=os.path.splitext(files)[1];\r\n                d=a\r\n                if os.path.isdir(old):\r\n                    New=os.path.join(url,str(a)+filetype);\r\n                    a=str(a)+filetype\r\n                    os.rename(old, New);\r\n                    _store_(url,ob,a,'D')\r\n                a=d\r\n                a=a+1\r\n            except:\r\n                pass;\r\n    elif reverse==True and _exist_(url):\r\n        _reverse_(url,'D')\r\n\r\ndef 
rename_file(url,reverse=True):\r\n    \"\"\"\r\n    Given the path of a directory, rename the files under it if the path exists\r\n    :param url: path of the directory given by the user\r\n    :param reverse: if reverse=True, restore the original names; if reverse=False, rename the files contained in the given directory;\r\n    :return:\r\n    \"\"\"\r\n    if _exist_(url)and reverse==False:\r\n        if not os.path.exists(url):\r\n            url = os.path.abspath(url)\r\n        a = 1\r\n        list_sto=_random_name(url,'F')\r\n        conf_sto = configparser.ConfigParser()\r\n        conf_sto.read('conf'+resource_manager.getSeparator()+'factory.ini')\r\n        for ob in list_sto:\r\n            try:\r\n                files=conf_sto.get(url,ob)\r\n                old=os.path.join(url,files);\r\n                filetype=os.path.splitext(files)[1];\r\n                d=a\r\n                if os.path.isfile(old):\r\n                    New=os.path.join(url,str(a)+filetype);\r\n                    a=str(a)+filetype\r\n                    os.rename(old, New);\r\n                    _store_(url,ob, a,'F')\r\n                a=d\r\n                a=a+1\r\n            except:\r\n                pass;\r\n    elif reverse==True and _exist_(url):\r\n        _reverse_(url,'F')\r\n\r\n\r\n\"\"\"Restore the original names according to reverse\"\"\"\r\ndef _reverse_(doc_name,type):\r\n    \"\"\"\r\n    Restore the original names according to reverse\r\n    :param doc_name: path of the directory given by the user\r\n    :param type: files and directories are renamed separately depending on which method the user called; type='F' (file) operates on files, type='D' (directory) operates on directories\r\n    :return:\r\n    \"\"\"\r\n    try:\r\n        conf = configparser.ConfigParser()\r\n        if type=='D':\r\n            conf.read('conf'+resource_manager.getSeparator()+'directory.ini')\r\n        elif type=='F':\r\n            conf.read('conf'+resource_manager.getSeparator()+'factory.ini')\r\n        options = conf.options(doc_name)\r\n        if not os.path.exists(doc_name):\r\n            doc_name = os.path.abspath(doc_name)\r\n        for option in options:\r\n            try:\r\n                str_val = conf.get(doc_name,option )\r\n                New=os.path.join(doc_name,option);\r\n                old=os.path.join(doc_name,str_val);\r\n                os.rename(old,New);\r\n            except:\r\n                print(option+\" don't exist\")\r\n    except:\r\n        print(\"no document has been renamed\")\r\n\r\n\r\ndef _exist_(url):\r\n    \"\"\"\r\n    Check whether the given path exists; if a relative path is given, convert it to an absolute path (after the directory is not found)\r\n    :param url: path of the directory given by the user\r\n    :return:\r\n    \"\"\"\r\n    s=url;\r\n    if not os.path.exists(url):\r\n        s = os.path.abspath(url)\r\n    if os.path.exists(s) and os.path.isdir(s):\r\n        return True\r\n    else:\r\n        print(url + \" don't exist or isn't a dir\")\r\n\r\n\r\ndef _store_(doc_name,files,a,type):\r\n    \"\"\"\r\n    Store the oldname and newname of each renamed file as a section in directory.ini or factory.ini\r\n    (which file is used depends on the given type; type is set automatically when the user calls the corresponding method)\r\n    :param doc_name: path of the directory passed in by the user\r\n    :param files: name of a file or directory under the directory (the exact kind depends on type)\r\n    :param a: new code (name) of the file after renaming\r\n    :return:\r\n    \"\"\"\r\n    try:\r\n        config_write = configparser.ConfigParser()\r\n        if type=='D':\r\n            config_write.read('conf'+resource_manager.getSeparator()+'directory.ini')\r\n            ftest = open('conf'+resource_manager.getSeparator()+'directory.ini','w+')\r\n        elif type=='F':\r\n            config_write.read('conf'+resource_manager.getSeparator()+'factory.ini')\r\n            ftest = open('conf'+resource_manager.getSeparator()+'factory.ini','w+')\r\n        check=config_write.sections()\r\n        n=False\r\n        if doc_name in check:\r\n            n=True\r\n            config_write.set(doc_name,files,str(a))\r\n        if n==False:\r\n            config_write.add_section(doc_name)\r\n            config_write.set(doc_name,files,str(a))\r\n        config_write.write(ftest)\r\n        ftest.close()\r\n    except:\r\n        pass;\r\n\r\n\r\ndef _random_name(url,type):\r\n    \"\"\"\r\n    Randomly rename files or directories (to avoid rename failures caused by name collisions) (the exact kind depends on the given type; type is set automatically when the user calls the corresponding method)\r\n    :param url: address of the directory passed in by the user\r\n    :return: a list of the names of all files or directories in the directory before renaming\r\n    \"\"\"\r\n    doc=os.listdir(url)\r\n    for files in doc:\r\n        try:\r\n            filetype=os.path.splitext(files)[1]\r\n\r\n            if not os.path.exists(files):\r\n                old=url+resource_manager.getSeparator()+files\r\n            else:\r\n                
old=resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+url+resource_manager.getSeparator()+files\r\n\r\n            if os.path.isdir(old)and type=='D':\r\n                random=file_manage.random_string()\r\n                New=resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+url+resource_manager.getSeparator()+random\r\n                os.rename(old, New);\r\n                _store_(url,files,random+filetype,'D')\r\n            elif os.path.isfile(old)and type=='F':\r\n                random=file_manage.random_string()\r\n                New=resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+url+resource_manager.getSeparator()+random\r\n                os.rename(old, New);\r\n                _store_(url,files,random+filetype,'F')\r\n        except:\r\n            pass\r\n    list=doc\r\n    return list;\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    rename_file(url=resource_manager.Properties.getRootPath()+'qxy/test',reverse=False)\r\n    #rename_dir(url='qxy/otest',reverse=True)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"},"avg_line_length":{"kind":"number","value":33.041025641,"string":"33.041026"},"max_line_length":{"kind":"number","value":137,"string":"137"},"alphanum_fraction":{"kind":"number","value":0.5590563402,"string":"0.559056"}}},{"rowIdx":46226,"cells":{"hexsha":{"kind":"string","value":"4467b73c95d9b2c58955f1c54d33d5927feef2ce"},"size":{"kind":"number","value":510,"string":"510"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"PINp/2014/Koleganov_N_S/task_3_10.py"},"max_stars_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_stars_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"PINp/2014/Koleganov_N_S/task_3_10.py"},"max_issues_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_issues_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"PINp/2014/Koleganov_N_S/task_3_10.py"},"max_forks_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_forks_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Task 3. Variant 10.\n# Write a program that prints the name \"Игорь Васильевич Лотарев\" and\n# asks for his pseudonym. 
The program must concatenate these two strings and\n# print the resulting string, separating the name and the pseudonym with a dash.\n# Колеганов Никита Сергеевич\n# 29.05.2012\n\nname=input(\"Герой нашей программы - Игорь Васильевич Лотарев.\\nПод каким же именем мы знаем этого человека?\")\nprint(\"Ваш ответ:\", name)\nprint(\"Все верно: Игорь Васильевич Лотарев -\", name)\ninput(\"\\n\\nНажмите Enter для выхода.\")\n"},"avg_line_length":{"kind":"number","value":42.5,"string":"42.5"},"max_line_length":{"kind":"number","value":109,"string":"109"},"alphanum_fraction":{"kind":"number","value":0.768627451,"string":"0.768627"}}},{"rowIdx":46227,"cells":{"hexsha":{"kind":"string","value":"4476ba1bfd2fc1007b3f67456acc9f653ecc2fe3"},"size":{"kind":"number","value":650,"string":"650"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"challenges/blackrock/audit_sale.py"},"max_stars_repo_name":{"kind":"string","value":"PlamenHristov/HackerRank"},"max_stars_repo_head_hexsha":{"kind":"string","value":"2c875995f0d51d7026c5cf92348d9fb94fa509d6"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"challenges/blackrock/audit_sale.py"},"max_issues_repo_name":{"kind":"string","value":"PlamenHristov/HackerRank"},"max_issues_repo_head_hexsha":{"kind":"string","value":"2c875995f0d51d7026c5cf92348d9fb94fa509d6"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"challenges/blackrock/audit_sale.py"},"max_forks_repo_name":{"kind":"string","value":"PlamenHristov/HackerRank"},"max_forks_repo_head_hexsha":{"kind":"string","value":"2c875995f0d51d7026c5cf92348d9fb94fa509d6"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"def sale(securities, M, K):\n    sorted_sec = sorted(securities, key=lambda x: x[0] * x[1], reverse=True)\n    res = 0\n    for i in range(M):\n        if K > 0:\n            x, _ = sorted_sec[i]\n            y = 1\n        else:\n            x, y = sorted_sec[i]\n        K -= 1\n        res += x * y\n\n    print(res)\n\n\n# N, M, K = list(map(int, input().split()))\n#\n# securities = []\n# for _ in range(N):\n#     securities.append(tuple(map(int, input().split())))\n# print(securities)\n\n\ndef test():\n    N, M, K = 3, 2, 1\n    price = [(5, 10), (6, 60), (8, 40)]\n    sale(price, M, K)\n    # 1116/100\n\n\nif __name__ == '__main__':\n    
test()\n"},"avg_line_length":{"kind":"number","value":19.696969697,"string":"19.69697"},"max_line_length":{"kind":"number","value":76,"string":"76"},"alphanum_fraction":{"kind":"number","value":0.4630769231,"string":"0.463077"}}},{"rowIdx":46228,"cells":{"hexsha":{"kind":"string","value":"92547dfdd13037b67a68166a90b4115762bfd97f"},"size":{"kind":"number","value":453,"string":"453"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"books/PythonAutomate/excel_spreadsheets/cell_inverter.py"},"max_stars_repo_name":{"kind":"string","value":"zeroam/TIL"},"max_stars_repo_head_hexsha":{"kind":"string","value":"43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"books/PythonAutomate/excel_spreadsheets/cell_inverter.py"},"max_issues_repo_name":{"kind":"string","value":"zeroam/TIL"},"max_issues_repo_head_hexsha":{"kind":"string","value":"43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"books/PythonAutomate/excel_spreadsheets/cell_inverter.py"},"max_forks_repo_name":{"kind":"string","value":"zeroam/TIL"},"max_forks_repo_head_hexsha":{"kind":"string","value":"43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"cell_inverter.py\nInvert (transpose) the cells of a spreadsheet\n\"\"\"\nimport sys\nimport openpyxl\n\nif len(sys.argv) != 2:\n print(f\"python {__file__} <*.xlsx>\")\n sys.exit(1)\n\nfile_name = sys.argv[1]\n\nwb = openpyxl.load_workbook(file_name)\nsheet = wb.active\n\nnew_wb = openpyxl.Workbook()\nnew_sheet = new_wb.active\n\nfor row_obj in sheet.rows:\n for cell in row_obj:\n new_sheet.cell(row=cell.column, column=cell.row).value = cell.value\n\nnew_wb.save(f\"reversed_{file_name}\")\n"},"avg_line_length":{"kind":"number","value":18.875,"string":"18.875"},"max_line_length":{"kind":"number","value":75,"string":"75"},"alphanum_fraction":{"kind":"number","value":0.7041942605,"string":"0.704194"}}},
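# --- Editor's note (added material, not part of the dataset records) ----------
# The cell_inverter.py record above "inverts" a sheet by rewriting every value
# with its row/column indices swapped. A minimal self-contained sketch of the
# same idea; the sample values and output file name here are made up:
import openpyxl

src = openpyxl.Workbook()
ws = src.active
for r in range(1, 4):
    for c in range(1, 3):
        ws.cell(row=r, column=c).value = "r{}c{}".format(r, c)

dst = openpyxl.Workbook()
dst_ws = dst.active
for row in ws.rows:
    for cell in row:
        # the value at (row, column) lands at (column, row)
        dst_ws.cell(row=cell.column, column=cell.row).value = cell.value
dst.save("transposed_demo.xlsx")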
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-03-22T22:25:54.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-03-22T22:25:54.000Z"},"max_issues_repo_path":{"kind":"string","value":"udacity course code/01-06-plottwohistograms.py"},"max_issues_repo_name":{"kind":"string","value":"bluemurder/mlfl"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b895b2f1d01b0f6418a5bcee2f204dd7916062f0"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":6,"string":"6"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2017-01-16T09:53:21.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2017-01-18T12:20:09.000Z"},"max_forks_repo_path":{"kind":"string","value":"udacity course code/01-06-plottwohistograms.py"},"max_forks_repo_name":{"kind":"string","value":"bluemurder/mlfl"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b895b2f1d01b0f6418a5bcee2f204dd7916062f0"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"Plot a couple of histogram.\"\"\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom util import get_data, plot_data, compute_daily_returns\n\ndef test_run():\n # Read data\n dates = pd.date_range('2009-01-01', '2012-12-31') # date range as index\n symbols = ['SPY','XOM']\n df = get_data(symbols, dates) # get data for each symbol\n #plot_data(df)\n\n # Compute daily returns\n daily_returns = compute_daily_returns(df)\n #plot_data(daily_returns, title = \"Daily returns\", ylabel = \"Daily returns\")\n\n # Compute and plot a couple of histograms on same chart\n daily_returns['SPY'].hist(bins = 20, label = 'SPY')\n daily_returns['XOM'].hist(bins = 20, label = 'XOM')\n plt.legend(loc = 'upper right')\n plt.show()\n\nif __name__ == \"__main__\":\n test_run()\n"},"avg_line_length":{"kind":"number","value":30.3461538462,"string":"30.346154"},"max_line_length":{"kind":"number","value":80,"string":"80"},"alphanum_fraction":{"kind":"number","value":0.6717363752,"string":"0.671736"}}},{"rowIdx":46230,"cells":{"hexsha":{"kind":"string","value":"946b4c33baf96dd37d1d32b582a875e2a52db092"},"size":{"kind":"number","value":3140,"string":"3,140"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/deal_features.py"},"max_stars_repo_name":{"kind":"string","value":"Times125/Emotion-Analyse"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b5d9f23fdf6c75f57f5cf20d58834a095b0c7e1e"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":11,"string":"11"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-01-16T06:39:00.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-28T11:46:41.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/deal_features.py"},"max_issues_repo_name":{"kind":"string","value":"Times125/Emotion-Analyse"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b5d9f23fdf6c75f57f5cf20d58834a095b0c7e1e"},"max_issues_repo_licenses":{"kind":"list 
like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/deal_features.py"},"max_forks_repo_name":{"kind":"string","value":"Times125/Emotion-Analyse"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b5d9f23fdf6c75f57f5cf20d58834a095b0c7e1e"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-08-16T14:53:37.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-08-17T02:01:22.000Z"},"content":{"kind":"string","value":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author:lch02\n@Time: 2017/12/26 9:51\n@Description: \n\"\"\"\nimport itertools\nimport os\nimport pickle\nimport config\nfrom nltk.collocations import BigramCollocationFinder\nfrom nltk.metrics import BigramAssocMeasures\nfrom config import test_path\nfrom nltk.probability import FreqDist, ConditionalFreqDist\n__author__ = 'lch02'\n\n\n\"\"\"\n计算单个词和双词搭配的贡献(信息量\n\"\"\"\ndef word_bigram_scores():\n pos_data = pickle.load(open(os.path.join(test_path, 'pos_review.pkl'), 'rb'))\n neg_data = pickle.load(open(os.path.join(test_path, 'neg_review.pkl'), 'rb'))\n\n pos_words = list(itertools.chain(*pos_data))\n neg_words = list(itertools.chain(*neg_data))\n\n pos_bigram_finder = BigramCollocationFinder.from_words(pos_words)\n neg_bigram_finder = BigramCollocationFinder.from_words(neg_words)\n\n pos_bigrams = pos_bigram_finder.nbest(BigramAssocMeasures.chi_sq, config.bigram_scores_threshold)\n neg_bigrams = neg_bigram_finder.nbest(BigramAssocMeasures.chi_sq, config.bigram_scores_threshold)\n\n pos_words.extend(pos_bigrams)\n neg_words.extend(neg_bigrams)\n\n word_tf = FreqDist() # 统计所有词频\n con_word_tf = ConditionalFreqDist() # 统计每个词的概率分布\n\n for word in pos_words:\n word_tf[word] += 1\n con_word_tf['pos'][word] += 1\n for word in neg_words:\n word_tf[word] += 1\n con_word_tf['neg'][word] += 1\n pos_word_count = con_word_tf['pos'].N() # 积极词的数量\n neg_word_count = con_word_tf['neg'].N() # 消极词的数量\n total_word_count = pos_word_count + neg_word_count # 总词\n bigram_scores_dict = {}\n for word, freq in word_tf.iteritems():\n pos_score = BigramAssocMeasures.chi_sq(con_word_tf['pos'][word], (freq, pos_word_count), total_word_count) # 计算积极词的卡方统计量\n neg_score = BigramAssocMeasures.chi_sq(con_word_tf['neg'][word], (freq, neg_word_count), total_word_count) # 计算消极词的卡方统计量\n bigram_scores_dict[word] = pos_score + neg_score\n return bigram_scores_dict\n\n\"\"\"\n选择贡献最大的特征\n\"\"\"\ndef get_best_words(scores_dict, threshold=10000):\n best = sorted(scores_dict.iteritems(), key=lambda (word, score): score, reverse=True)[:threshold] # 从大到小排列,选择前10000个\n best_words = set([w for w, s in best])\n return best_words\n\n\"\"\" \n选择1:最有信息量的单个词作为特征\n\"\"\"\ndef best_words_features(words):\n if config.best_words is None:\n config.best_words = pickle.load(open(os.path.join(config.test_path, 'best_feats.pkl'), 'rb'))\n lst = []\n for word in words:\n if word in config.best_words:\n lst.append((word, True))\n else:\n lst.append((word, False))\n return dict(lst)\n\n\"\"\"\n选择2:把所有词和双词搭配一起作为特征\n\"\"\"\ndef best_bigram_words_features(words, score_fn=BigramAssocMeasures.chi_sq, n=1500):\n try:\n bigram_finder = 
BigramCollocationFinder.from_words(words)\n bigrams = bigram_finder.nbest(score_fn, n)\n except ZeroDivisionError:\n # an empty or too-short token list makes chi_sq divide by zero; pad and retry\n words.append(' ')\n bigram_finder = BigramCollocationFinder.from_words(words)\n bigrams = bigram_finder.nbest(score_fn, n)\n d = dict([(bigram, True) for bigram in bigrams])\n d.update(best_words_features(words))\n return d\n"},"avg_line_length":{"kind":"number","value":33.4042553191,"string":"33.404255"},"max_line_length":{"kind":"number","value":130,"string":"130"},"alphanum_fraction":{"kind":"number","value":0.7041401274,"string":"0.70414"}}},{"rowIdx":46231,"cells":{"hexsha":{"kind":"string","value":"0474aff44e74e67aa3b8adf050d40e168b3d908a"},"size":{"kind":"number","value":960,"string":"960"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"kiosk/migrations/0007_kontakt_nachricht.py"},"max_stars_repo_name":{"kind":"string","value":"AndiBr/ffksk"},"max_stars_repo_head_hexsha":{"kind":"string","value":"ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"kiosk/migrations/0007_kontakt_nachricht.py"},"max_issues_repo_name":{"kind":"string","value":"AndiBr/ffksk"},"max_issues_repo_head_hexsha":{"kind":"string","value":"ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":14,"string":"14"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-09-12T06:59:55.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-02-26T07:17:48.000Z"},"max_forks_repo_path":{"kind":"string","value":"kiosk/migrations/0007_kontakt_nachricht.py"},"max_forks_repo_name":{"kind":"string","value":"AndiBr/ffksk"},"max_forks_repo_head_hexsha":{"kind":"string","value":"ff4bc4ad26d4571eaa1a6ff815b2e6a876f8ba99"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-06-17 23:47\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('kiosk', '0006_produktpalette_imverkauf'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Kontakt_Nachricht',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=40)),\n ('email', models.EmailField(max_length=254, verbose_name='E-Mail-Adresse')),\n ('gesendet', models.DateTimeField(auto_now_add=True)),\n ('betreff', models.TextField(blank=True, max_length=128)),\n ('text', models.TextField(max_length=1024)),\n ('beantwortet', models.BooleanField(default=False)),\n ],\n ),\n ]\n"},"avg_line_length":{"kind":"number","value":34.2857142857,"string":"34.285714"},"max_line_length":{"kind":"number","value":114,"string":"114"},"alphanum_fraction":{"kind":"number","value":0.6010416667,"string":"0.601042"}}},
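# --- Editor's note (added material) -------------------------------------------
# How the chi-square scoring in the deal_features.py record above is consumed,
# shown on toy data and in Python 3 syntax (items() instead of iteritems()):
from nltk.metrics import BigramAssocMeasures
from nltk.probability import ConditionalFreqDist, FreqDist

word_tf = FreqDist()
cond_tf = ConditionalFreqDist()
for w in ["good", "great", "fine", "good"]:
    word_tf[w] += 1
    cond_tf["pos"][w] += 1
for w in ["bad", "awful", "fine"]:
    word_tf[w] += 1
    cond_tf["neg"][w] += 1

pos_n, neg_n = cond_tf["pos"].N(), cond_tf["neg"].N()
total = pos_n + neg_n
scores = {}
for w, freq in word_tf.items():
    # chi_sq(observed count in class, (word total, class total), grand total)
    scores[w] = (BigramAssocMeasures.chi_sq(cond_tf["pos"][w], (freq, pos_n), total)
                 + BigramAssocMeasures.chi_sq(cond_tf["neg"][w], (freq, neg_n), total))
best = {w for w, _ in sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:3]}
print(best)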
]\n"},"avg_line_length":{"kind":"number","value":34.2857142857,"string":"34.285714"},"max_line_length":{"kind":"number","value":114,"string":"114"},"alphanum_fraction":{"kind":"number","value":0.6010416667,"string":"0.601042"}}},{"rowIdx":46232,"cells":{"hexsha":{"kind":"string","value":"1ee361bc5f016feba4576a04b68aff16068863af"},"size":{"kind":"number","value":6200,"string":"6,200"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"components/py_engine/adapter/esp32/m5stackcore2/lib/i2c_bus.py"},"max_stars_repo_name":{"kind":"string","value":"wstong999/AliOS-Things"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"components/py_engine/adapter/esp32/m5stackcore2/lib/i2c_bus.py"},"max_issues_repo_name":{"kind":"string","value":"wstong999/AliOS-Things"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"components/py_engine/adapter/esp32/m5stackcore2/lib/i2c_bus.py"},"max_forks_repo_name":{"kind":"string","value":"wstong999/AliOS-Things"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from machine import I2C\nfrom machine import Pin\nfrom micropython import const\nimport struct\n#import unit\n\n#PORTA = (unit.PORTA)\n\nPAHUB0 = (0, None)\nPAHUB1 = (1, None)\nPAHUB2 = (2, None)\nPAHUB3 = (3, None)\nPAHUB4 = (4, None)\nPAHUB5 = (5, None)\n\nM_BUS = (21, 22)\n\nUINT8LE = const((0 << 6) | (1 << 4) | 1)\nUINT16LE = const((0 << 6) | (1 << 4) | 2)\nUINT32LE = const((0 << 6) | (1 << 4) | 4)\nINT8LE = const((0 << 6) | (0 << 4) | 1)\nINT16LE = const((0 << 6) | (0 << 4) | 2)\nINT32LE = const((0 << 6) | (0 << 4) | 4)\nUINT8BE = const((1 << 6) | (1 << 4) | 1)\nUINT16BE = const((1 << 6) | (1 << 4) | 2)\nUINT32BE = const((1 << 6) | (1 << 4) | 4)\nINT8BE = const((1 << 6) | (0 << 4) | 1)\nINT16BE = const((1 << 6) | (0 << 4) | 2)\nINT32BE = const((1 << 6) | (0 << 4) | 4)\n\ndef get(port, pos=0, freq=400000, device_in=False):\n if port[1] is None:\n return Pahub_I2C(port[0])\n if device_in or port == (21, 22):\n return I2C(1, sda=Pin(port[0]), scl=Pin(port[1]), freq=freq)\n else:\n return I2C(0, sda=Pin(port[0]), scl=Pin(port[1]), freq=freq)\n\nclass easyI2C():\n def __init__(self, port, addr, freq=400000):\n self.i2c = get(port, pos=0, freq=freq)\n self.addr = addr\n\n def write_u8(self, reg, data):\n buf = bytearray(1)\n buf[0] = data\n self.i2c.writeto_mem(self.addr, reg, buf)\n\n def write_u16(self, reg, data, byteorder='big'):\n buf = bytearray(2)\n encode = 'h'\n struct.pack_into(encode, buf, 0, 
data)\n self.i2c.writeto_mem(self.addr, reg, buf)\n\n def write_u32(self, reg, data, byteorder='big'):\n buf = bytearray(4)\n encode = '>i' if byteorder == 'big' else '<i' # honor byteorder (was hard-coded 'i')\n struct.pack_into(encode, buf, 0, data)\n self.i2c.writeto_mem(self.addr, reg, buf)\n\n def read_u8(self, reg):\n return self.i2c.readfrom_mem(self.addr, reg, 1)[0]\n\n def read_u16(self, reg, byteorder='big'):\n buf = bytearray(2)\n self.i2c.readfrom_mem_into(self.addr, reg, buf)\n encode = '>h' if byteorder == 'big' else '<h' # honor byteorder\n return struct.unpack(encode, buf)[0]\n\n def read_u32(self, reg, byteorder='big'):\n buf = bytearray(4)\n self.i2c.readfrom_mem_into(self.addr, reg, buf)\n encode = '>i' if byteorder == 'big' else '<i' # honor byteorder\n return struct.unpack(encode, buf)[0]\n\n def read(self, num):\n return self.i2c.readfrom(self.addr, num)\n\n def read_reg(self, reg, num):\n return self.i2c.readfrom_mem(self.addr, reg, num)\n\n @staticmethod\n def _get_format_str(format_type):\n format_str = '>' if (format_type & (1 << 6)) else '<'\n format_str += {1: 'b', 2: 'h', 4: 'i'}.get(format_type & 0x0f)\n format_str = format_str.upper() if (format_type & (1 << 4)) else format_str\n return format_str\n \n def write_mem_data(self, reg, data, format_type):\n format_str = self._get_format_str(format_type)\n buf = bytearray(struct.pack(format_str, data))\n self.i2c.writeto_mem(self.addr, reg, buf)\n\n def write_data(self, data, format_type):\n format_str = self._get_format_str(format_type)\n buf = bytearray(struct.pack(format_str, data))\n self.i2c.writeto(self.addr, buf)\n\n def write_list(self, data):\n buf = bytearray(data)\n self.i2c.writeto(self.addr, buf)\n\n def write_mem_list(self, reg, data, num):\n buf = bytearray(data)\n self.i2c.writeto_mem(self.addr, reg, buf)\n\n def read_data(self, num, format_type):\n format_str = self._get_format_str(format_type)\n format_str = format_str[0] + format_str[1] * num\n buf = bytearray((format_type & 0x0f) * num)\n self.i2c.readfrom_into(self.addr, buf)\n return struct.unpack(format_str, buf)\n\n def read_mem_data(self, reg, num, format_type):\n format_str = self._get_format_str(format_type)\n format_str = format_str[0] + format_str[1] * num\n buf = bytearray((format_type & 0x0f) * num)\n self.i2c.readfrom_mem_into(self.addr, reg, buf)\n return struct.unpack(format_str, buf)\n\n def scan(self):\n return self.i2c.scan()\n\n def available(self):\n return self.i2c.is_ready(self.addr)\n\nclass Pahub_I2C:\n def __init__(self, pos, port=(32, 33), freq=100000): # PORTA (32, 33)\n from units import _pahub\n self.pahub = _pahub.Pahub(port)\n self.i2c = get(port, freq=freq)\n self.pos = pos\n\n def readfrom(self, addr, num):\n self.pahub.select_only_on(self.pos)\n data = self.i2c.readfrom(addr, num)\n return data\n\n def readfrom_into(self, addr, buf):\n buf_in = bytearray(len(buf))\n self.pahub.select_only_on(self.pos)\n self.i2c.readfrom_into(addr, buf_in)\n for i in range(len(buf)):\n buf[i] = buf_in[i]\n\n def readfrom_mem_into(self, addr, reg, buf):\n buf_in = bytearray(len(buf))\n self.pahub.select_only_on(self.pos)\n self.i2c.readfrom_mem_into(addr, reg, buf_in)\n for i in range(len(buf)):\n buf[i] = buf_in[i]\n\n def readfrom_mem(self, addr, reg, num):\n self.pahub.select_only_on(self.pos)\n data = self.i2c.readfrom_mem(addr, reg, num)\n return data\n\n def writeto_mem(self, addr, reg, data):\n self.pahub.select_only_on(self.pos)\n self.i2c.writeto_mem(addr, reg, data)\n\n def writeto(self, addr, data):\n self.pahub.select_only_on(self.pos)\n self.i2c.writeto(addr, data)\n\n def is_ready(self, addr):\n self.pahub.select_only_on(self.pos)\n data = self.i2c.is_ready(addr)\n return data\n\n def 
scan(self):\n self.pahub.select_only_on(self.pos)\n data = self.i2c.scan()\n return data\n\n def available(self, addr):\n # Pahub_I2C is a hub, not a single device: the caller supplies the address\n # (the previous body referenced a nonexistent self.addr)\n return self.is_ready(addr)\n\n def deinit(self):\n pass\n\nclass Unit(Exception):\n pass\n\nclass UnitI2C:\n def __init__(self, port, freq, addr):\n self.i2c = easyI2C(port, addr, freq)\n\n def _check_device(self):\n # probe twice: some units only answer after the first attempt\n if not (self.i2c.available() or self.i2c.available()):\n raise Unit(\"{} unit not found\".format(type(self).__qualname__.upper()))\n\n def deinit(self):\n pass\n\n"},"avg_line_length":{"kind":"number","value":31.3131313131,"string":"31.313131"},"max_line_length":{"kind":"number","value":83,"string":"83"},"alphanum_fraction":{"kind":"number","value":0.5977419355,"string":"0.597742"}}},
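# --- Editor's note (added material) -------------------------------------------
# Typical use of easyI2C from the i2c_bus.py record above. This only runs on
# MicroPython hardware (M5Stack Core2 / ESP32), so it is left commented out;
# the device address and register numbers are made-up placeholders:
#
# import i2c_bus
#
# dev = i2c_bus.easyI2C(i2c_bus.M_BUS, addr=0x68, freq=400000)
# if dev.available():
#     chip_id = dev.read_u8(0x75)                          # one-register read
#     dev.write_u16(0x10, 0x1234, byteorder='big')         # big-endian 16-bit write
#     vals = dev.read_mem_data(0x3B, 3, i2c_bus.INT16BE)   # three big-endian int16s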
{"rowIdx":46233,"cells":{"hexsha":{"kind":"string","value":"78b630f6accb702a9583ce0777f4bb8ab85a5834"},"size":{"kind":"number","value":502,"string":"502"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"hrp.py"},"max_stars_repo_name":{"kind":"string","value":"banachtech/hrp"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b3ce4f3f35973e808eaea8a1f8b42c98ac691364"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"hrp.py"},"max_issues_repo_name":{"kind":"string","value":"banachtech/hrp"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b3ce4f3f35973e808eaea8a1f8b42c98ac691364"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"hrp.py"},"max_forks_repo_name":{"kind":"string","value":"banachtech/hrp"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b3ce4f3f35973e808eaea8a1f8b42c98ac691364"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import argparse, numpy as np, pandas as pd\r\nfrom hrputils import calchrp\r\n\r\nmy_parser = argparse.ArgumentParser()\r\nmy_parser.add_argument('-i', help='input csv file with returns', required=True)\r\nmy_parser.add_argument('-o', help='output csv file with hrp weights, defaults to weights.csv in the same folder', default='weights.csv')\r\nargs = my_parser.parse_args()\r\n\r\nx = np.loadtxt(args.i,delimiter=',', dtype=float)\r\nw = calchrp(x)\r\nprint(w.sort_index().values)\r\nw.to_csv(args.o, index=False, sep=',')"},"avg_line_length":{"kind":"number","value":41.8333333333,"string":"41.833333"},"max_line_length":{"kind":"number","value":137,"string":"137"},"alphanum_fraction":{"kind":"number","value":0.7330677291,"string":"0.733068"}}},{"rowIdx":46234,"cells":{"hexsha":{"kind":"string","value":"78cc7d959318c6e14d17c6fd8da823301d5d9bd5"},"size":{"kind":"number","value":8063,"string":"8,063"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Webpage/Django/settings.py"},"max_stars_repo_name":{"kind":"string","value":"ASV-Aachen/Website"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bbfc02d71dde67fdf89a4b819b795a73435da7cf"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Webpage/Django/settings.py"},"max_issues_repo_name":{"kind":"string","value":"ASV-Aachen/Website"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bbfc02d71dde67fdf89a4b819b795a73435da7cf"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":46,"string":"46"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2022-01-08T12:03:24.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-30T08:51:05.000Z"},"max_forks_repo_path":{"kind":"string","value":"Webpage/Django/settings.py"},"max_forks_repo_name":{"kind":"string","value":"ASV-Aachen/Website"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bbfc02d71dde67fdf89a4b819b795a73435da7cf"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nDjango settings for Django project.\n\nGenerated by 'django-admin startproject' using Django 3.1.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\nimport os\nfrom pathlib import Path\nimport crispy_forms\nimport json\nfrom django.urls import reverse_lazy\nimport sys\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ[\"DEBUG\"] == \"True\"\n\nHost = os.environ[\"Host\"]\nALLOWED_HOSTS = []\nALLOWED_HOSTS.append(\".\" + os.environ[\"ALLOWED_HOSTS\"])\nALLOWED_HOSTS.append(\"webpage:8080\")\nALLOWED_HOSTS.append(\"webpage\")\n\nX_FRAME_OPTIONS = 'ALLOWALL'\n\nXS_SHARING_ALLOWED_METHODS = ['POST','GET']\nOIDC_VERIFY_SSL = False\n# Connection to Keycloak as OIDC\n\nOIDC_RP_CLIENT_ID = os.environ[\"OIDC_RP_CLIENT_ID\"]\nOIDC_RP_CLIENT_SECRET = os.environ[\"OIDC_RP_CLIENT_SECRET\"]\nOIDC_RP_SIGN_ALGO = os.environ[\"OIDC_RP_SIGN_ALGO\"]\n\n# 
OIDC_RP_IDP_SIGN_KEY = '-----BEGIN CERTIFICATE----- -----END CERTIFICATE-----'\n\nOIDC_OP_JWKS_ENDPOINT = Host + '/sso/auth/realms/ASV/protocol/openid-connect/certs'\nOIDC_RP_SCOPES = 'openid email profile'\n\nOIDC_OP_AUTHORIZATION_ENDPOINT = Host + '/sso/auth/realms/ASV/protocol/openid-connect/auth'\nOIDC_OP_TOKEN_ENDPOINT = Host + '/sso/auth/realms/ASV/protocol/openid-connect/token'\nOIDC_OP_USER_ENDPOINT = Host + '/sso/auth/realms/ASV/protocol/openid-connect/userinfo'\n\n\n# Provided by mozilla-django-oidc\nLOGIN_URL = reverse_lazy('oidc_authentication_callback')\n\n# App urls\nLOGIN_REDIRECT_URL = Host\nLOGOUT_REDIRECT_URL = Host + \"/auth/realms/ASV/protocol/openid-connect/logout?redirect_uri=\" + Host\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\nOIDC_CLOCK_SKEW = 560\n\nsys.modules['fontawesome_free'] = __import__('fontawesome-free')\n\n\n# Application definition\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'crispy_forms',\n 'fontawesome_free',\n # 'django-filter',\n # 'django.contrib.sites',\n 'ajax_select',\n\n # ASV-Apps\n 'utils',\n 'web',\n 'blog',\n 'member',\n 'mozilla_django_oidc',\n 'tinymce',\n 'filebrowser',\n 'django_resized',\n 'simple_history',\n # 'arbeitsstunden',\n 'jollen',\n 'cruises',\n 'api',\n]\n\n\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'custom.customOIDCAB.MyOIDCAB',\n 'mozilla_django_oidc.auth.OIDCAuthenticationBackend',\n)\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'mozilla_django_oidc.middleware.SessionRefresh',\n 'simple_history.middleware.HistoryRequestMiddleware',\n]\n\nROOT_URLCONF = 'Django.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [BASE_DIR / 'templates'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n 'libraries':{\n 'createMenuObject': 'utils.menu',\n 'has_group': 'utils.tags',\n 'getLeftFront': 'utils.tags',\n 'getRightFront': 'utils.tags',\n 'genderTitel': 'utils.member',\n },\n },\n },\n]\n\nWSGI_APPLICATION = 'Django.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': 'websiteDB',\n 'USER': os.environ[\"MYSQL_USER\"],\n 'PASSWORD': os.environ[\"MYSQL_PASSWORD\"],\n 'HOST': 'db', # Or an IP Address that your DB is hosted on\n 'PORT': 3306,\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n 
{\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n# FORCE_SCRIPT_NAME = '/webpage'\nSTATIC_URL = '/static/'\n\n# Add these new lines\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static/'),\n)\n\nSTATIC_ROOT = '/webpage/static'\n# os.path.join(BASE_DIR, 'staticfiles')\n\n# FORCE_SCRIPT_NAME = \"/webpage/\"\n\nFILEBROWSER_DIRECTORY = ''\nDIRECTORY = ''\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media/')\n# MEDIA_ROOT = \"/media/\"\nMEDIA_URL = \"/media/\"\n\nSITE_ID = 2\n\n\n\"\"\"TinyMCE\"\"\"\n\nTINYMCE_JS_URL = os.path.join(STATIC_URL, \"tinymce/tinymce.min.js\")\nTINYMCE_JS_ROOT = os.path.join(STATIC_URL, \"tinymce/\")\n\nTINYMCE_DEFAULT_CONFIG = {\n \"content_css\": \"/static/css/tinymce/asv-content.css\",\n \"height\": \"500px\",\n \"images_upload_url\": '/upload_image/',\n \"width\": \"Auto\",\n \"menubar\": \"file edit view insert format tools table help\",\n \"plugins\": \"advlist autolink lists link image charmap print preview anchor searchreplace visualblocks code \"\n \"fullscreen insertdatetime media table paste code help wordcount spellchecker\",\n \"toolbar\": \"undo redo | bold italic underline strikethrough | fontselect fontsizeselect formatselect | alignleft \"\n \"aligncenter alignright alignjustify | outdent indent | numlist bullist checklist | forecolor \"\n \"backcolor casechange permanentpen formatpainter removeformat | pagebreak | charmap emoticons | \"\n \"fullscreen preview save print | insertfile image media pageembed template link anchor codesample | \"\n \"a11ycheck ltr rtl | showcomments addcomment code\",\n \"custom_undo_redo_levels\": 10,\n 'fontsize_formats': \"8pt 10pt 11pt 12pt 13pt 14pt 16pt 18pt 20pt 24pt 36pt\",\n 'font_formats': \"ASV=ASV,Verdana,Arial,Helvetica,sans-serif;\" +\n# \"Arial Black=arial black,avant garde;\" +\n# \"Book Antiqua=book antiqua,palatino;\" +\n# \"Comic Sans MS=comic sans ms,sans-serif;\" +\n# \"Courier New=courier new,courier;\" +\n# \"Georgia=georgia,palatino;\" +\n# \"Helvetica=helvetica;\" +\n# \"Impact=impact,chicago;\" +\n# \"Symbol=symbol;\" +\n# \"Tahoma=tahoma,arial,helvetica,sans-serif;\" +\n# \"Terminal=terminal,monaco;\" +\n# \"Times New Roman=times new roman,times;\" +\n \"Trebuchet MS=trebuchet ms,geneva;\" +\n# \"Verdana=verdana,geneva;\" +\n \"Webdings=webdings;\" +\n \"Wingdings=wingdings,zapf dingbats\",\n}\nTINYMCE_SPELLCHECKER = False # TODO: Check whether spell checking can be implemented.\nTINYMCE_COMPRESSOR = True\nTINYMCE_STYLE_FORMATS_MERGE = 
True"},"avg_line_length":{"kind":"number","value":30.5416666667,"string":"30.541667"},"max_line_length":{"kind":"number","value":118,"string":"118"},"alphanum_fraction":{"kind":"number","value":0.6905618256,"string":"0.690562"}}},{"rowIdx":46235,"cells":{"hexsha":{"kind":"string","value":"bf1c0ecd66d57162de1744dc7f1f0072c469a461"},"size":{"kind":"number","value":1522,"string":"1,522"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"official/recommend/tbnet/src/config.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":77,"string":"77"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:37.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T13:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"official/recommend/tbnet/src/config.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-30T14:44:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T06:57:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"official/recommend/tbnet/src/config.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":24,"string":"24"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-24T18:45:20.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"TBNet configurations.\"\"\"\n\nimport json\n\n\nclass TBNetConfig:\n \"\"\"\n TBNet config file parser and holder.\n\n Args:\n config_path (str): json config file path.\n \"\"\"\n def __init__(self, config_path):\n with open(config_path) as f:\n json_dict = json.load(f)\n self.num_item = int(json_dict['num_item'])\n self.num_relation = int(json_dict['num_relation'])\n self.num_entity = int(json_dict['num_entity'])\n self.per_item_num_paths = int(json_dict['per_item_num_paths'])\n self.embedding_dim = 
int(json_dict['embedding_dim'])\n self.batch_size = int(json_dict['batch_size'])\n self.lr = float(json_dict['lr'])\n self.kge_weight = float(json_dict['kge_weight'])\n self.node_weight = float(json_dict['node_weight'])\n self.l2_weight = float(json_dict['l2_weight'])\n"},"avg_line_length":{"kind":"number","value":38.05,"string":"38.05"},"max_line_length":{"kind":"number","value":78,"string":"78"},"alphanum_fraction":{"kind":"number","value":0.6590013141,"string":"0.659001"}}},{"rowIdx":46236,"cells":{"hexsha":{"kind":"string","value":"17214237c66cedce9feb050af3a6e63dea6a0917"},"size":{"kind":"number","value":164,"string":"164"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"book/_build/jupyter_execute/docs/000_intro.py"},"max_stars_repo_name":{"kind":"string","value":"tom-tubeless/Biologie"},"max_stars_repo_head_hexsha":{"kind":"string","value":"44660ea21971d4b3d77118606bfe1264993465fc"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"book/_build/jupyter_execute/docs/000_intro.py"},"max_issues_repo_name":{"kind":"string","value":"tom-tubeless/Biologie"},"max_issues_repo_head_hexsha":{"kind":"string","value":"44660ea21971d4b3d77118606bfe1264993465fc"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"book/_build/jupyter_execute/docs/000_intro.py"},"max_forks_repo_name":{"kind":"string","value":"tom-tubeless/Biologie"},"max_forks_repo_head_hexsha":{"kind":"string","value":"44660ea21971d4b3d77118606bfe1264993465fc"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Bio am WWG (Biology at the WWG)\n# \n# Lesson planning, content, and materials for the subject of biology at the Wim-Wenders-Gymnasium Düsseldorf.\n"},"avg_line_length":{"kind":"number","value":23.4285714286,"string":"23.428571"},"max_line_length":{"kind":"number","value":106,"string":"106"},"alphanum_fraction":{"kind":"number","value":0.737804878,"string":"0.737805"}}},
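# --- Editor's note (added material) -------------------------------------------
# Feeding the TBNetConfig parser (two records above) with a config file. All
# numbers are made-up placeholders, not the project's real hyperparameters:
import json

cfg = {"num_item": 100, "num_relation": 10, "num_entity": 500,
       "per_item_num_paths": 8, "embedding_dim": 16, "batch_size": 32,
       "lr": 0.001, "kge_weight": 0.05, "node_weight": 1.0, "l2_weight": 1e-5}
with open("config.json", "w") as f:
    json.dump(cfg, f)

# from src.config import TBNetConfig   # import path used in the record's repo
# config = TBNetConfig("config.json")
# print(config.embedding_dim)          # -> 16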
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-05-10T09:16:23.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-05-10T09:16:23.000Z"},"max_issues_repo_path":{"kind":"string","value":"python_reference/useful_scripts/conc_gzip_files.py"},"max_issues_repo_name":{"kind":"string","value":"gopala-kr/ds-notebooks"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bc35430ecdd851f2ceab8f2437eec4d77cb59423"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"python_reference/useful_scripts/conc_gzip_files.py"},"max_forks_repo_name":{"kind":"string","value":"gopala-kr/ds-notebooks"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bc35430ecdd851f2ceab8f2437eec4d77cb59423"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-10-14T07:30:18.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-10-14T07:30:18.000Z"},"content":{"kind":"string","value":"# Sebastian Raschka 03/2014\n\nimport gzip\nimport shutil\nimport os\n\n#import pyprind\n\ndef conc_gzip_files(in_dir, out_file, append=False, print_progress=True):\n \"\"\" Reads contents from gzipped ASCII or UTF-8 files, decodes them, and\n appends the lines to one output file.\n\n Keyword arguments:\n in_dir (str): Path of the directory with the gzip-files\n out_file (str): Path to the resulting file\n append (bool): If true, it appends contents to an exisiting file,\n else creates a new output file.\n print_progress (bool): prints progress bar if true.\n\n \"\"\"\n write_mode = 'wb'\n gzips = [os.path.join(in_dir, i) for i in os.listdir(in_dir) if i.endswith('.gz')]\n #if print_progress:\n # pbar = pyprind.ProgBar(len(gzips))\n with open(out_file, 'ab' if append else 'wb') as ofile:\n for f in gzips:\n with gzip.open(f, 'rb') as gzipf:\n shutil.copyfileobj(gzipf, ofile)\n #if print_progress:\n # pbar.update()\n\nif __name__ == '__main__':\n conc_gzip_files('/home/usr/my_dir', '/home/usr/test.txt')\n"},"avg_line_length":{"kind":"number","value":32.7941176471,"string":"32.794118"},"max_line_length":{"kind":"number","value":86,"string":"86"},"alphanum_fraction":{"kind":"number","value":0.6394618834,"string":"0.639462"}}},{"rowIdx":46238,"cells":{"hexsha":{"kind":"string","value":"e52b8445bb0424f37bdedb3f32cfe011304969e5"},"size":{"kind":"number","value":2819,"string":"2,819"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"RSS/model/rssfeed.py"},"max_stars_repo_name":{"kind":"string","value":"dereklm12880/rssticker"},"max_stars_repo_head_hexsha":{"kind":"string","value":"d90e8c00811d67bd9fb8104bbb6ec98aae5221f4"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-02-26T01:54:26.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-04-27T20:09:14.000Z"},"max_issues_repo_path":{"kind":"string","value":"RSS/model/rssfeed.py"},"max_issues_repo_name":{"kind":"string","value":"dereklm12880/rssticker"},"max_issues_repo_head_hexsha":{"kind":"string","value":"d90e8c00811d67bd9fb8104bbb6ec98aae5221f4"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":17,"string":"17"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-02-29T02:43:44.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-04-27T20:38:44.000Z"},"max_forks_repo_path":{"kind":"string","value":"RSS/model/rssfeed.py"},"max_forks_repo_name":{"kind":"string","value":"dereklm12880/rssticker"},"max_forks_repo_head_hexsha":{"kind":"string","value":"d90e8c00811d67bd9fb8104bbb6ec98aae5221f4"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":8,"string":"8"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-02-26T21:37:36.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-06-23T00:01:27.000Z"},"content":{"kind":"string","value":"# method for rss links\nimport feedparser\n\n\nclass RssModel:\n\n \"\"\" Class model.rssfeeds.RssModel.\n This class parses the feeds that are given and returns them in an an empty list.\n \"\"\"\n\n _newsreel_index_pos = -1\n _raw_feed = ''\n given_url = ''\n title = ''\n subtitle = ''\n link = ''\n newsreel = []\n\n def parse(self, feed_url):\n\n \"\"\" Function model.rssfeeds.RssModel.parse.\n This function checks to see if there are feeds are strings and if they\n have URLs, otherwise throwing an unexpected error. If exceptions aren't thrown,\n the title, subtitle, and link are stored.\n Arguments:\n feed_url -- the url that is taken from the feed.\n \"\"\"\n\n if not isinstance(feed_url, str): raise Exception('Expects string {} given'.format(type(feed_url)))\n self._raw_feed = feedparser.parse(feed_url)\n if len(self._raw_feed) == 0: raise Exception(\"No feed with the url {} found.\".format(feed_url))\n if 'bozo' in self._raw_feed and self._raw_feed['bozo'] == 1: raise Exception(\"An unexpected issue occurred: {}\".format(self._raw_feed['bozo_exception']))\n self.given_url = feed_url\n self.title = self._raw_feed['feed']['title']\n self.subtitle = self._raw_feed['feed']['subtitle']\n self.link = self._raw_feed['feed']['link']\n self.newsreel = self._raw_feed['entries']\n return self\n\n def get_current(self):\n \n \"\"\" Function model.rssfeed.RssModel.get_current.\n This function gets the current article from the feed. If nothing is loaded, an \n exception is thrown.\n \"\"\"\n\n try:\n _tmp = None\n if self._newsreel_index_pos < 0:\n _tmp = self._newsreel_index_pos\n self._newsreel_index_pos = 0\n\n _news_reel = self.newsreel[self._newsreel_index_pos]\n self._newsreel_index_pos = _tmp if _tmp else self._newsreel_index_pos\n return _news_reel\n except IndexError: raise Exception(\"There is no news loaded! Try parsing a new RSS feed.\")\n\n def get_next(self):\n\n \"\"\" Function model.rssfeed.RssModel.get_next.\n This function gets the next article in the feed until it gets to the end. 
When it gets\n to the end of the feed, it raises an exception.\n \"\"\"\n\n try:\n self._newsreel_index_pos = self._newsreel_index_pos + 1\n return self.get_current()\n except IndexError: raise Exception(\"There is no more news! Try parsing a new RSS feed.\")\n\n #TODO: add functionality to move to the next URL feed. Once out of URLs, it will load the URL from the \n # beginning of the list and continue. If we want to continue to the next feed, we need to add a\n # load_next_feed function that cycles back to the first feed."},"avg_line_length":{"kind":"number","value":39.1527777778,"string":"39.152778"},"max_line_length":{"kind":"number","value":161,"string":"161"},"alphanum_fraction":{"kind":"number","value":0.6445548067,"string":"0.644555"}}},{"rowIdx":46239,"cells":{"hexsha":{"kind":"string","value":"00aa38d98e9f0a6d64f535591ec7a7ee59a6c658"},"size":{"kind":"number","value":909,"string":"909"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"3kCTF/2021/pwn/klibrary/upload.py"},"max_stars_repo_name":{"kind":"string","value":"ruhan-islam/ctf-archives"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8c2bf6a608c821314d1a1cfaa05a6cccef8e3103"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-11-02T20:53:58.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-02T20:53:58.000Z"},"max_issues_repo_path":{"kind":"string","value":"3kCTF/2021/pwn/klibrary/upload.py"},"max_issues_repo_name":{"kind":"string","value":"ruhan-islam/ctf-archives"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8c2bf6a608c821314d1a1cfaa05a6cccef8e3103"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"3kCTF/2021/pwn/klibrary/upload.py"},"max_forks_repo_name":{"kind":"string","value":"ruhan-islam/ctf-archives"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8c2bf6a608c821314d1a1cfaa05a6cccef8e3103"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python3.8\n\nimport base64 # used by get_splitted_encoded_exploit below\n\nfrom pwn import *\n\n\nEXPLOIT_PATH = '/tmp/exploit'\n\nSERVER = '178.62.107.48' # must be a string, not a bare IP literal\nPORT = 9994\n\nSHELL_PROMPT = '$ '\n\n\ndef get_splitted_encoded_exploit():\n split_every = 256\n # Change the name to your exploit path\n with open('exploit', 'rb') as exploit_file:\n exploit = base64.b64encode(exploit_file.read())\n return [exploit[i:i+split_every] for i in range(0, len(exploit), split_every)]\n\n\ndef upload_exploit(sh):\n chunks_sent = 0\n splitted_exploit = get_splitted_encoded_exploit()\n for exploit_chunk in splitted_exploit:\n print(f'[*] Sending a chunk ({chunks_sent}/{len(splitted_exploit)})')\n sh.sendlineafter(\n SHELL_PROMPT, f'echo {exploit_chunk.decode()} | base64 -d >> {EXPLOIT_PATH}')\n chunks_sent += 1\n\nr = remote(SERVER, PORT)\nupload_exploit(r)\n# When finished, your exploit will be in /tmp directory. Good luck.\nr.interactive()\n"},"avg_line_length":{"kind":"number","value":25.9714285714,"string":"25.971429"},"max_line_length":{"kind":"number","value":89,"string":"89"},"alphanum_fraction":{"kind":"number","value":0.6853685369,"string":"0.685369"}}},
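# --- Editor's note (added material) -------------------------------------------
# The transfer trick in upload.py above: base64-encode the binary, cut the
# encoding into 256-character chunks (a multiple of 4, so every chunk decodes
# on its own), and let the remote `base64 -d >> file` reassemble the pieces.
# Demonstrated locally, without pwntools or a remote host:
import base64

payload = b"\x7fELF" + bytes(600)       # stand-in for an exploit binary
encoded = base64.b64encode(payload)
chunks = [encoded[i:i + 256] for i in range(0, len(encoded), 256)]

reassembled = b"".join(base64.b64decode(c) for c in chunks)
assert reassembled == payload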
{"rowIdx":46240,"cells":{"hexsha":{"kind":"string","value":"dae43870e767daf4c1ff60a5ad5c986861b3a2d3"},"size":{"kind":"number","value":387,"string":"387"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"INBa/2015/Serdechnaya_A_M/task_3_25.py"},"max_stars_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_stars_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"INBa/2015/Serdechnaya_A_M/task_3_25.py"},"max_issues_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_issues_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"INBa/2015/Serdechnaya_A_M/task_3_25.py"},"max_forks_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_forks_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#Write a program that prints the name \"Alessandro Filipeli\" and asks for his pseudonym. \n#The program must concatenate these two strings and print the resulting string, separating the name and the pseudonym with a dash.\nprint(\"Enter the pseudonym of Alessandro di Mariano di Vanni Filipepi:\")\nnick=input()\nprint(\"Alessandro di Mariano di Vanni Filipepi - that is \"+nick)\ninput(\"Press ENTER to continue\")"},"avg_line_length":{"kind":"number","value":64.5,"string":"64.5"},"max_line_length":{"kind":"number","value":112,"string":"112"},"alphanum_fraction":{"kind":"number","value":0.8010335917,"string":"0.801034"}}},{"rowIdx":46241,"cells":{"hexsha":{"kind":"string","value":"9718fa3aadd8b98177539d83b1aae2caac12a8ad"},"size":{"kind":"number","value":10305,"string":"10,305"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"research/cv/Auto-DeepLab/src/modules/operations.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":77,"string":"77"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:37.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T13:09:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"research/cv/Auto-DeepLab/src/modules/operations.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":3,"string":"3"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-10-30T14:44:57.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-14T06:57:57.000Z"},"max_forks_repo_path":{"kind":"string","value":"research/cv/Auto-DeepLab/src/modules/operations.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":24,"string":"24"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-10-15T08:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-24T18:45:20.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===========================================================================\n\"\"\"Operations used in NAS Base Cell\"\"\"\nimport mindspore.nn as nn\nfrom mindspore import ops\n\nfrom .bn import NormLeakyReLU, BatchNormalization\n\nOPS = {\n 'none': lambda channels, stride, momentum, 
eps, affine, use_abn, parallel: Zero(stride),\n 'avg_pool_3x3': lambda channels, stride, momentum, eps, affine, use_abn, parallel: nn.AvgPool2d(3, stride=stride, pad_mode='same'),\n 'max_pool_3x3': lambda channels, stride, momentum, eps, affine, use_abn, parallel: nn.MaxPool2d(3, stride=stride, pad_mode='same'),\n 'skip_connect': lambda channels, stride, momentum, eps, affine, use_abn, parallel: Identity() if stride == 1 else FactorizedReduce(channels, channels, affine=affine),\n 'sep_conv_3x3': lambda channels, stride, momentum, eps, affine, use_abn, parallel: SepConv(channels, channels, 3, stride, 1, momentum, eps, affine, parallel=parallel),\n 'sep_conv_5x5': lambda channels, stride, momentum, eps, affine, use_abn, parallel: SepConv(channels, channels, 5, stride, 2, momentum, eps, affine, parallel=parallel),\n 'dil_conv_3x3': lambda channels, stride, momentum, eps, affine, use_abn, parallel: DilConv(channels, channels, 3, stride, 2, 2, momentum, eps, affine, use_abn=use_abn, parallel=parallel),\n 'dil_conv_5x5': lambda channels, stride, momentum, eps, affine, use_abn, parallel: DilConv(channels, channels, 5, stride, 4, 2, momentum, eps, affine, use_abn=use_abn, parallel=parallel)\n}\n\n\nclass SepConv(nn.Cell):\n \"\"\"Sepconv\"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding=0,\n momentum=0.9,\n eps=1e-5,\n affine=True,\n use_abn=False,\n parallel=True):\n super(SepConv, self).__init__()\n if use_abn:\n self.op = nn.SequentialCell(\n nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride,\n pad_mode='pad', padding=padding, group=in_channels,\n has_bias=False, weight_init='HeNormal'),\n nn.Conv2d(in_channels, in_channels, kernel_size=1, pad_mode='same',\n has_bias=False, weight_init='HeNormal'),\n NormLeakyReLU(in_channels, momentum, eps, affine=affine, parallel=parallel),\n nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=1,\n pad_mode='pad', padding=padding, group=in_channels,\n has_bias=False, weight_init='HeNormal'),\n nn.Conv2d(in_channels, out_channels, kernel_size=1, pad_mode='same',\n has_bias=False, weight_init='HeNormal'),\n NormLeakyReLU(out_channels, momentum, eps, affine=affine, parallel=parallel)\n )\n\n else:\n self.op = nn.SequentialCell(\n nn.ReLU(),\n nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride,\n pad_mode='pad', padding=padding, group=in_channels,\n has_bias=False, weight_init='HeNormal'),\n nn.Conv2d(in_channels, in_channels, kernel_size=1, pad_mode='same',\n has_bias=False, weight_init='HeNormal'),\n BatchNormalization(in_channels, momentum, eps, affine=affine, parallel=parallel),\n nn.ReLU(),\n nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=1,\n pad_mode='pad', padding=padding, group=in_channels,\n has_bias=False, weight_init='HeNormal'),\n nn.Conv2d(in_channels, out_channels, kernel_size=1, pad_mode='same',\n has_bias=False, weight_init='HeNormal'),\n BatchNormalization(out_channels, momentum, eps, affine=affine, parallel=parallel)\n )\n\n def construct(self, x):\n \"\"\"construct\"\"\"\n return self.op(x)\n\n\nclass DilConv(nn.Cell):\n \"\"\"Dilconv\"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding=0,\n dilation=1,\n momentum=0.9,\n eps=1e-5,\n affine=True,\n separate=False,\n use_abn=False,\n parallel=True):\n super(DilConv, self).__init__()\n if use_abn:\n if separate:\n self.op = nn.SequentialCell(\n nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride,\n 
pad_mode='pad', padding=padding, dilation=dilation, group=in_channels,\n                              has_bias=False, weight_init='HeNormal'),\n                    nn.Conv2d(in_channels, out_channels, kernel_size=1, pad_mode='same',\n                              has_bias=False, weight_init='HeNormal'),\n                    NormLeakyReLU(out_channels, momentum, eps, affine=affine, parallel=parallel)\n                )\n            else:\n                self.op = nn.SequentialCell(\n                    nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride,\n                              pad_mode='pad', padding=padding, dilation=dilation,\n                              has_bias=False, weight_init='HeNormal'),\n                    nn.Conv2d(in_channels, out_channels, kernel_size=1, pad_mode='same',\n                              has_bias=False, weight_init='HeNormal'),\n                    NormLeakyReLU(out_channels, momentum, eps, affine=affine, parallel=parallel)\n                )\n\n        else:\n            if separate:\n                self.op = nn.SequentialCell(\n                    nn.ReLU(),\n                    nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride,\n                              pad_mode='pad', padding=padding, dilation=dilation, group=in_channels,\n                              has_bias=False, weight_init='HeNormal'),\n                    nn.Conv2d(in_channels, out_channels, kernel_size=1, pad_mode='same',\n                              has_bias=False, weight_init='HeNormal'),\n                    nn.BatchNorm2d(out_channels, affine=affine),\n                )\n            else:\n                self.op = nn.SequentialCell(\n                    nn.ReLU(),\n                    nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, stride=stride,\n                              pad_mode='pad', padding=padding, dilation=dilation,\n                              has_bias=False, weight_init='HeNormal'),\n                    nn.Conv2d(in_channels, out_channels, kernel_size=1, pad_mode='same',\n                              has_bias=False, weight_init='HeNormal'),\n                    nn.BatchNorm2d(out_channels, affine=affine),\n                )\n\n    def construct(self, x):\n        \"\"\"construct\"\"\"\n        return self.op(x)\n\n\nclass Identity(nn.Cell):\n    \"\"\"Identity\"\"\"\n    def __init__(self):\n        super(Identity, self).__init__()\n        self.null = None\n\n    def construct(self, x):\n        \"\"\"construct\"\"\"\n        return x\n\n\nclass Zero(nn.Cell):\n    \"\"\"Zero\"\"\"\n    def __init__(self, stride):\n        super(Zero, self).__init__()\n        self.stride = stride\n        self.mul = ops.Mul()\n\n    def construct(self, x):\n        \"\"\"construct\"\"\"\n        if self.stride == 1:\n            return self.mul(x, 0.)\n        return self.mul(x[:, :, ::self.stride, ::self.stride], 0.)\n\n\nclass FactorizedReduce(nn.Cell):\n    \"\"\"FactorizedReduce\"\"\"\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 affine=True):\n        super(FactorizedReduce, self).__init__()\n        assert out_channels % 2 == 0\n        self.relu = nn.ReLU()\n        self.conv_1 = nn.Conv2d(in_channels, out_channels // 2, 1, stride=2,\n                                has_bias=False, pad_mode='valid', weight_init='HeNormal')\n        self.conv_2 = nn.Conv2d(in_channels, out_channels // 2, 1, stride=2,\n                                has_bias=False, pad_mode='valid', weight_init='HeNormal')\n        self.bn = nn.BatchNorm2d(out_channels, affine=affine)\n\n    def construct(self, x):\n        \"\"\"construct\"\"\"\n        x = self.relu(x)\n        cat = ops.Concat(axis=1)\n        out = cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])])\n        out = self.bn(out)\n        return out\n\n\nclass ReLUConvBN(nn.Cell):\n    \"\"\"ReLUConvBN\"\"\"\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size,\n                 stride,\n                 padding=0,\n                 pad_mode='pad',\n                 momentum=0.9,\n                 eps=1e-5,\n                 affine=True,\n                 use_abn=False,\n                 parallel=True):\n        super(ReLUConvBN, self).__init__()\n        if use_abn:\n            self.op = nn.SequentialCell(\n                nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,\n                          pad_mode=pad_mode, padding=padding, has_bias=False),\n                NormLeakyReLU(out_channels, momentum, eps, parallel=parallel)\n            )\n\n        else:\n            self.op = nn.SequentialCell(\n                nn.ReLU(),\n                nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,\n                          pad_mode=pad_mode, padding=padding, 
has_bias=False),\n                nn.BatchNorm2d(out_channels, momentum, eps, affine=affine)\n            )\n\n    def construct(self, x):\n        \"\"\"construct\"\"\"\n        return self.op(x)\n"},"avg_line_length":{"kind":"number","value":44.6103896104,"string":"44.61039"},"max_line_length":{"kind":"number","value":191,"string":"191"},"alphanum_fraction":{"kind":"number","value":0.5609898108,"string":"0.56099"}}},{"rowIdx":46242,"cells":{"hexsha":{"kind":"string","value":"e7bd79478548a190e02ee4d47c9aae64bbe087a3"},"size":{"kind":"number","value":602,"string":"602"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"___Python/Thomas/pycurs_180625/p02_datenstrukturen/m03_anwendung_woerterbuch.py"},"max_stars_repo_name":{"kind":"string","value":"uvenil/PythonKurs201806"},"max_stars_repo_head_hexsha":{"kind":"string","value":"85afa9c9515f5dd8bec0c546f077d8cc39568fe8"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n  \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"___Python/Thomas/pycurs_180625/p02_datenstrukturen/m03_anwendung_woerterbuch.py"},"max_issues_repo_name":{"kind":"string","value":"uvenil/PythonKurs201806"},"max_issues_repo_head_hexsha":{"kind":"string","value":"85afa9c9515f5dd8bec0c546f077d8cc39568fe8"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n  \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"___Python/Thomas/pycurs_180625/p02_datenstrukturen/m03_anwendung_woerterbuch.py"},"max_forks_repo_name":{"kind":"string","value":"uvenil/PythonKurs201806"},"max_forks_repo_head_hexsha":{"kind":"string","value":"85afa9c9515f5dd8bec0c546f077d8cc39568fe8"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n  \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"satz = \"Fischers Fritze fischt frische Fische\"\r\n# Häufigkeit Buchstabe e\r\n\r\n# 1. 
Ansatz: for-schleife und Zähler\r\nzaehler = 0\r\nfor zeichen in satz:\r\n    if zeichen == \"e\":\r\n        zaehler += 1\r\nprint (zaehler)\r\n\r\n# Häufigkeit der Buchstaben im Satz\r\ndic_m03 = {}\r\nfor zeichen in satz:\r\n    if zeichen in dic_m03:\r\n        dic_m03[zeichen] += 1\r\n    else:\r\n        dic_m03[zeichen] = 1\r\nprint(\"Dictonray unsortiert \"+str(dic_m03))\r\nprint(\"\")\r\n\r\n# Sortieren nach Häufigkeit\r\ndic_m03 = sorted(dic_m03.items(), key=lambda tupel: tupel[1], reverse=True)\r\nprint(\"Dictonray sortiert \"+str(dict(dic_m03)))\r\n"},"avg_line_length":{"kind":"number","value":25.0833333333,"string":"25.083333"},"max_line_length":{"kind":"number","value":76,"string":"76"},"alphanum_fraction":{"kind":"number","value":0.657807309,"string":"0.657807"}}},{"rowIdx":46243,"cells":{"hexsha":{"kind":"string","value":"4134a9ed6605204696d1b0f3389d20bef0dbe091"},"size":{"kind":"number","value":71,"string":"71"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"20-hs-redez-sem/groups/05-decentGames/src/State.py"},"max_stars_repo_name":{"kind":"string","value":"Kyrus1999/BACnet"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5be8e1377252166041bcd0b066cce5b92b077d06"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n  \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":8,"string":"8"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-03-17T21:12:18.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-12-12T15:55:54.000Z"},"max_issues_repo_path":{"kind":"string","value":"20-hs-redez-sem/groups/05-decentGames/src/State.py"},"max_issues_repo_name":{"kind":"string","value":"Kyrus1999/BACnet"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5be8e1377252166041bcd0b066cce5b92b077d06"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n  \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-07-19T06:18:43.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-10T12:17:58.000Z"},"max_forks_repo_path":{"kind":"string","value":"20-hs-redez-sem/groups/05-decentGames/src/State.py"},"max_forks_repo_name":{"kind":"string","value":"Kyrus1999/BACnet"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5be8e1377252166041bcd0b066cce5b92b077d06"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n  \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":25,"string":"25"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-03-20T09:32:45.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-07-18T18:12:59.000Z"},"content":{"kind":"string","value":"FF = 'ff'\nONGOING = 'normal'\nFINISHED = 'finished'\nCHEATED = 
'cheated'\n"},"avg_line_length":{"kind":"number","value":14.2,"string":"14.2"},"max_line_length":{"kind":"number","value":21,"string":"21"},"alphanum_fraction":{"kind":"number","value":0.661971831,"string":"0.661972"}}},{"rowIdx":46244,"cells":{"hexsha":{"kind":"string","value":"68c2134237499d3dac238e74c6255f6a3006f781"},"size":{"kind":"number","value":92,"string":"92"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"2015/03/poll-health-parents-20150302/graphic_config.py"},"max_stars_repo_name":{"kind":"string","value":"nprapps/graphics-archive"},"max_stars_repo_head_hexsha":{"kind":"string","value":"97b0ef326b46a959df930f5522d325e537f7a655"},"max_stars_repo_licenses":{"kind":"list like","value":["FSFAP"],"string":"[\n \"FSFAP\"\n]"},"max_stars_count":{"kind":"number","value":14,"string":"14"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2015-05-08T13:41:51.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-02-24T12:34:55.000Z"},"max_issues_repo_path":{"kind":"string","value":"2015/03/poll-health-control-20150302/graphic_config.py"},"max_issues_repo_name":{"kind":"string","value":"nprapps/graphics-archive"},"max_issues_repo_head_hexsha":{"kind":"string","value":"97b0ef326b46a959df930f5522d325e537f7a655"},"max_issues_repo_licenses":{"kind":"list like","value":["FSFAP"],"string":"[\n \"FSFAP\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"2015/03/poll-health-control-20150302/graphic_config.py"},"max_forks_repo_name":{"kind":"string","value":"nprapps/graphics-archive"},"max_forks_repo_head_hexsha":{"kind":"string","value":"97b0ef326b46a959df930f5522d325e537f7a655"},"max_forks_repo_licenses":{"kind":"list like","value":["FSFAP"],"string":"[\n \"FSFAP\"\n]"},"max_forks_count":{"kind":"number","value":7,"string":"7"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2015-04-04T04:45:54.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-02-18T11:12:48.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nCOPY_GOOGLE_DOC_KEY = '1bHsaAin7zYrB-hasOXuZolTM7DwAhn4degtdhVhsO9c'\n"},"avg_line_length":{"kind":"number","value":23,"string":"23"},"max_line_length":{"kind":"number","value":68,"string":"68"},"alphanum_fraction":{"kind":"number","value":0.8369565217,"string":"0.836957"}}},{"rowIdx":46245,"cells":{"hexsha":{"kind":"string","value":"68eb4e700b71b99f2891692fbb6e5c832dd9ef33"},"size":{"kind":"number","value":985,"string":"985"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Problems/Depth-First Search/easy/UnivaluedBinaryTree/univalued_binary_tree.py"},"max_stars_repo_name":{"kind":"string","value":"dolong2110/Algorithm-By-Problems-Python"},"max_stars_repo_head_hexsha":{"kind":"string","value":"31ecc7367aaabdd2b0ac0af7f63ca5796d70c730"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-08-16T14:52:05.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-08-16T14:52:05.000Z"},"max_issues_repo_path":{"kind":"string","value":"Problems/Depth-First 
Search/easy/UnivaluedBinaryTree/univalued_binary_tree.py"},"max_issues_repo_name":{"kind":"string","value":"dolong2110/Algorithm-By-Problems-Python"},"max_issues_repo_head_hexsha":{"kind":"string","value":"31ecc7367aaabdd2b0ac0af7f63ca5796d70c730"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n  \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Problems/Depth-First Search/easy/UnivaluedBinaryTree/univalued_binary_tree.py"},"max_forks_repo_name":{"kind":"string","value":"dolong2110/Algorithm-By-Problems-Python"},"max_forks_repo_head_hexsha":{"kind":"string","value":"31ecc7367aaabdd2b0ac0af7f63ca5796d70c730"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n  \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n# Recursive\ndef isUnivalTree(root: TreeNode) -> bool:\n    def compare_value(cur_root: TreeNode, previous_root: int) -> bool:\n        if not cur_root:\n            return True\n\n        if cur_root.val != previous_root:\n            return False\n\n        return compare_value(cur_root.left, cur_root.val) and compare_value(cur_root.right, cur_root.val)\n\n    return compare_value(root, root.val)\n\n# Iterative\n# def isUnivalTree(root: TreeNode) -> bool:\n#     stack = [(root, root.val)]\n#     while stack:\n#         cur_node, prev_val = stack.pop()\n#         if cur_node.val != prev_val:\n#             return False\n#         if cur_node.left:\n#             stack.append((cur_node.left, cur_node.val))\n#         if cur_node.right:\n#             stack.append((cur_node.right, cur_node.val))\n#\n#     return True"},"avg_line_length":{"kind":"number","value":29.8484848485,"string":"29.848485"},"max_line_length":{"kind":"number","value":105,"string":"105"},"alphanum_fraction":{"kind":"number","value":0.6162436548,"string":"0.616244"}}},{"rowIdx":46246,"cells":{"hexsha":{"kind":"string","value":"6bd5a409455fe5863f8fd48fa075c30b28979376"},"size":{"kind":"number","value":4429,"string":"4,429"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"insert_tables.py"},"max_stars_repo_name":{"kind":"string","value":"arwhyte/gffa-db"},"max_stars_repo_head_hexsha":{"kind":"string","value":"20410ab00c9f86a1bf7ca85ebdbaa9535b106c59"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n  \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"insert_tables.py"},"max_issues_repo_name":{"kind":"string","value":"arwhyte/gffa-db"},"max_issues_repo_head_hexsha":{"kind":"string","value":"20410ab00c9f86a1bf7ca85ebdbaa9535b106c59"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"insert_tables.py"},"max_forks_repo_name":{"kind":"string","value":"arwhyte/gffa-db"},"max_forks_repo_head_hexsha":{"kind":"string","value":"20410ab00c9f86a1bf7ca85ebdbaa9535b106c59"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import requests\nimport json\nimport psycopg2\n\ndef read_json(filepath, encoding='utf-8'):\n \"\"\"Reads a JSON document, decodes the file content, and returns a\n dictionary if provided with a valid filepath.\n\n Parameters:\n filepath (str): path to file\n\n Returns:\n dict: dict representations of the decoded JSON document\n \"\"\"\n\n with open(filepath, 'r', encoding=encoding) as file_obj:\n return json.load(file_obj)\n\ndef write_json(filepath, data, encoding='utf-8', ensure_ascii=False, indent=2):\n \"\"\"Serializes object as JSON. Writes content to the provided filepath.\n\n Parameters:\n filepath (str): the path to the file\n data (dict)/(list): the data to be encoded as JSON and written to the file\n encoding (str): name of encoding used to encode the file\n ensure_ascii (str): if False non-ASCII characters are printed as is; otherwise\n non-ASCII characters are escaped.\n indent (int): number of \"pretty printed\" indention spaces applied to encoded JSON\n\n Returns:\n None\n \"\"\"\n\n with open(filepath, 'w', encoding=encoding) as file_obj:\n json.dump(data, file_obj, ensure_ascii=ensure_ascii, indent=indent)\n\ndef get_cleaned_list(data):\n \"\"\"Takes dictionary data and converts its values it into list. 
Then rearranges the order of elements as per the database column order.\n\n    Parameters:\n        data (dict): one dict data from the whole json read file\n\n    Returns:\n        result (list): list of rearranged values as per database columns\n    \"\"\"\n    result = []\n    data = list(data.values())\n    result.append(data[13])\n    for i in data[:-1]:\n        result.append(i)\n    result.append(json.dumps(data))\n    print(len(result))\n    print(result[-1])\n    return result\n\ndef main():\n\n    # Connect to an existing database\n    conn = psycopg2.connect(\"dbname=gffa_db user=postgres password=postgres\")\n\n    # Open a cursor to perform database operations\n    cur = conn.cursor()\n\n    # --------------- Query to insert records into film table ---------------\n\n    # Read film data from swapi_films.json\n    film_data = read_json('./data/swapi_json/swapi_films.json')\n\n    #insertion process\n    for film in film_data:\n        film_list = get_cleaned_list(film)\n        sql = \"\"\"INSERT INTO public.film(url, title, episode_id, opening_crawl, director, producer, release_date, characters, planets, starships, vehicles, species, created, edited, raw_json) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (url) DO NOTHING\"\"\"\n        # Execute to insert records into film table\n        cur.execute(sql, film_list)\n        # Make the changes to the database persistent\n        conn.commit()\n\n    # # --------------- Query to insert records into person table ---------------\n\n    # # Read person data from swapi_people.json\n    # person_data = read_json('./data/swapi_json/swapi_people.json')\n\n    # # insertion process\n    # for person in person_data:\n    #     person_list = get_cleaned_list(person)\n    #     sql = \"\"\"INSERT INTO public.person(url, name, height, mass, hair_color, skin_color, eye_color, birth_year, gender, homeworld, films, species, vehicles, starships, created, edited, raw_json) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (url) DO NOTHING\"\"\"\n    #     # Execute to insert records into person table\n    #     cur.execute(sql, person_list)\n    #     # Make the changes to the database persistent\n    #     conn.commit()\n\n    # # --------------- Query to insert records into planet table ---------------\n\n    # # Read person data from swapi_planets.json\n    # planet_data = read_json('./data/swapi_json/swapi_planets.json')\n\n    # # insertion process\n    # for planet in planet_data:\n    #     planet_list = get_cleaned_list(planet)\n    #     sql = \"\"\"INSERT INTO public.planet(url, name, rotation_period, orbital_period, diameter, climate, gravity, terrain, surface_water, population, residents, films, created, edited, raw_json) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (url) DO NOTHING\"\"\"\n    #     # Execute to insert records into planet table\n    #     cur.execute(sql, planet_list)\n    #     # Make the changes to the database persistent\n    #     conn.commit()\n\n    # Close cursor\n    cur.close()\n\n    # Close connection\n    conn.close()\n\nif __name__ == '__main__':\n    
main()"},"avg_line_length":{"kind":"number","value":39.1946902655,"string":"39.19469"},"max_line_length":{"kind":"number","value":307,"string":"307"},"alphanum_fraction":{"kind":"number","value":0.6443892527,"string":"0.644389"}}},{"rowIdx":46247,"cells":{"hexsha":{"kind":"string","value":"6bd8266adc81cedfdafbd1545c80a2bbca9cae23"},"size":{"kind":"number","value":11463,"string":"11,463"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Packs/PaloAltoNetworks_IoT3rdParty/Scripts/SendAllPANWIoTDevicesToCiscoISE/SendAllPANWIoTDevicesToCiscoISE.py"},"max_stars_repo_name":{"kind":"string","value":"diCagri/content"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":799,"string":"799"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-08-02T06:43:14.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T11:10:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"Packs/PaloAltoNetworks_IoT3rdParty/Scripts/SendAllPANWIoTDevicesToCiscoISE/SendAllPANWIoTDevicesToCiscoISE.py"},"max_issues_repo_name":{"kind":"string","value":"diCagri/content"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":9317,"string":"9,317"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-08-07T19:00:51.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T21:56:04.000Z"},"max_forks_repo_path":{"kind":"string","value":"Packs/PaloAltoNetworks_IoT3rdParty/Scripts/SendAllPANWIoTDevicesToCiscoISE/SendAllPANWIoTDevicesToCiscoISE.py"},"max_forks_repo_name":{"kind":"string","value":"diCagri/content"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1297,"string":"1,297"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-08-04T13:59:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-31T23:43:06.000Z"},"content":{"kind":"string","value":"import demistomock as demisto # noqa: F401\nfrom CommonServerPython import * # noqa: F401\n\nPANW_IOT_INSTANCE = demisto.args().get('panw_iot_3rd_party_instance')\nCISCO_ISE_ACTIVE_INSTANCE = demisto.args().get(\"active_ise_instance\")\nGET_EP_ID_CMD = 'cisco-ise-get-endpoint-id-by-name'\n\nCISCO_ISE_FIELD_MAP = {\n \"ip\": [\"ZingboxIpAddress\", \"PanwIoTIpAddress\"],\n \"ip address\": [\"ZingboxIP\", \"PanwIoTIP\"],\n \"ip_address\": [\"ZingboxIP\", \"PanwIoTIP\"],\n \"profile\": [\"ZingboxProfile\", \"PanwIoTProfile\"],\n \"category\": [\"ZingboxCategory\", \"PanwIoTCategory\"],\n \"risk_score\": [\"ZingboxRiskScore\", \"PanwIoTRiskScore\"],\n \"risk score\": [\"ZingboxRiskScore\", \"PanwIoTRiskScore\"],\n \"confidence\": [\"ZingboxConfidence\", \"PanwIoTConfidence\"],\n \"confidence score\": [\"ZingboxConfidence\", \"PanwIoTConfidence\"],\n \"confidence_score\": [\"ZingboxConfidence\", \"PanwIoTConfidence\"],\n \"tag\": [\"ZingboxTag\", \"PanwIoTTag\"],\n \"asset_tag\": [\"ZingboxTag\", \"PanwIoTTag\"],\n \"Tags\": 
[\"ZingboxTag\", \"PanwIoTTag\"],\n \"hostname\": [\"ZingboxHostname\", \"PanwIoTHostname\"],\n \"osCombined\": [\"ZingboxOS\", \"PanwIoTOS\"],\n \"model\": [\"ZingboxModel\", \"PanwIoTModel\"],\n \"vendor\": [\"ZingboxVendor\", \"PanwIoTVendor\"],\n \"Serial Number\": [\"ZingboxSerial\", \"PanwIoTSerial\"],\n \"Serial_Number\": [\"ZingboxSerial\", \"PanwIoTSerial\"],\n \"endpoint protection\": [\"ZingboxEPP\", \"PanwIoTEPP\"],\n \"endpoint_protection\": [\"ZingboxEPP\", \"PanwIoTEPP\"],\n \"AET\": [\"ZingboxAET\", \"PanwIoTAET\"],\n \"External Network\": [\"ZingboxInternetAccess\", \"PanwIoTInternetAccess\"],\n}\n\nINT_FIELDS = [\"risk_score\", \"risk score\", \"confidence\", \"confidence score\", \"confidence_score\"]\n\n\ndef send_status_to_panw_iot_cloud(status, msg):\n \"\"\"\n Reports status details back to PANW IoT Cloud.\n param status: Status (error, disabled, success) to be send to PANW IoT cloud.\n param msg: Debug message to be send to PANW IoT cloud.\n \"\"\"\n resp = demisto.executeCommand(\"panw-iot-3rd-party-report-status-to-panw\", {\n \"status\": status,\n \"message\": msg,\n \"integration_name\": \"ise\",\n \"playbook_name\": \"PANW IoT 3rd Party Cisco ISE Integration - Bulk Export to Cisco ISE\",\n \"asset_type\": 'device',\n \"timestamp\": int(round(time.time() * 1000)),\n \"using\": PANW_IOT_INSTANCE\n })\n\n if isError(resp[0]):\n err_msg = f'Error, failed to send status to PANW IoT Cloud - {resp[0].get(\"Contents\")}'\n raise Exception(err_msg)\n\n\ndef get_active_ise_instance_or_error_msg():\n \"\"\"\n Get the active configured Cisco ISE instance, if not found then return the error message.\n \"\"\"\n response = demisto.executeCommand(\"GetCiscoISEActiveInstance\", {})\n err_msg = None\n active_instance = None\n\n data = response[0].get('EntryContext', {})\n\n if 'PaloAltoIoTIntegrationBase.ActiveNodeInstance' in data:\n active_instance = data.get('PaloAltoIoTIntegrationBase.ActiveNodeInstance')\n elif 'PaloAltoIoTIntegrationBase.NodeErrorStatus' in data:\n err_msg = data.get('PaloAltoIoTIntegrationBase.NodeErrorStatus')\n\n return active_instance, err_msg\n\n\ndef extract_ise_api_error(err_msg):\n \"\"\"\n Extract any connection error or error code if possible,\n Otherwise just return the original error\n \"\"\"\n err_msg = err_msg.split('-')[0]\n if err_msg.startswith(\"Error in API call to Cisco\"):\n start = err_msg.find('[') + 1\n end = err_msg.find(']')\n return err_msg[start:end]\n elif err_msg.startswith(\"Connection Error. 
Verify\"):\n return \"Connection Error\"\n else:\n return err_msg\n\n\ndef get_devices_from_panw_iot_cloud(offset, page_size):\n \"\"\"\n Gets assets from PANW IoT cloud.\n param offset: Offset number for the asset list.\n param page_size: Page size of the response being requested.\n \"\"\"\n resp = demisto.executeCommand(\"panw-iot-3rd-party-get-asset-list\", {\n \"asset_type\": 'device',\n \"increment_type\": None,\n \"offset\": offset,\n \"pageLength\": page_size,\n \"using\": PANW_IOT_INSTANCE\n\n })\n if isError(resp[0]):\n err_msg = f'Error, could not get assets from PANW IoT Cloud - {resp[0].get(\"Contents\")}'\n raise Exception(err_msg)\n\n return resp[0]['Contents']\n\n\ndef convert_device_map_to_cisco_ise_attributes(device_map):\n \"\"\"\n Converts a PANW IoT device_map to Cisco ISE custom attributes map.\n param device_map: Single PANW IoT device_map with device attributes .\n \"\"\"\n attribute_list = {}\n if 'deviceid' in device_map:\n if device_map['deviceid'] is None or device_map['deviceid'] == \"\":\n return None\n attribute_list['mac'] = device_map['deviceid']\n if not is_mac_address(attribute_list['mac']):\n return None\n zb_attributes = {}\n for field in device_map:\n if device_map[field] is None or device_map[field] == \"\":\n continue\n if field in CISCO_ISE_FIELD_MAP:\n if field in INT_FIELDS:\n try:\n int_val = int(device_map[field])\n except Exception:\n continue\n zb_attributes[CISCO_ISE_FIELD_MAP[field][0]] = int_val\n zb_attributes[CISCO_ISE_FIELD_MAP[field][1]] = int_val\n else:\n zb_attributes[CISCO_ISE_FIELD_MAP[field][0]] = device_map[field]\n zb_attributes[CISCO_ISE_FIELD_MAP[field][1]] = device_map[field]\n attribute_list['zb_attributes'] = zb_attributes\n return attribute_list\n\n\ndef update_existing_endpoint(mac, attr_map, ep_id, active_instance):\n \"\"\"\n Update an existing endpoint with the given custom attributes.\n Param mac: mac address of the endpoint that needs to be updated.\n Param attr_map: a map containing various ise custom attributes.\n Param ep_id: ID for endpoint that needs to be updated.\n Param active_instance: The primary/active ISE instance.\n \"\"\"\n attribute_names = \"\"\n attribute_values = \"\"\n for key in attr_map:\n attribute_names += key + \",\"\n attribute_values += str(attr_map[key]) + \",\"\n attribute_names = attribute_names[:-1]\n attribute_values = attribute_values[:-1]\n\n resp = demisto.executeCommand(\"cisco-ise-update-endpoint-custom-attribute\", {\n \"id\": ep_id,\n \"macAddress\": mac,\n \"attributeName\": attribute_names,\n \"attributeValue\": attribute_values,\n \"using\": active_instance\n })\n if isError(resp[0]):\n err_msg = f'Error, failed to update custom attributes for endpoint {id} - {resp[0].get(\"Contents\")}'\n raise Exception(err_msg)\n\n\ndef create_new_ep(mac, attr_map, active_instance):\n \"\"\"\n Create a new endpoint with the given params\n Param mac: mac address of the endpoint that needs to be created.\n Param attr_map: a map containing various ise custom attributes.\n Param active_instance: The primary/active ISE instance.\n \"\"\"\n resp = demisto.executeCommand(\"cisco-ise-create-endpoint\", {\n \"mac_address\": mac,\n \"attributes_map\": attr_map,\n \"using\": active_instance\n })\n if isError(resp[0]):\n err_msg = f'Failed to create new Endpoint {mac} - {resp[0].get(\"Contents\")}'\n raise Exception(err_msg)\n\n\ndef create_or_update_ep(mac, attr_map):\n \"\"\"\n Check if an enpoint exists in ISE, if not create one with the custom attributes\n otherwise update it. 
If at any point the connection goes down or we get a 401 -\n    unautherized access we will attempt to get the new active instance.\n    Params mac: Mac adress of the endpoint.\n    attr_map: Custom attributes for the endpoint.\n    \"\"\"\n\n    global CISCO_ISE_ACTIVE_INSTANCE\n    global GET_EP_ID_CMD\n\n    cmd_mac_syntax_map = {\n        \"cisco-ise-get-endpoint-id-by-name\": \"mac_address\",\n        \"cisco-ise-get-endpoint-id\": \"macAddress\"\n    }\n\n    # Check if this mac address (endpoint) is present in ISE by attempting to get its ID\n    resp = demisto.executeCommand(GET_EP_ID_CMD, {\n        cmd_mac_syntax_map[GET_EP_ID_CMD]: mac,\n        \"using\": CISCO_ISE_ACTIVE_INSTANCE\n    })\n\n    if isError(resp[0]):\n        err_msg = extract_ise_api_error(resp[0].get(\"Contents\"))\n\n        # 404 Not Found or empty results, we need to create a new EP\n        if err_msg == \"404\" or err_msg == \"list index out of range\":\n            create_new_ep(mac, attr_map, CISCO_ISE_ACTIVE_INSTANCE)\n\n        # 405 - Method not allowed means we need to switch to an old filter based API\n        elif err_msg == '405':\n            GET_EP_ID_CMD = \"cisco-ise-get-endpoint-id\"\n\n        # The primary went down (connection Error) or 401 if a fail over occurred (this primary/active\n        # is not a secondary/standby device).We should attempt to get the new Primary/Active\n        # instance is possible.\n        elif err_msg == \"Connection Error\" or err_msg == \"401\":\n            # Failover can take up to 10 minutes, its ok to just wait even if its a standalone ISE noe.\n            msg = \"ISE instance is down. Trying again in 10 minutes. Error = %s\" % err_msg\n            demisto.info(\"PANW_IOT_3RD_PARTY_BASE %s\" % msg)\n            send_status_to_panw_iot_cloud(\"error\", msg)\n            time.sleep(10 * 60)\n            # Try again to get a new active instance\n            new_active_instance, err_msg = get_active_ise_instance_or_error_msg()\n            if new_active_instance is None:\n                raise Exception(err_msg)\n            else:\n                CISCO_ISE_ACTIVE_INSTANCE = new_active_instance\n                msg = f\"Found new active ISE instance {CISCO_ISE_ACTIVE_INSTANCE}\"\n                send_status_to_panw_iot_cloud(\"success\", msg)\n        else:\n            raise Exception(resp[0].get(\"Contents\"))\n    else:\n        ep_id = resp[0]['EntryContext']['Endpoint(val.ID === obj.ID)']['ID']\n        update_existing_endpoint(mac, attr_map, ep_id, CISCO_ISE_ACTIVE_INSTANCE)\n\n\ndef get_all_panw_iot_devices_and_send_to_cisco_ise():\n    \"\"\"\n    Retrieves all devices from PANW IoT Cloud, 1000 devices at a time and sends it\n    to the primary/active cisco ise.\n    \"\"\"\n    count = 0\n    offset = 0\n    page_size = 1000\n    unique_macs = set()\n\n    while True:\n        device_list = get_devices_from_panw_iot_cloud(offset, page_size)\n        size = len(device_list)\n        count += size\n        for device in device_list:\n            attrs = convert_device_map_to_cisco_ise_attributes(device)\n            if attrs is not None:\n                mac = attrs['mac']\n                attr_map = attrs['zb_attributes']\n                if mac not in unique_macs:\n                    create_or_update_ep(mac, attr_map)\n                    unique_macs.add(mac)\n                    time.sleep(0.5)\n\n        if size == page_size:\n            offset += page_size\n            msg = f'Successfully exported {count} devices to Cisco ISE'\n            send_status_to_panw_iot_cloud(\"success\", msg,)\n        else:\n            break\n    return(f'Total {count} devices pulled from PANW IoT Cloud.\\n'\n           f'Exported {len(unique_macs)} devices (with available mac addresses) to Cisco ISE')\n\n\ndef main():\n    try:\n        status_msg = get_all_panw_iot_devices_and_send_to_cisco_ise()\n    except Exception as ex:\n        send_status_to_panw_iot_cloud(\"error\", str(ex))\n        return_error(str(ex))\n\n    send_status_to_panw_iot_cloud(\"success\", status_msg)\n    return_results(status_msg)\n\n\nif __name__ in ('__main__', '__builtin__', 'builtins'):\n    
main()\n"},"avg_line_length":{"kind":"number","value":38.9897959184,"string":"38.989796"},"max_line_length":{"kind":"number","value":108,"string":"108"},"alphanum_fraction":{"kind":"number","value":0.6512256826,"string":"0.651226"}}},{"rowIdx":46248,"cells":{"hexsha":{"kind":"string","value":"6bfc67542b8061cb0974af1ae7c6a732f07ff2ec"},"size":{"kind":"number","value":290,"string":"290"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/python/py-accepted/231A.py"},"max_stars_repo_name":{"kind":"string","value":"cbarnson/UVa"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0dd73fae656613e28b5aaf5880c5dad529316270"},"max_stars_repo_licenses":{"kind":"list like","value":["Unlicense","MIT"],"string":"[\n \"Unlicense\",\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2019-09-07T17:00:26.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-08-05T02:08:35.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/python/py-accepted/231A.py"},"max_issues_repo_name":{"kind":"string","value":"cbarnson/UVa"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0dd73fae656613e28b5aaf5880c5dad529316270"},"max_issues_repo_licenses":{"kind":"list like","value":["Unlicense","MIT"],"string":"[\n \"Unlicense\",\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/python/py-accepted/231A.py"},"max_forks_repo_name":{"kind":"string","value":"cbarnson/UVa"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0dd73fae656613e28b5aaf5880c5dad529316270"},"max_forks_repo_licenses":{"kind":"list like","value":["Unlicense","MIT"],"string":"[\n \"Unlicense\",\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#! 
python\n\n# Problem # : 231A\n# Created on : 2019-01-14 21:19:31\n\n\ndef Main():\n    n = int(input())\n    cnt = 0\n    for i in range(0, n):\n        if sum(list(map(int, input().split(' ')))) > 1: \n            cnt += 1\n    else:\n        print(cnt)\n\n\nif __name__ == '__main__':\n    Main()\n"},"avg_line_length":{"kind":"number","value":15.2631578947,"string":"15.263158"},"max_line_length":{"kind":"number","value":56,"string":"56"},"alphanum_fraction":{"kind":"number","value":0.4724137931,"string":"0.472414"}}},{"rowIdx":46250,"cells":{"hexsha":{"kind":"string","value":"d442d4ebcfe5532e7387c7a3bf2893c827baa1c9"},"size":{"kind":"number","value":79,"string":"79"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"examples/miniportal/portal/actors/system/usermanager/listUsers.py"},"max_stars_repo_name":{"kind":"string","value":"Jumpscale/web"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8e8ec2ce01f3105c7647ee8a0c90af09311cbbeb"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n  \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2015-10-26T10:38:32.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2015-10-26T10:38:32.000Z"},"max_issues_repo_path":{"kind":"string","value":"examples/miniportal/portal/actors/system/usermanager/listUsers.py"},"max_issues_repo_name":{"kind":"string","value":"Jumpscale/web"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8e8ec2ce01f3105c7647ee8a0c90af09311cbbeb"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n  \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"examples/miniportal/portal/actors/system/usermanager/listUsers.py"},"max_forks_repo_name":{"kind":"string","value":"Jumpscale/web"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8e8ec2ce01f3105c7647ee8a0c90af09311cbbeb"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n  \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\n\ndef listUsers(actors, name='azmy', age=20):\n    raise NotImplementedError()\n"},"avg_line_length":{"kind":"number","value":15.8,"string":"15.8"},"max_line_length":{"kind":"number","value":43,"string":"43"},"alphanum_fraction":{"kind":"number","value":0.6962025316,"string":"0.696203"}}},{"rowIdx":46251,"cells":{"hexsha":{"kind":"string","value":"d477f19fface70fc19c62f57f3984f053cd44963"},"size":{"kind":"number","value":6824,"string":"6,824"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"lbry/tests/integration/test_comment_commands.py"},"max_stars_repo_name":{"kind":"string","value":"Nykseli/lbry-sdk"},"max_stars_repo_head_hexsha":{"kind":"string","value":"07afc0aa0a1e6c0ef6aa284fb47513af940440c1"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"lbry/tests/integration/test_comment_commands.py"},"max_issues_repo_name":{"kind":"string","value":"Nykseli/lbry-sdk"},"max_issues_repo_head_hexsha":{"kind":"string","value":"07afc0aa0a1e6c0ef6aa284fb47513af940440c1"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":4,"string":"4"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-10-27T21:53:05.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-11T03:10:54.000Z"},"max_forks_repo_path":{"kind":"string","value":"lbry/tests/integration/test_comment_commands.py"},"max_forks_repo_name":{"kind":"string","value":"braveheart12/lbry-sdk"},"max_forks_repo_head_hexsha":{"kind":"string","value":"dc709b468f9dce60d206161785def5c7ace2b763"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from math import ceil\n\nfrom aiohttp import web\n\nfrom lbry.testcase import CommandTestCase\n\n\nclass MockedCommentServer:\n\n ERRORS = {\n 'INVALID_PARAMS': {'code': -32602, 'message': 'Invalid parameters'},\n 'INTERNAL': {'code': -32603, 'message': 'An internal error'},\n 'UNKNOWN': {'code': -1, 'message': 'An unknown or very miscellaneous error'},\n 'INVALID_METHOD': {'code': -32604, 'message': 'The Requested method does not exist'}\n }\n\n def __init__(self, port=2903):\n self.port = port\n self.app = web.Application(debug=True)\n self.app.add_routes([web.post('/api', self.api)])\n self.runner = None\n self.server = None\n self.comments = []\n self.comment_id = 0\n\n def create_comment(self, **comment):\n self.comment_id += 1\n comment['comment_id'] = self.comment_id\n if 'channel_id' in comment:\n comment['channel_url'] = 'lbry://' + comment['channel_name'] + '#' + comment['channel_id']\n self.comments.append(comment)\n return comment\n\n def get_claim_comments(self, page=1, page_size=50, **kwargs):\n return {\n 'page': page,\n 'page_size': page_size,\n 'total_pages': ceil(len(self.comments)/page_size),\n 'total_items': len(self.comments),\n 'items': (self.comments[::-1])[(page - 1) * page_size: page * page_size]\n }\n\n methods = {\n 'get_claim_comments': get_claim_comments,\n 'create_comment': create_comment,\n }\n\n def process_json(self, body) -> dict:\n response = {'jsonrpc': '2.0', 'id': body['id']}\n if body['method'] in self.methods:\n params = body.get('params', {})\n result = self.methods[body['method']](self, **params)\n response['result'] = result\n else:\n response['error'] = self.ERRORS['INVALID_METHOD']\n return response\n\n async def start(self):\n self.runner = web.AppRunner(self.app)\n await self.runner.setup()\n self.server = web.TCPSite(self.runner, 'localhost', self.port)\n await self.server.start()\n\n async def stop(self):\n await self.runner.shutdown()\n await self.runner.cleanup()\n\n async def api(self, request):\n body = await request.json()\n if type(body) is list or type(body) is dict:\n if type(body) is list:\n response = [self.process_json(part) for part in body]\n else:\n response = self.process_json(body)\n return web.json_response(response)\n else:\n raise 
TypeError('invalid type passed')\n\n\nclass CommentCommands(CommandTestCase):\n\n    async def asyncSetUp(self):\n        await super().asyncSetUp()\n        self.daemon.conf.comment_server = 'http://localhost:2903/api'\n        self.comment_server = MockedCommentServer(2903)\n        await self.comment_server.start()\n        self.addCleanup(self.comment_server.stop)\n\n    async def test01_comment_create(self):\n        channel = (await self.channel_create('@JimmyBuffett'))['outputs'][0]\n        stream = (await self.stream_create())['outputs'][0]\n\n        self.assertEqual(0, len((await self.daemon.jsonrpc_comment_list(stream['claim_id']))['items']))\n        comment = await self.daemon.jsonrpc_comment_create(\n            claim_id=stream['claim_id'],\n            channel_id=channel['claim_id'],\n            comment=\"It's 5 O'Clock Somewhere\"\n        )\n        comments = (await self.daemon.jsonrpc_comment_list(stream['claim_id']))['items']\n        self.assertEqual(1, len(comments))\n        self.assertEqual(comment['comment_id'], comments[0]['comment_id'])\n        self.assertEqual(stream['claim_id'], comments[0]['claim_id'])\n\n        channel2 = (await self.channel_create('@BuffettJimmy'))['outputs'][0]\n        await self.daemon.jsonrpc_comment_create(\n            claim_id=stream['claim_id'],\n            channel_name=channel2['name'],\n            comment='Let\\'s all go to Margaritaville',\n            parent_id=comments[0]['comment_id']\n        )\n        comments = (await self.daemon.jsonrpc_comment_list(stream['claim_id']))['items']\n        self.assertEqual(2, len(comments))\n        self.assertEqual(comments[0]['channel_id'], channel2['claim_id'])\n        self.assertEqual(comments[0]['parent_id'], comments[1]['comment_id'])\n\n        comment = await self.daemon.jsonrpc_comment_create(\n            claim_id=stream['claim_id'],\n            comment='Anonymous comment'\n        )\n        comments = (await self.daemon.jsonrpc_comment_list(stream['claim_id']))['items']\n        self.assertEqual(comment['comment_id'], comments[0]['comment_id'])\n\n    async def test02_unsigned_comment_list(self):\n        stream = (await self.stream_create())['outputs'][0]\n        comments = []\n        for i in range(28):\n            comment = await self.daemon.jsonrpc_comment_create(\n                comment=f'{i}',\n                claim_id=stream['claim_id'],\n            )\n            self.assertIn('comment_id', comment)\n            comments.append(comment)\n\n        comment_list = await self.daemon.jsonrpc_comment_list(\n            claim_id=stream['claim_id']\n        )\n        self.assertIs(comment_list['page_size'], 50)\n        self.assertIs(comment_list['page'], 1)\n        self.assertIs(comment_list['total_items'], 28)\n        for comment in comment_list['items']:\n            self.assertEqual(comment['comment'], comments.pop()['comment'])\n\n        signed_comment_list = await self.daemon.jsonrpc_comment_list(\n            claim_id=stream['claim_id'],\n            is_channel_signature_valid=True\n        )\n        self.assertIs(len(signed_comment_list['items']), 0)\n\n    async def test03_signed_comments_list(self):\n        channel = (await self.channel_create('@JimmyBuffett'))['outputs'][0]\n        stream = (await self.stream_create())['outputs'][0]\n        comments = []\n        for i in range(28):\n            comment = await self.daemon.jsonrpc_comment_create(\n                comment=f'{i}',\n                claim_id=stream['claim_id'],\n                channel_id=channel['claim_id'],\n            )\n            self.assertIn('comment_id', comment)\n            comments.append(comment)\n\n        comment_list = await self.daemon.jsonrpc_comment_list(\n            claim_id=stream['claim_id']\n        )\n        self.assertIs(comment_list['page_size'], 50)\n        self.assertIs(comment_list['page'], 1)\n        self.assertIs(comment_list['total_items'], 28)\n        for comment in comment_list['items']:\n            self.assertEqual(comment['comment'], comments.pop()['comment'])\n\n        signed_comment_list = await self.daemon.jsonrpc_comment_list(\n            claim_id=stream['claim_id'],\n            is_channel_signature_valid=True\n        )\n        
self.assertIs(len(signed_comment_list['items']), 28)\n"},"avg_line_length":{"kind":"number","value":38.7727272727,"string":"38.772727"},"max_line_length":{"kind":"number","value":103,"string":"103"},"alphanum_fraction":{"kind":"number","value":0.6085873388,"string":"0.608587"}}},{"rowIdx":46252,"cells":{"hexsha":{"kind":"string","value":"2e06dfe9bc67684bd6217157e61a6a60afe366d2"},"size":{"kind":"number","value":9265,"string":"9,265"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"encoder.py"},"max_stars_repo_name":{"kind":"string","value":"FinlayDaG33k/HBC"},"max_stars_repo_head_hexsha":{"kind":"string","value":"e8aa7ae6b6d8af758f15613ab2aaf6cb276e4cd9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n  \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-06-15T07:22:14.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2016-06-15T07:22:14.000Z"},"max_issues_repo_path":{"kind":"string","value":"encoder.py"},"max_issues_repo_name":{"kind":"string","value":"FinlayDaG33k/HBC"},"max_issues_repo_head_hexsha":{"kind":"string","value":"e8aa7ae6b6d8af758f15613ab2aaf6cb276e4cd9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n  \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"encoder.py"},"max_forks_repo_name":{"kind":"string","value":"FinlayDaG33k/HBC"},"max_forks_repo_head_hexsha":{"kind":"string","value":"e8aa7ae6b6d8af758f15613ab2aaf6cb276e4cd9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n  \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n\"\"\"\nHBC by FinlayDaG33k under the MIT License\n\"\"\"\nimport Pyro4\nimport os\nimport platform\nimport re\nimport logging\nfrom threading import Timer\nimport subprocess\nfrom encoder_cfg import pyro_host, pyro_port, ftp_host, ftp_port, ftp_user, ftp_pass\nfrom encoder_cfg import IDLE, RUNNING, Task, getLanIP\nfrom ftplib import FTP\nimport socket\n \nhandbrake_unix = '/usr/bin/HandBrakeCLI'\nhandbrake_win32 = 'C:\\\\Program Files\\\\Handbrake\\\\HandBrakeCLI.exe'\nhandbrake_win64 = 'C:\\\\Program Files (x86)\\\\Handbrake\\\\HandBrakeCLI.exe'\n\nclass Encoder(object):\n    \"\"\"\n    Main encoder object\n    \"\"\"\n    def __init__(self):\n        # The dir which the encoder uses to store video filess it grabs from the central server\n        # and files which it generates via handbrake\n        # TODO -- make this configurable\n        self.homedir = os.path.expanduser(\"~\")\n\n        # Look up the central server\n        self.central = Pyro4.Proxy('PYRONAME:central.encoding@{0}:{1}'.format(pyro_host,pyro_port))\n\n        # Determine the handbrake path\n        # TODO -- This should probably be configurable too\n        self.handbrake = ''\n        if os.path.exists(handbrake_unix):\n            self.handbrake = handbrake_unix\n        elif os.path.exists(handbrake_win32):\n            self.handbrake = handbrake_win32\n        elif os.path.exists(handbrake_win64):\n            self.handbrake = handbrake_win64\n\n        self.status = IDLE\n\n        # The name used to register with Pyro Naming\n        # TODO -- Might want to use a better naming scheme, lazy linux users may not set 
hostnames\n        # on all their hosts, meaning we could have multiple encoder.localhost's stepping on eachother\n        self.name = 'encoder.{0}'.format(platform.node())\n\n        # Reference the external handbrake process\n        self.encodeProc = None\n\n        # This timer will check on the encoder's status every ten seconds\n        self.timer = Timer(10,self.checkForTask)\n        self.timer.start()\n    \n    def getName(self):\n        return self.name\n    \n    def getLine(self):\n        \"\"\"\n        Read from the handbrake process's stdout one char at a time\n        until we hit a \\r -- this is a needed because if you try\n        a readline it'll hang until it hits \\n -- which won't happen\n        until handbrake exits -- it updates the vid progress in place\n        with multiple \\r messages\n        \"\"\"\n        line = ''\n        while True:\n            out = self.encodeProc.stdout.read(1)\n            if out:\n                if out == '\\r':\n                    break\n            if not out:\n                break\n            line += out\n        return line\n    \n    def checkForTask(self):\n        if self.status == RUNNING:\n            # We (think) we're doing something\n            if self.encodeProc:\n                # Handbrake process reference exists\n                if self.encodeProc.poll() is not None:\n                    # Handbrake has exited\n                    # TODO -- we should do some validation on the handbrake exit code, just checking that the\n                    # output file exists is pretty weak\n                    if os.path.exists(os.path.join(self.homedir,self.task.getOutputName())):\n                        # Since this file exists we assume things succeeded, FTP the video to the central server\n                        if not self.sendVideo():\n                            self.task.setErrors('Unable to send video')\n\n                        # Complete the task and inform the central server that we're done\n                        self.task.taskFinished()\n                        self.task.setCompleted(100)\n                        self.central.finishTask(self.task)\n                        self.cleanUp()\n                        return\n                else:\n                    # We're not done yet, but handbrake is running, update the central server on our progress\n                    if not self.central.updateTask(self.task):\n                        self.cancel(self.task.getName())\n            else:\n                # Don't know why we think we're running -- probably a corner case here, but let's go back to IDLE\n                self.status = IDLE\n        else:\n            failed = False\n            # Try to get at ask from the central server\n            self.task = self.central.getTask(self.getName())\n            if self.task:\n                # We got a task, set our status to running, grab the video via FTP from the server and begin the encode\n                # process\n                self.status = RUNNING\n                if self.getVideo(self.task.getName()):\n                    self.encodeVid()\n                else:\n                    failed = True\n            if failed:\n                # Something bad happened with FTP, fail the task and tell the server\n                self.central.finishTask(self.task)\n                self.status = IDLE\n        # Reschedule the task so we'll check our state again in two seconds\n        self.timer = Timer(2,self.checkForTask)\n        self.timer.start()\n    \n    def cleanUp(self):\n        \"\"\"\n        Various clean up operations that need to be performed\n        - Delete the video files, we shouldn't need them by now\n        - Cancel the update timer if it's still active since HB has exited\n        - Go back to IDLE\n        - Reschedule the main method timer\n        \"\"\"\n        if os.path.exists(os.path.join(self.homedir,self.task.getOutputName())):\n            os.unlink(os.path.join(self.homedir,self.task.getOutputName()))\n        if os.path.exists(os.path.join(self.homedir,self.task.getName())):\n            os.unlink(os.path.join(self.homedir,self.task.getName()))\n        self.updateTimer.cancel()\n        self.timer.cancel()\n        self.task = None\n        self.status = IDLE\n        self.timer = Timer(2,self.checkForTask)\n        self.timer.start()\n    \n    def cancel(self,name):\n        \"\"\"\n        External call point to cancel the active task, used by the central server upon user request,\n        kills the handbrake process and cleans up\n        \"\"\"\n        if self.task:\n            if self.task.getName() == name:\n                if 
self.encodeProc:\n                    self.encodeProc.kill()\n                self.cleanUp()\n                return True\n        return False\n    \n    def updateCompleted(self):\n        \"\"\"\n        Timed method which gets the percentage completed from the handbrake stdout and updates the task\n        \"\"\"\n        out = self.getLine()\n        if out:\n            match = re.search('(\\\\d+\\\\.\\\\d+)\\\\s\\\\%',out)\n            if match:\n                completed = match.group(1)\n                self.task.setCompleted(completed)\n        if self.encodeProc:\n            if self.encodeProc.poll() is None:\n                self.updateTimer = Timer(.1,self.updateCompleted)\n                self.updateTimer.start()\n\n    def getStatus(self):\n        return self.status\n    \n    def sendVideo(self):\n        \"\"\"\n        Sends the encoded video back to the central server\n        \"\"\"\n        ftp = FTP()\n        ftp.connect(ftp_host,ftp_port)\n        ftp.login(ftp_user, ftp_pass)\n        ftp.storbinary('STOR {0}'.format(self.task.getOutputName()),open(os.path.join(self.homedir,self.task.getOutputName()),'rb'))\n        return True\n    \n    def getVideo(self,video):\n        \"\"\"\n        Grabs the passed video from the central server\n        \"\"\"\n        ftp = FTP()\n        ftp.connect(ftp_host,ftp_port)\n        ftp.login(ftp_user, ftp_pass)\n        ftp.retrbinary('RETR {0}'.format(video), open(os.path.join(self.homedir,video),'wb').write)\n        return True\n    \n    def encodeVid(self):\n        \"\"\"\n        Kick off the handbrake process with the various settings found in the task as arguements\n        Also starts the timer which will parse the handbrake output for completion percentages\n        \"\"\"\n        self.task.setOutputName(re.sub('\\\\.\\\\w*$','.{0}'.format(self.task.getFormat()),self.task.getName()))\n        self.task.taskStarted()\n        args = [self.handbrake]\n        if self.task.getEncoder():\n            args.extend(['-e',self.task.getEncoder()])\n        if self.task.getFormat():\n            args.extend(['-f',self.task.getFormat()])\n        if self.task.getQuality():\n            args.extend(['-q',self.task.getQuality()])\n        if self.task.getLarge():\n            args.append('-4')\n        args.extend(['-i',os.path.join(self.homedir,self.task.getName()),'-o',os.path.join(self.homedir,self.task.getOutputName())])\n        self.encodeProc = subprocess.Popen(args,stdout=subprocess.PIPE)\n        self.updateTimer = Timer(.1,self.updateCompleted)\n        self.updateTimer.start()\n\ndef main():\n    encoder = Encoder()\n    # Register encoder with Pyro naming\n    daemon = Pyro4.Daemon(host=getLanIP())\n    uri = daemon.register(encoder)\n    ns = Pyro4.locateNS(host=pyro_host,port=pyro_port)\n    try:\n        # Remove any stale bindings in naming\n        # TODO -- do a little more validation, a 'stale' binding may be a host with a duplicate name\n        ns.remove(encoder.getName())\n    except:\n        pass\n    ns.register(encoder.getName(),uri)\n    daemon.requestLoop()\n    \nif __name__ == \"__main__\":\n    main()"},"avg_line_length":{"kind":"number","value":39.594017094,"string":"39.594017"},"max_line_length":{"kind":"number","value":132,"string":"132"},"alphanum_fraction":{"kind":"number","value":0.5816513761,"string":"0.581651"}}},{"rowIdx":46253,"cells":{"hexsha":{"kind":"string","value":"cf2ff7ebdead2953f99d3d442c16b6dc10c7c9a3"},"size":{"kind":"number","value":763,"string":"763"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Utils/py/ActionSelection/evaluation/decision_histogram_plot.py"},"max_stars_repo_name":{"kind":"string","value":"tarsoly/NaoTH"},"max_stars_repo_head_hexsha":{"kind":"string","value":"dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52"},"max_stars_repo_licenses":{"kind":"list like","value":["ECL-2.0","Apache-2.0"],"string":"[\n  \"ECL-2.0\",\n 
\"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Utils/py/ActionSelection/evaluation/decision_histogram_plot.py"},"max_issues_repo_name":{"kind":"string","value":"tarsoly/NaoTH"},"max_issues_repo_head_hexsha":{"kind":"string","value":"dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52"},"max_issues_repo_licenses":{"kind":"list like","value":["ECL-2.0","Apache-2.0"],"string":"[\n \"ECL-2.0\",\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Utils/py/ActionSelection/evaluation/decision_histogram_plot.py"},"max_forks_repo_name":{"kind":"string","value":"tarsoly/NaoTH"},"max_forks_repo_head_hexsha":{"kind":"string","value":"dcd2b67ef6bf9953c81d3e1b26e543b5922b7d52"},"max_forks_repo_licenses":{"kind":"list like","value":["ECL-2.0","Apache-2.0"],"string":"[\n \"ECL-2.0\",\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from __future__ import division\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pickle\n\n\"\"\"\nShows a bar plot of the decision histogram for one robot position on the field\n\nExample:\n run without any parameters\n\n $ python decision_histogram_plot.py\n\"\"\"\n\n# Set file for importing the decisions\ndecisions = pickle.load(open(\"../data/humanoids/simulate_every_pos-30-100.pickle\", \"rb\"))\n\n# Set robot position\nfixed_rotation = 0\nfixed_x = 1000\nfixed_y = 100\n\n\nfor pos in decisions:\n x, y, rotation, new_decision_histogram = pos\n\n # only plot if desired position is found\n if rotation == fixed_rotation and x == fixed_x and y == fixed_y:\n\n plt.bar(range(0, len(new_decision_histogram)), new_decision_histogram)\n plt.show()\n"},"avg_line_length":{"kind":"number","value":23.84375,"string":"23.84375"},"max_line_length":{"kind":"number","value":89,"string":"89"},"alphanum_fraction":{"kind":"number","value":0.7300131062,"string":"0.730013"}}},{"rowIdx":46253,"cells":{"hexsha":{"kind":"string","value":"cf5813108f9b2605e07af83b9ad4c455d9ff95f7"},"size":{"kind":"number","value":269,"string":"269"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"python/primary/模块/print_tuple.py"},"max_stars_repo_name":{"kind":"string","value":"EstherLacan/jiangfw"},"max_stars_repo_head_hexsha":{"kind":"string","value":"a449b1925742873c76dc1b3284aedb359204bc76"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-07-29T16:43:46.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-07-29T16:43:46.000Z"},"max_issues_repo_path":{"kind":"string","value":"python/primary/模块/print_tuple.py"},"max_issues_repo_name":{"kind":"string","value":"EstherLacan/jiangfw"},"max_issues_repo_head_hexsha":{"kind":"string","value":"a449b1925742873c76dc1b3284aedb359204bc76"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"python/primary/模块/print_tuple.py"},"max_forks_repo_name":{"kind":"string","value":"EstherLacan/jiangfw"},"max_forks_repo_head_hexsha":{"kind":"string","value":"a449b1925742873c76dc1b3284aedb359204bc76"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# Filename: print_tuple.py\n\nage = 22\nmyempty = ()\nprint(len(myempty))\na1 = ('aa',)\nprint(len(a1))\n\na2 = ('abcd')\nprint(len(a2),a2[3])\n\nname = ('Swaroop')\n\nprint ('%s is %d years old' % (name, age))\nprint ('Why is %s playing with that python?' % name)\n"},"avg_line_length":{"kind":"number","value":15.8235294118,"string":"15.823529"},"max_line_length":{"kind":"number","value":52,"string":"52"},"alphanum_fraction":{"kind":"number","value":0.6096654275,"string":"0.609665"}}},{"rowIdx":46254,"cells":{"hexsha":{"kind":"string","value":"d8717a6e1bcee1ad4312e5038dbf93c9db29dff4"},"size":{"kind":"number","value":1439,"string":"1,439"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"kollokationsprofile_daten/10Words/dfcp/w_element/wele_dass/kollokation_Wphrase_pr.py"},"max_stars_repo_name":{"kind":"string","value":"ovanov/Kollokationsprofile-von-DFCP"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4d45410a4f0b1e692f18da965d51d9a8e5eb7a08"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"kollokationsprofile_daten/10Words/dfcp/w_element/wele_dass/kollokation_Wphrase_pr.py"},"max_issues_repo_name":{"kind":"string","value":"ovanov/Kollokationsprofile-von-DFCP"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4d45410a4f0b1e692f18da965d51d9a8e5eb7a08"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"kollokationsprofile_daten/10Words/dfcp/w_element/wele_dass/kollokation_Wphrase_pr.py"},"max_forks_repo_name":{"kind":"string","value":"ovanov/Kollokationsprofile-von-DFCP"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4d45410a4f0b1e692f18da965d51d9a8e5eb7a08"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import re\nfrom typing import Dict, List\n\nimport pandas as pd\nfrom pandas.core.frame import DataFrame\n\n\ndef preprocess_text(filename: str) -> List:\n\n listed_words = []\n with open(filename, 'r') as annis:\n for line in annis:\n if re.search(r\"\\d+\\.\\t\", line) != None:\n\n line = re.sub(r'\\d+\\.\\ttok\\s\\s', \"\", line)\n line = re.sub(r'dass\\s$', \"\", 
line)\n listed_words.append(line.split())\n print(line.split())\n\n return listed_words\n\n\ndef count_collocations(preprocessed_list: List) -> Dict:\n word_count_dict = {}\n\n for listed_values in preprocessed_list:\n for value in listed_values:\n if value not in word_count_dict.keys():\n word_count_dict[value] = [\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ]\n word_count_dict[value][listed_values.index(value)] += 1\n else:\n word_count_dict[value][listed_values.index(value)] += 1\n\n return word_count_dict\n\n\ndef save_as_csv(collocations: Dict, outfile_name: str) -> DataFrame:\n df = pd.DataFrame.from_dict(collocations, orient='index')\n\n return df.to_csv(outfile_name, sep='\\t')\n\n\ndef main():\n\n preprocessed_text = preprocess_text('./annis_10word_pr.txt')\n collocations = count_collocations(preprocessed_text)\n save_as_csv(collocations, './annis_10word_pr.csv')\n\n\nif __name__ == \"__main__\":\n main()\n"},"avg_line_length":{"kind":"number","value":26.6481481481,"string":"26.648148"},"max_line_length":{"kind":"number","value":71,"string":"71"},"alphanum_fraction":{"kind":"number","value":0.6073662265,"string":"0.607366"}}},{"rowIdx":46255,"cells":{"hexsha":{"kind":"string","value":"2b04b1016a2aba0152c37007fd02c2cd7f107c4b"},"size":{"kind":"number","value":3758,"string":"3,758"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Upload/Gs/GsUploadThread.py"},"max_stars_repo_name":{"kind":"string","value":"smthkissinger/docker-images"},"max_stars_repo_head_hexsha":{"kind":"string","value":"35e868295d04fa780325ada4168381f1e80e8fe4"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":282,"string":"282"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-06-16T14:41:44.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-02T03:43:02.000Z"},"max_issues_repo_path":{"kind":"string","value":"mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Upload/Gs/GsUploadThread.py"},"max_issues_repo_name":{"kind":"string","value":"smthkissinger/docker-images"},"max_issues_repo_head_hexsha":{"kind":"string","value":"35e868295d04fa780325ada4168381f1e80e8fe4"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":146,"string":"146"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-06-16T08:55:45.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2020-09-08T10:37:32.000Z"},"max_forks_repo_path":{"kind":"string","value":"mongodb/mongodb_consistent_backup/official/mongodb_consistent_backup/Upload/Gs/GsUploadThread.py"},"max_forks_repo_name":{"kind":"string","value":"smthkissinger/docker-images"},"max_forks_repo_head_hexsha":{"kind":"string","value":"35e868295d04fa780325ada4168381f1e80e8fe4"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"number","value":94,"string":"94"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-06-16T10:49:07.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-28T09:14:03.000Z"},"content":{"kind":"string","value":"import boto\nimport logging\nimport os\n\nfrom 
mongodb_consistent_backup.Common.Util import file_md5hash\nfrom mongodb_consistent_backup.Errors import OperationError\n\n\nclass GsUploadThread:\n def __init__(self, backup_dir, file_path, gs_path, bucket, project_id, access_key, secret_key, remove_uploaded=False, retries=5):\n self.backup_dir = backup_dir\n self.file_path = file_path\n self.gs_path = gs_path\n self.bucket = bucket\n self.project_id = project_id\n self.access_key = access_key\n self.secret_key = secret_key\n self.remove_uploaded = remove_uploaded\n self.retries = retries\n\n self.path = \"%s/%s\" % (self.bucket, self.gs_path)\n self.meta_data_dir = \"mongodb_consistent_backup-META\"\n self._metadata = None\n\n def configure(self):\n if not boto.config.has_section(\"Credentials\"):\n boto.config.add_section(\"Credentials\")\n boto.config.set(\"Credentials\", \"gs_access_key_id\", self.access_key)\n boto.config.set(\"Credentials\", \"gs_secret_access_key\", self.secret_key)\n if not boto.config.has_section(\"Boto\"):\n boto.config.add_section(\"Boto\")\n boto.config.setbool('Boto', 'https_validate_certificates', True)\n\n def get_uri(self):\n return boto.storage_uri(self.path, 'gs')\n\n def gs_exists(self):\n try:\n self.metadata()\n return True\n except boto.exception.InvalidUriError:\n return False\n\n def metadata(self):\n logging.debug(\"Getting metadata for path: %s\" % self.path)\n if not self._metadata:\n self._metadata = self.get_uri().get_key()\n return self._metadata\n\n def gs_md5hash(self):\n key = self.metadata()\n if hasattr(key, 'etag'):\n return key.etag.strip('\"\\'')\n\n def success(self):\n if self.remove_uploaded and not self.file_path.startswith(os.path.join(self.backup_dir, self.meta_data_dir)):\n logging.debug(\"Removing successfully uploaded file: %s\" % self.file_path)\n os.remove(self.file_path)\n\n def run(self):\n f = None\n try:\n self.configure()\n if self.gs_exists():\n gs_md5hash = self.gs_md5hash()\n if gs_md5hash and file_md5hash(self.file_path) == gs_md5hash:\n logging.debug(\"Path %s already exists with the same checksum (%s), skipping\" % (self.path, self.gs_md5hash()))\n return\n logging.debug(\"Path %s checksum and local checksum differ, re-uploading\" % self.path)\n else:\n logging.debug(\"Path %s does not exist, uploading\" % self.path)\n\n try:\n f = open(self.file_path, 'r')\n uri = self.get_uri()\n retry = 0\n error = None\n while retry < self.retries:\n try:\n logging.info(\"Uploading %s to Google Cloud Storage (attempt %i/%i)\" % (self.path, retry, self.retries))\n uri.new_key().set_contents_from_file(f)\n except Exception, e:\n logging.error(\"Received error for Google Cloud Storage upload of %s: %s\" % (self.path, e))\n error = e\n retry += 1\n continue\n if retry >= self.retries and error:\n raise error\n finally:\n if f:\n f.close()\n self.success()\n except Exception, e:\n logging.error(\"Uploading to Google Cloud Storage failed! 
Error: %s\" % e)\n raise OperationError(e)\n"},"avg_line_length":{"kind":"number","value":39.1458333333,"string":"39.145833"},"max_line_length":{"kind":"number","value":133,"string":"133"},"alphanum_fraction":{"kind":"number","value":0.5678552422,"string":"0.567855"}}},{"rowIdx":46256,"cells":{"hexsha":{"kind":"string","value":"510824956a099e0817d4a9c182b2014c3bfb0912"},"size":{"kind":"number","value":703,"string":"703"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Hello coding/selection_sort.py"},"max_stars_repo_name":{"kind":"string","value":"wooooooogi/SAlgorithm"},"max_stars_repo_head_hexsha":{"kind":"string","value":"bf76bb721785a52b6abf158077b554b0626ee1f7"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Hello coding/selection_sort.py"},"max_issues_repo_name":{"kind":"string","value":"wooooooogi/SAlgorithm"},"max_issues_repo_head_hexsha":{"kind":"string","value":"bf76bb721785a52b6abf158077b554b0626ee1f7"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Hello coding/selection_sort.py"},"max_forks_repo_name":{"kind":"string","value":"wooooooogi/SAlgorithm"},"max_forks_repo_head_hexsha":{"kind":"string","value":"bf76bb721785a52b6abf158077b554b0626ee1f7"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Coded by Sungwook Kim\n# Date: 2019-07-30\n# Python version: 3.6.5\n# IDE: Spyder 3\n\n# Sort, information = [Name, Date, E-mail]\n\n# Sample code is 2001 year's 메리츠화재 stock information. 
(Downloaded in KRX)\n\n# Sort by price (When is the highest price in 2001)\n\nimport pandas as pd\nimport numpy as np\nimport os\n# Use pandas library to get csv information.\nData = pd.read_csv(os.getcwd() + \"/selection_sort_sample_code.csv\", encoding=\"ms949\", index_col=False)\nprint(Data)\nData = Data[:-1]\n#print(Data)\n\nsorting_variable = \"종가\"\nsorted_variable = \"년/월/일\"\n\nSV = Data[sorting_variable]\nSV = [SV]\n#SV = np.array()\n#print(SV)\n#for i in SV:\n# for j in SV:\n# if i < J:\n# break\n "},"avg_line_length":{"kind":"number","value":21.96875,"string":"21.96875"},"max_line_length":{"kind":"number","value":102,"string":"102"},"alphanum_fraction":{"kind":"number","value":0.6557610242,"string":"0.655761"}}},{"rowIdx":46257,"cells":{"hexsha":{"kind":"string","value":"64df9e124b4938a55fed62588a4f3ce6a5e14d05"},"size":{"kind":"number","value":463,"string":"463"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"zencad/examples/1.GeomPrim/3.prim1d/polysegment.py"},"max_stars_repo_name":{"kind":"string","value":"Spiritdude/zencad"},"max_stars_repo_head_hexsha":{"kind":"string","value":"4e63b1a6306dd235f4daa2791b10249f7546c95b"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-04-11T14:11:40.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2018-09-12T19:03:36.000Z"},"max_issues_repo_path":{"kind":"string","value":"zencad/examples/1.GeomPrim/3.prim1d/polysegment.py"},"max_issues_repo_name":{"kind":"string","value":"Spiritdude/zencad"},"max_issues_repo_head_hexsha":{"kind":"string","value":"4e63b1a6306dd235f4daa2791b10249f7546c95b"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"zencad/examples/1.GeomPrim/3.prim1d/polysegment.py"},"max_forks_repo_name":{"kind":"string","value":"Spiritdude/zencad"},"max_forks_repo_head_hexsha":{"kind":"string","value":"4e63b1a6306dd235f4daa2791b10249f7546c95b"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n\n\"\"\"\nZenCad API example: polysegment.py\n\"\"\"\n\nfrom zencad import *\n\npnts = points([(0, 0, 0), (0, 10, 10), (0, 10, 20), (0, -10, 20), (0, -10, 10)])\n\nm0 = polysegment(pnts)\nm1 = polysegment(pnts, closed=True)\nm2 = polysegment(pnts, closed=True).fill()\nm3 = polysegment(pnts + [(0, 0, 0)])\nm4 = polysegment(pnts + [(0, 0, 
0)]).fill()\n\ndisp(m0)\ndisp(m1.left(20))\ndisp(m2.left(40))\ndisp(m3.left(20).forw(30))\ndisp(m4.left(40).forw(30))\n\nshow()\n"},"avg_line_length":{"kind":"number","value":19.2916666667,"string":"19.291667"},"max_line_length":{"kind":"number","value":80,"string":"80"},"alphanum_fraction":{"kind":"number","value":0.6112311015,"string":"0.611231"}}},{"rowIdx":46258,"cells":{"hexsha":{"kind":"string","value":"8559713c3ce95f2a2f0024ef72dddcb77d47aa41"},"size":{"kind":"number","value":4596,"string":"4,596"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"datastories_semeval2017_task4/utilities_nn/data_loader.py"},"max_stars_repo_name":{"kind":"string","value":"florianfricke/Bachelor_Thesis_Sentiment_Analyse"},"max_stars_repo_head_hexsha":{"kind":"string","value":"aa1fa95cfbc13115ee60baaf79eab0d1940998ab"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-06-04T13:20:45.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-06-04T13:20:45.000Z"},"max_issues_repo_path":{"kind":"string","value":"datastories_semeval2017_task4/utilities_nn/data_loader.py"},"max_issues_repo_name":{"kind":"string","value":"florianfricke/Bachelor_Thesis_Sentiment_Analyse"},"max_issues_repo_head_hexsha":{"kind":"string","value":"aa1fa95cfbc13115ee60baaf79eab0d1940998ab"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":6,"string":"6"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-06-03T18:45:11.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-10T01:51:03.000Z"},"max_forks_repo_path":{"kind":"string","value":"datastories_semeval2017_task4/utilities_nn/data_loader.py"},"max_forks_repo_name":{"kind":"string","value":"florianfricke/Bachelor_Thesis_Sentiment_Analyse"},"max_forks_repo_head_hexsha":{"kind":"string","value":"aa1fa95cfbc13115ee60baaf79eab0d1940998ab"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nCreated by Christos Baziotis.\n\"\"\"\nimport random\nimport pickle\nimport numpy\n\nfrom ekphrasis.classes.preprocessor import TextPreProcessor\nfrom ekphrasis.classes.tokenizer import SocialTokenizer\nfrom ekphrasis.dicts.emoticons import emoticons\nfrom kutilities.helpers.data_preparation import print_dataset_statistics, \\\n labels_to_categories, categories_to_onehot\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom embeddings.WordVectorsManager import WordVectorsManager\nfrom modules.CustomPreProcessor import CustomPreProcessor\nfrom modules.EmbeddingsExtractor import EmbeddingsExtractor\n\ndef prepare_dataset(X, y, pipeline, y_one_hot=True, y_as_is=False):\n X = pipeline.fit_transform(X)\n if y_as_is:\n try:\n return X, numpy.asarray(y, dtype=float)\n except:\n return X, y\n\n # 1 - Labels (positive) to categories (integer)\n y_cat = labels_to_categories(y)\n\n if y_one_hot:\n # 2 - Labels to one-hot vectors\n return X, categories_to_onehot(y_cat)\n\n return X, y_cat\n\ndef get_embeddings(corpus, dim):\n vectors = 
WordVectorsManager(corpus, dim).read()\n vocab_size = len(vectors)\n print('Loaded %s word vectors.' % vocab_size)\n wv_map = {}\n pos = 0\n # +1 for zero padding token and +1 for \n emb_matrix = numpy.ndarray((vocab_size + 2, dim), dtype='float32')\n for i, (word, vector) in enumerate(vectors.items()):\n pos = i + 1\n wv_map[word] = pos\n emb_matrix[pos] = vector\n\n pos += 1\n wv_map[\"\"] = pos\n emb_matrix[pos] = numpy.random.uniform(low=-0.05, high=0.05, size=dim)\n\n return emb_matrix, wv_map\n\ndef prepare_text_only_dataset(X, pipeline):\n X = pipeline.fit_transform(X)\n return X\n\nclass Task4Loader:\n def __init__(self, word_indices, text_lengths, loading_data=True, datafolder=\"\", preprocess_typ=\"ekphrasis\", **kwargs):\n self.word_indices = word_indices\n self.y_one_hot = kwargs.get(\"y_one_hot\", True)\n\n self.pipeline = Pipeline([\n ('ext', EmbeddingsExtractor(word_indices=word_indices,\n max_lengths=text_lengths,\n add_tokens=(True),\n unk_policy=\"random\"))])\n if(loading_data):\n print(\"Loading data...\")\n self.X_train = pickle.load(open(\n \"{}X_train_{}.pickle\".format(datafolder, preprocess_typ), \"rb\"))\n self.X_test = pickle.load(open(\n \"{}X_test_{}.pickle\".format(datafolder, preprocess_typ), \"rb\"))\n self.y_train = pickle.load(open(\n \"{}y_train_{}.pickle\".format(datafolder, preprocess_typ), \"rb\"))\n self.y_test = pickle.load(open(\n \"{}y_test_{}.pickle\".format(datafolder, preprocess_typ), \"rb\"))\n\n print(\"-------------------\\ntraining set stats\\n-------------------\")\n print_dataset_statistics(self.y_train)\n print(\"-------------------\")\n\n def load_train_val_test(self):\n X_val, X_test, y_val, y_test = train_test_split(self.X_test, self.y_test,\n test_size=0.5,\n stratify=self.y_test,\n random_state=42)\n print(\"\\nPreparing training set...\")\n training = prepare_dataset(self.X_train, self.y_train, self.pipeline,\n self.y_one_hot)\n print(\"\\nPreparing validation set...\")\n validation = prepare_dataset(X_val, y_val, self.pipeline,\n self.y_one_hot)\n print(\"\\nPreparing test set...\")\n testing = prepare_dataset(X_test, y_test, self.pipeline,\n self.y_one_hot)\n return training, validation, testing\n\n def load_final(self): \n print(\"\\nPreparing training set...\")\n training = prepare_dataset(self.X_train, self.y_train, self.pipeline,\n self.y_one_hot) \n print(\"\\nPreparing test set...\")\n testing = prepare_dataset(self.X_test, self.y_test, self.pipeline,\n self.y_one_hot)\n return training, testing\n \n def decode_data_to_embeddings(self, X_data, y_data):\n embedding_data = prepare_dataset(X_data, y_data, self.pipeline,\n self.y_one_hot)\n return embedding_data\n"},"avg_line_length":{"kind":"number","value":40.6725663717,"string":"40.672566"},"max_line_length":{"kind":"number","value":123,"string":"123"},"alphanum_fraction":{"kind":"number","value":0.5881201044,"string":"0.58812"}}},{"rowIdx":46259,"cells":{"hexsha":{"kind":"string","value":"740ff96b7d1bf1f591bf1f5059c0aa00bbee743c"},"size":{"kind":"number","value":449,"string":"449"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"api/models/library.py"},"max_stars_repo_name":{"kind":"string","value":"aidun/seite50"},"max_stars_repo_head_hexsha":{"kind":"string","value":"761a8e76f9e4473f70a8705dce169d61bf660267"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"api/models/library.py"},"max_issues_repo_name":{"kind":"string","value":"aidun/seite50"},"max_issues_repo_head_hexsha":{"kind":"string","value":"761a8e76f9e4473f70a8705dce169d61bf660267"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2018-05-11T18:26:18.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2018-05-12T18:49:38.000Z"},"max_forks_repo_path":{"kind":"string","value":"api/models/library.py"},"max_forks_repo_name":{"kind":"string","value":"aidun/seite50"},"max_forks_repo_head_hexsha":{"kind":"string","value":"761a8e76f9e4473f70a8705dce169d61bf660267"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2018-10-10T19:52:17.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2018-10-10T19:52:17.000Z"},"content":{"kind":"string","value":"from django.db import models\nfrom api.models.user import User\n\n\n# Create your models here.\nclass Library(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=255, blank=False, unique=True)\n\n owner = models.ManyToManyField(User, related_name=\"organized_by\", blank=False)\n\n # Metadata\n date_created = models.DateTimeField(auto_now_add=True)\n date_modified = models.DateTimeField(auto_now=True)\n"},"avg_line_length":{"kind":"number","value":29.9333333333,"string":"29.933333"},"max_line_length":{"kind":"number","value":82,"string":"82"},"alphanum_fraction":{"kind":"number","value":0.7594654788,"string":"0.759465"}}},{"rowIdx":46260,"cells":{"hexsha":{"kind":"string","value":"744976428a3f9d0b93a5bf2aee0bfc9497a2c2d3"},"size":{"kind":"number","value":661,"string":"661"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Codeforces_problems/Merge It A/solution.py"},"max_stars_repo_name":{"kind":"string","value":"KAHund/CompetitiveCode"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6ed211a2f795569f5c2f18c2f660520d99d41ca0"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":165,"string":"165"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-10-03T08:01:11.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T02:42:08.000Z"},"max_issues_repo_path":{"kind":"string","value":"Codeforces_problems/Merge It A/solution.py"},"max_issues_repo_name":{"kind":"string","value":"KAHund/CompetitiveCode"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6ed211a2f795569f5c2f18c2f660520d99d41ca0"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":383,"string":"383"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-10-03T07:39:11.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-11-20T07:06:35.000Z"},"max_forks_repo_path":{"kind":"string","value":"Codeforces_problems/Merge It A/solution.py"},"max_forks_repo_name":{"kind":"string","value":"KAHund/CompetitiveCode"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6ed211a2f795569f5c2f18c2f660520d99d41ca0"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":380,"string":"380"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-10-03T08:05:04.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-19T06:56:59.000Z"},"content":{"kind":"string","value":"# We can check the divisibility by 3 by taking the sum of digits.\n# If remainder is 0, we consider it as it is. If remainder is 1 or 2 we can combine them greedily.\n# We can then combine 3 numbers each with remainder 1 or 2.\ndef sum_of(n):\n\ts = 0\n\tnum = n\n\twhile(num>0):\n\t\ts+=num%10\n\t\tnum = num//10\n\treturn s\n\nfor i in range(int(input())):\n\tn = int(input())\n\tl = list(map(int, input().split()))\n\tx_0 = 0\n\tx_1 = 0\n\tx_2 = 0\n\tfor i in range(n):\n\t\ttemp = sum_of(l[i])\n\t\tif(temp%3 == 0):\n\t\t\tx_0 += 1\n\t\telif((temp-1)%3 == 0):\n\t\t\tx_1 += 1\n\t\telse:\n\t\t\tx_2+=1\n\ttemp = min(x_1, x_2)\n\tcount = x_0 + temp\n\tx_1-=temp\n\tx_2-=temp\n\tcount += x_1//3\n\tcount += x_2//3\n\tprint(count)"},"avg_line_length":{"kind":"number","value":20.65625,"string":"20.65625"},"max_line_length":{"kind":"number","value":98,"string":"98"},"alphanum_fraction":{"kind":"number","value":0.6066565809,"string":"0.606657"}}},{"rowIdx":46261,"cells":{"hexsha":{"kind":"string","value":"7af4d8f7d4503155cd03cb13845759a7cfd88c78"},"size":{"kind":"number","value":8705,"string":"8,705"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"backend/apps/ineedstudent/views.py"},"max_stars_repo_name":{"kind":"string","value":"n-hackert/match4healthcare"},"max_stars_repo_head_hexsha":{"kind":"string","value":"761248c27b49e568c545c643a72eac9a040649d7"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"backend/apps/ineedstudent/views.py"},"max_issues_repo_name":{"kind":"string","value":"n-hackert/match4healthcare"},"max_issues_repo_head_hexsha":{"kind":"string","value":"761248c27b49e568c545c643a72eac9a040649d7"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"backend/apps/ineedstudent/views.py"},"max_forks_repo_name":{"kind":"string","value":"n-hackert/match4healthcare"},"max_forks_repo_head_hexsha":{"kind":"string","value":"761248c27b49e568c545c643a72eac9a040649d7"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from django.shortcuts import render\nfrom django.template import loader\nfrom django.http import HttpResponse\n\nfrom apps.mapview.utils import plzs\nfrom apps.iamstudent.models import Student\nfrom apps.ineedstudent.models import Hospital\nfrom apps.ineedstudent.forms import HospitalForm, EmailToHospitalForm\nfrom django.utils.translation import gettext_lazy as _\n\nfrom django.shortcuts import render\n\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom apps.mapview.utils import plzs, get_plzs_close_to, haversine\nimport django_tables2 as tables\nfrom django_tables2 import TemplateColumn\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom apps.accounts.decorator import student_required, hospital_required\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom functools import lru_cache\nfrom apps.mapview.views import get_ttl_hash\nfrom django.core.mail import EmailMessage\nfrom django.conf import settings\nfrom apps.iamstudent.models import EmailToHospital\nfrom django.contrib import messages\nfrom datetime import datetime\nimport time\nfrom apps.accounts.utils import send_password_set_email\nfrom apps.ineedstudent.forms import HospitalFormZustimmung\n\nfrom django.views.decorators.gzip import gzip_page\n\n\nclass StudentTable(tables.Table):\n info = TemplateColumn(template_name='info_button.html')\n checkbox = TemplateColumn(template_name='checkbox_studenttable.html')\n\n class Meta:\n model = Student\n template_name = \"django_tables2/bootstrap4.html\"\n exclude = ['uuid','registration_date','id']\n fields = ['user']\n\n\n# Should be safe against BREACH attack because we don't have user input in reponse body\n@gzip_page\ndef hospital_overview(request):\n locations_and_number = prepare_hospitals(ttl_hash=get_ttl_hash(60))\n template = loader.get_template('map_hospitals.html')\n context = {\n 'locations': list(locations_and_number.values()),\n }\n return HttpResponse(template.render(context, request))\n\n@lru_cache(maxsize=1)\ndef prepare_hospitals(ttl_hash=None):\n hospitals = Hospital.objects.filter(user__validated_email=True, is_approved=True, appears_in_map=True)\n locations_and_number = {}\n for hospital in hospitals:\n if len(hospital.sonstige_infos) != 0:\n cc = hospital.countrycode\n plz = hospital.plz\n key = cc + \"_\" + plz\n if key in locations_and_number:\n locations_and_number[key][\"count\"] += 1\n locations_and_number[key][\"uuid\"] = None\n else:\n lat, lon, ort = plzs[cc][plz]\n locations_and_number[key] = {\n \"uuid\": hospital.uuid,\n \"countrycode\": cc,\n \"plz\": plz,\n \"count\": 1,\n \"lat\": lat,\n \"lon\": lon,\n \"ort\": ort\n }\n return locations_and_number\n\n@login_required\ndef hospital_list(request, countrycode, plz):\n\n if countrycode not in plzs or plz not in plzs[countrycode]:\n # TODO: niceren error werfen\n return HttpResponse(\"Postleitzahl: \" + plz + \" ist keine valide Postleitzahl in \" + countrycode)\n\n lat, lon, ort = plzs[countrycode][plz]\n\n table = HospitalTable(Hospital.objects.filter(user__validated_email=True, is_approved=True, plz=plz, appears_in_map=True))\n table.paginate(page=request.GET.get(\"page\", 1), per_page=25)\n context = {\n 'countrycode': countrycode,\n 'plz': plz,\n 'ort': ort,\n 'table': table}\n\n 
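# render the paginated per-PLZ hospital table via the list template\n 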
return render(request, \"list_hospitals_by_plz.html\", context)\n\n@login_required\n@hospital_required\ndef zustimmung(request):\n user = request.user\n h = Hospital.objects.get(user=user)\n if request.method == 'POST':\n form_info = HospitalFormZustimmung(request.POST, instance=h)\n\n if form_info.is_valid():\n h.save()\n return HttpResponseRedirect(\"/accounts/login_redirect\")\n\n else:\n form_info = HospitalFormZustimmung()\n return render(request, 'zustimmung.html', {'form_info': form_info })\n\nclass HospitalTable(tables.Table):\n info = TemplateColumn(template_name='info_button.html')\n\n class Meta:\n model = Hospital\n template_name = \"django_tables2/bootstrap4.html\"\n fields = ['firmenname','ansprechpartner']\n exclude = ['uuid','registration_date','id']\n\nclass ApprovalHospitalTable(HospitalTable):\n info = TemplateColumn(template_name='info_button.html')\n status = TemplateColumn(template_name='approval_button.html')\n delete = TemplateColumn(template_name='delete_button.html')\n class Meta:\n model = Hospital\n template_name = \"django_tables2/bootstrap4.html\"\n fields = ['firmenname','ansprechpartner','user','telefon','plz','user__validated_email', 'approval_date', 'approved_by']\n exclude = ['uuid','id', 'registration_date']\n\n@login_required\ndef hospital_view(request,uuid):\n h = Hospital.objects.filter(uuid=uuid)[0]\n initial = {\n 'subject': _('Neues Hilfsangebot'),\n 'message': _('Hallo, ich habe ihr Gesuche auf der Plattform match4healthcare gesehen und bin für die Stelle qualifiziert.\\nIch bin...\\nIch möchte helfen in dem...')\n }\n\n email_form = EmailToHospitalForm(initial=initial)\n\n if request.POST and request.user.is_student and request.user.validated_email:\n s = request.user.student\n\n email_form = EmailToHospitalForm(request.POST, initial=initial)\n\n if email_form.is_valid():\n start_text = _(\"Hallo %s,\\n\\nSie haben über unsere Plattform match4healthcare von %s (%s) eine Antwort auf Ihre Anzeige bekommen.\\n\"\n \"Falls Sie keine Anfragen mehr bekommen möchten, deaktivieren Sie Ihre \"\n \"Anzeige im Profil online.\\n\\n\" % (h.ansprechpartner, s.name_first, request.user.email))\n message = start_text + \\\n \"===============================================\\n\\n\" + \\\n email_form.cleaned_data['message'] + \\\n \"\\n\\n===============================================\\n\\n\" + \\\n \"Mit freundlichen Grüßen,\\nIhr match4healthcare Team\"\n emailtohospital = EmailToHospital.objects.create(student=s,hospital=h,message=email_form.cleaned_data['message'],subject=email_form.cleaned_data['subject'])\n\n\n email = EmailMessage(\n subject='[match4healthcare] ' + email_form.cleaned_data['subject'],\n body=message,\n from_email=settings.NOREPLY_MAIL,\n to=[h.user.email]\n )\n email.send()\n emailtohospital.send_date = datetime.now()\n emailtohospital.save()\n\n return render(request,'hospital_contacted.html')\n\n lat1, lon1, ort1 = plzs[h.countrycode][h.plz]\n\n context = {\n 'hospital': h,\n 'uuid': h.uuid,\n 'ort': ort1,\n 'mail': h.user.username,\n }\n\n if request.user.is_student:\n s = Student.objects.get(user=request.user)\n lat2, lon2, context[\"student_ort\"] = plzs[s.countrycode][s.plz]\n context[\"distance\"] = int(haversine(lon1, lat1, lon2, lat2))\n context[\"plz_student\"] = s.plz\n\n\n context['email_form'] = email_form\n\n return render(request, 'hospital_view.html', context)\n\nfrom .forms import PostingForm\nfrom .tables import ContactedTable\nfrom django.db import models\n\n@login_required\n@hospital_required\ndef change_posting(request):\n
 if request.method == 'POST':\n anzeige_form = PostingForm(request.POST,instance=request.user.hospital)\n\n if anzeige_form.is_valid():\n anzeige_form.save()\n messages.add_message(request, messages.INFO,_('Deine Anzeige wurde erfolgreich aktualisiert.'))\n\n else:\n anzeige_form = PostingForm(instance=request.user.hospital)\n\n context = {\n 'anzeige_form': anzeige_form\n }\n return render(request, 'change_posting.html', context)\n\n\n@login_required\n@hospital_required\ndef hospital_dashboard(request):\n\n # table of contacted students\n values = ['student','registration_date','message','subject']\n qs = request.user.hospital.emailtosend_set.all().values(*values,is_activated=models.F('student__is_activated' ))\n kontaktiert_table = ContactedTable(qs)\n\n context = {\n 'already_contacted': len(qs) > 0,\n 'has_posting': request.user.hospital.appears_in_map,\n 'posting_text': request.user.hospital.sonstige_infos,\n 'kontaktiert_table' : kontaktiert_table\n }\n return render(request, 'hospital_dashboard.html', context)\n"},"avg_line_length":{"kind":"number","value":36.7299578059,"string":"36.729958"},"max_line_length":{"kind":"number","value":172,"string":"172"},"alphanum_fraction":{"kind":"number","value":0.6734060885,"string":"0.673406"}}},{"rowIdx":46262,"cells":{"hexsha":{"kind":"string","value":"3b12472f1b51fd437b3679375bdf34ea708ea5eb"},"size":{"kind":"number","value":501,"string":"501"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Packs/CommonScripts/Scripts/PrintRaw/PrintRaw_test.py"},"max_stars_repo_name":{"kind":"string","value":"diCagri/content"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":799,"string":"799"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-08-02T06:43:14.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T11:10:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"Packs/CommonScripts/Scripts/PrintRaw/PrintRaw_test.py"},"max_issues_repo_name":{"kind":"string","value":"diCagri/content"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":9317,"string":"9,317"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-08-07T19:00:51.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T21:56:04.000Z"},"max_forks_repo_path":{"kind":"string","value":"Packs/CommonScripts/Scripts/PrintRaw/PrintRaw_test.py"},"max_forks_repo_name":{"kind":"string","value":"diCagri/content"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1297,"string":"1,297"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-08-04T13:59:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-31T23:43:06.000Z"},"content":{"kind":"string","value":"import demistomock as demisto\n\n\ndef test_main(mocker):\n from PrintRaw import main\n\n # test custom fields with short names\n 
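# demisto.args() is mocked so main() reads the escaped test string below\n 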
mocker.patch.object(demisto, 'args', return_value={\n 'value': '\\tthat was a tab \\n\\n\\nthree newlines\\tafter another tab\\n'\n })\n mocker.patch.object(demisto, 'results')\n main()\n assert demisto.results.call_count == 1\n results = demisto.results.call_args[0][0]\n assert results == r\"'\\tthat was a tab \\n\\n\\nthree newlines\\tafter another tab\\n'\"\n"},"avg_line_length":{"kind":"number","value":31.3125,"string":"31.3125"},"max_line_length":{"kind":"number","value":86,"string":"86"},"alphanum_fraction":{"kind":"number","value":0.6846307385,"string":"0.684631"}}},{"rowIdx":46263,"cells":{"hexsha":{"kind":"string","value":"797dfd4217090df93567748343aab39dd6b56778"},"size":{"kind":"number","value":18619,"string":"18,619"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"event_detector/gttm/cluster_detection/cluster_handler.py"},"max_stars_repo_name":{"kind":"string","value":"MahdiFarnaghi/gtm"},"max_stars_repo_head_hexsha":{"kind":"string","value":"adbec372786262607291f901a444a0ebe9e98b48"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"event_detector/gttm/cluster_detection/cluster_handler.py"},"max_issues_repo_name":{"kind":"string","value":"MahdiFarnaghi/gtm"},"max_issues_repo_head_hexsha":{"kind":"string","value":"adbec372786262607291f901a444a0ebe9e98b48"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"event_detector/gttm/cluster_detection/cluster_handler.py"},"max_forks_repo_name":{"kind":"string","value":"MahdiFarnaghi/gtm"},"max_forks_repo_head_hexsha":{"kind":"string","value":"adbec372786262607291f901a444a0ebe9e98b48"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import os\nfrom datetime import datetime\nfrom pathlib import Path\nfrom pprint import pprint\n\nimport sys\nfrom sklearn.cluster import DBSCAN\nimport geopandas as gp\nimport pandas as pd\nimport numpy as np\nfrom sklearn.cluster import OPTICS  # public import path; OPTICS is instantiated in cluster_optics below\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom gttm.nlp.vectorize import VectorizerUtil\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics import euclidean_distances, silhouette_score, pairwise_distances\nfrom sklearn.metrics.pairwise import cosine_distances\nfrom gttm.mathematics import math_func\nfrom gttm.ioie.geodata import add_geometry\n\n\nclass ClusterHandler:\n\n    def __init__(self, alpha, beta, gama, temporal_extent: float, spatial_extent: float, min_cluster_size=10,\n                 dbscan_eps=4.5, metric='default', min_textual_distance=0.5):\n        self.labels = None\n        self.label_codes = None\n        self.space = None\n        self.reachability = None\n 
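# quality measure computed by ClusteringQuality.silhouette_coefficient\n 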
self.silhouette_coefficient = None\n self.alpha = alpha\n self.beta = beta\n self.gama = gama\n self.temporal_extent = temporal_extent\n self.temporal_extent_boundary = 3 * 60 * 60 # seconds\n self.spatial_extent = spatial_extent\n self.spatial_extent_divide_by_factor = 20\n self.spatial_extent_boundary = 40000 # meters based on kNN graph\n self.min_cluster_size = min_cluster_size\n self.dbscan_eps = dbscan_eps\n self.min_textual_distance = min_textual_distance\n # self.defaut_metric = self.metric_xyt_euc_times_c_cos_boundary_on_c\n self.defaut_metric = self.metric_07_weighted_sum_xy_euc_t_euc_c_cos_normalized_with_extents_boundary_on_c\n if metric != 'default':\n if metric == '01':\n self.defaut_metric = self.metric_01_c_cos\n if metric == '02':\n self.defaut_metric = self.metric_02_xy_euc\n if metric == '03':\n self.defaut_metric = self.metric_03_t_euc\n if metric == '04':\n self.defaut_metric = self.metric_04_norm1_xy_euc_t_euc_c_cos\n if metric == '05':\n self.defaut_metric = self.metric_05_weighted_sum_xy_euc_t_euc_c_cos\n if metric == '06':\n self.defaut_metric = self.metric_06_weighted_sum_xy_euc_t_euc_c_cos_normalized_with_extents\n if metric == '07':\n self.defaut_metric = self.metric_07_weighted_sum_xy_euc_t_euc_c_cos_normalized_with_extents_boundary_on_c\n if metric == '08':\n self.defaut_metric = self.metric_08_xyt_euc_times_c_cos\n if metric == '09':\n self.defaut_metric = self.metric_09_xyt_euc_times_c_cos_boundary_on_c\n\n def clear(self):\n self.labels = None\n self.label_codes = None\n self.space = None\n self.reachability = None\n self.silhouette_coefficient = None\n\n def cluster_optics(self, gdf: gp.GeoDataFrame, vect_method, lang, verbose=False):\n \"\"\"\n\n :type gdf: object\n \"\"\"\n if verbose:\n print('\\tStart cluster_detection (optics)...')\n s_time = datetime.now()\n\n self.space = None\n self.reachability = None\n\n xytc = self.generate_xy_projected_t_c_matrix(gdf, vect_method, lang)\n\n # metric = self.weighted_xy_euc_t_euc_c_cos_with_boundaries\n clust = OPTICS(metric=self.defaut_metric, min_cluster_size=self.min_cluster_size)\n\n clust.fit(xytc)\n\n self.labels = clust.labels_\n # labels_ordered = clust.labels_[clust.ordering_]\n self.label_codes = np.unique(self.labels)\n if verbose:\n print('\\t\\tNumber of records: {}'.format(len(xytc)))\n print('\\t\\tCluster labels: {}'.format(str(self.label_codes)))\n self.space = np.arange(len(ClusterHandler.generate_xy_matrix(gdf)))\n self.reachability = clust.reachability_[clust.ordering_]\n\n self.silhouette_coefficient = ClusteringQuality.silhouette_coefficient(xytc, self.labels, self.label_codes,\n self.defaut_metric)\n\n dur = datetime.now() - s_time\n if verbose:\n print('\\tClustering was finished ({} seconds).'.format(dur.seconds))\n return self.labels, self.label_codes, self.silhouette_coefficient\n\n def cluster_dbscan(self, gdf: gp.GeoDataFrame, vect_method, lang, verbose=False):\n if verbose:\n print('\\tStart cluster_detection (dbscan)...')\n s_time = datetime.now()\n\n xytc = self.generate_xy_projected_t_c_matrix(gdf, vect_method, lang)\n\n clust = DBSCAN(metric=self.defaut_metric, eps=self.dbscan_eps, min_samples=self.min_cluster_size)\n clust.fit(xytc)\n\n self.labels = clust.labels_\n self.label_codes = np.unique(self.labels)\n self.silhouette_coefficient = ClusteringQuality.silhouette_coefficient(xytc, self.labels, self.label_codes,\n self.defaut_metric)\n\n dur = datetime.now() - s_time\n if verbose:\n print('\\tClustering was finished ({} seconds).'.format(dur.seconds))\n return self.labels, 
self.label_codes, self.silhouette_coefficient\n\n def generate_kNN_plot(self, gdf: gp.GeoDataFrame, vect_method, lang, file_path):\n print('\\tGenerating kNN plot started (at {}) ...'.format(datetime.now().strftime(\"%Y%m%d-%H%M\")))\n print('vect_method: {}, metric: {}'.format(vect_method, str(self.defaut_metric)))\n s_time = datetime.now()\n xytc = self.generate_xy_projected_t_c_matrix(gdf, vect_method, lang)\n\n nbrs = NearestNeighbors(n_neighbors=self.min_cluster_size, metric=self.defaut_metric).fit(xytc)\n distances, indices = nbrs.kneighbors(xytc)\n\n def get_col(arr, col):\n return map(lambda x: x[col], arr)\n\n dist_col = list(get_col(distances, self.min_cluster_size - 1))\n dist_col_sorted = sorted(dist_col)\n num_of_points = len(dist_col_sorted)\n import matplotlib.pyplot as plt\n plt.plot(list(range(1, num_of_points + 1)), dist_col_sorted)\n plt.ylabel('Distance (spatial, temporal and textual)')\n plt.savefig(file_path, dpi=300)\n plt.close()\n dur = datetime.now() - s_time\n print('\\tGenerating kNN plot finished ({} seconds).'.format(dur.seconds))\n pass\n\n @staticmethod\n def generate_xy_projected_t_c_matrix(gdf, vect_method, lang):\n x = np.asarray(gdf.geometry.x)[:, np.newaxis]\n y = np.asarray(gdf.geometry.y)[:, np.newaxis]\n\n t = np.asarray(gdf[['t']])\n\n c = np.asarray(gdf['c'])\n c_vect_dense = None\n\n if vect_method == 'tfidf':\n c_vect_dense = VectorizerUtil.vectorize_tfidf(c, lang)\n elif vect_method == 'bow':\n c_vect_dense = VectorizerUtil.vectorize_count(c, lang)\n elif vect_method == 'w2v':\n c_vect_dense = VectorizerUtil.vectorize_word2vec(c, lang)\n elif vect_method == 'fasttext':\n c_vect_dense = VectorizerUtil.vectorize_fasttext(c, lang)\n elif vect_method == 'glove':\n c_vect_dense = VectorizerUtil.vectorize_glove(c, lang)\n\n xytc = np.concatenate((x, y, t, c_vect_dense), axis=1) # , c[:, np.newaxis]\n return xytc\n\n @staticmethod\n def metric_01_c_cos(a, b):\n try:\n c_distance = np.absolute(cosine_distances(a[np.newaxis, 3:], b[np.newaxis, 3:]))\n return c_distance\n except Exception as ex:\n print(ex)\n return sys.maxsize\n\n @staticmethod\n def metric_02_xy_euc(a, b):\n try:\n xy_distance = euclidean_distances(a[np.newaxis, 0:2], b[np.newaxis, 0:2])\n return xy_distance\n except Exception as ex:\n print(ex)\n return sys.maxsize\n\n @staticmethod\n def metric_03_t_euc(a, b):\n try:\n t_distance = euclidean_distances(a[np.newaxis, 2:3], b[np.newaxis, 2:3])\n return t_distance\n except Exception as ex:\n print(ex)\n return sys.maxsize\n\n def metric_04_norm1_xy_euc_t_euc_c_cos(self, a, b):\n xy_distance = euclidean_distances(a[np.newaxis, 0:2], b[np.newaxis, 0:2])\n t_distance = euclidean_distances(a[np.newaxis, 2:3], b[np.newaxis, 2:3])\n c_distance = np.absolute(cosine_distances(a[np.newaxis, 3:4], b[np.newaxis, 3:4]))\n dist = self.alpha * xy_distance + self.beta * t_distance + self.gama * c_distance\n\n return dist\n\n def metric_05_weighted_sum_xy_euc_t_euc_c_cos(self, a, b):\n try:\n xy_distance = euclidean_distances(a[np.newaxis, 0:2], b[np.newaxis, 0:2])\n xy_distance_norm = math_func.linear(xy_distance, 3000)\n t_distance = euclidean_distances(a[np.newaxis, 2:3], b[np.newaxis, 2:3])\n t_distance_norm = math_func.linear(t_distance, 8 * 60 * 60) # x0 in second\n c_distance = np.absolute(cosine_distances(a[np.newaxis, 3:], b[np.newaxis, 3:]))\n dist = self.alpha * xy_distance_norm + self.beta * t_distance_norm + self.gama * c_distance\n return dist\n except Exception as ex:\n print(ex)\n return sys.maxsize\n\n def 
metric_06_weighted_sum_xy_euc_t_euc_c_cos_normalized_with_extents(self, a, b):\n try:\n xy_distance = euclidean_distances(a[np.newaxis, 0:2], b[np.newaxis, 0:2])\n xy_distance_norm = math_func.linear(xy_distance, self.spatial_extent / self.spatial_extent_divide_by_factor)\n\n t_distance = euclidean_distances(a[np.newaxis, 2:3], b[np.newaxis, 2:3])\n t_distance_norm = math_func.linear(t_distance, self.temporal_extent * 60 * 60) # x0 in second\n\n c_distance = np.absolute(cosine_distances(a[np.newaxis, 3:], b[np.newaxis, 3:]))\n\n return self.alpha * xy_distance_norm + self.beta * t_distance_norm + self.gama * c_distance\n\n except Exception as ex:\n print(ex)\n return sys.maxsize\n\n def metric_07_weighted_sum_xy_euc_t_euc_c_cos_normalized_with_extents_boundary_on_c(self, a, b):\n try:\n xy_distance = euclidean_distances(a[np.newaxis, 0:2], b[np.newaxis, 0:2])\n xy_distance_norm = math_func.linear(xy_distance, self.spatial_extent / self.spatial_extent_divide_by_factor)\n\n t_distance = euclidean_distances(a[np.newaxis, 2:3], b[np.newaxis, 2:3])\n t_distance_norm = math_func.linear(t_distance, self.temporal_extent * 60 * 60) # x0 in second\n\n c_distance = np.absolute(cosine_distances(a[np.newaxis, 3:], b[np.newaxis, 3:]))\n if c_distance > self.min_textual_distance:\n return sys.maxsize\n\n return self.alpha * xy_distance_norm + self.beta * t_distance_norm + self.gama * c_distance\n\n except Exception as ex:\n print(ex)\n return sys.maxsize\n\n @staticmethod\n def metric_08_xyt_euc_times_c_cos(a, b):\n try:\n xyt_distance = euclidean_distances(a[np.newaxis, 0:3], b[np.newaxis, 0:3])\n\n c_distance = np.absolute(cosine_distances(a[np.newaxis, 3:], b[np.newaxis, 3:]))\n\n return xyt_distance * c_distance\n except Exception as ex:\n print(ex)\n return sys.maxsize\n\n def metric_09_xyt_euc_times_c_cos_boundary_on_c(self, a, b):\n try:\n xyt_distance = euclidean_distances(a[np.newaxis, 0:3], b[np.newaxis, 0:3])\n\n c_distance = np.absolute(cosine_distances(a[np.newaxis, 3:], b[np.newaxis, 3:]))\n if c_distance > self.min_textual_distance:\n return sys.maxsize\n\n return xyt_distance * c_distance\n except Exception as ex:\n print(ex)\n return sys.maxsize\n\n @staticmethod\n def calculate_distance(gdf, norm):\n xy = np.asarray(\n gdf[['x', 'y']] * 10000) # pd.merge(gdf[geom_col].x, gdf[geom_col].y, left_index=True, right_index=True)\n spatial_distance = euclidean_distances(xy)\n norm_spatial_distance = preprocessing.normalize(spatial_distance, norm=norm)\n t = np.asarray(gdf[['t']])\n temporal_distance = euclidean_distances(t)\n norm_temporal_distance = preprocessing.normalize(temporal_distance, norm=norm)\n c = np.asarray(gdf['c'])\n vectorizer = TfidfVectorizer()\n c_vect = vectorizer.fit_transform(c)\n content_distance = np.absolute(cosine_distances(c_vect))\n norm_content_distance = preprocessing.normalize(content_distance, norm=norm)\n distances = alpha * norm_spatial_distance + beta * norm_content_distance + gama * norm_temporal_distance\n return distances\n\n @staticmethod\n def link_clusters(gdf_new, gdf_new_label, gdf_old, global_lable_codes, linking_coef=.8, verbose=False):\n if verbose:\n print('\\tStart linking clusters ...')\n s_time = datetime.now()\n\n gdf_new['l'] = gdf_new_label\n if gdf_old is None:\n gdf_new['label'] = gdf_new_label\n [global_lable_codes.append(x) for x in np.unique(gdf_new_label)]\n changed_labels = gdf_new_label\n else:\n # gdf_new['label'] = -1\n changed_labels = - np.ones(gdf_new_label.shape)\n # np.intersect1d(n, o).count()\n # print(\"\\t\\tNumber of 
common tweets: {}\".format(len(np.intersect1d(gdf_new.id.values, gdf_old.id.values))))\n inter_over_union = len(np.intersect1d(gdf_new.id.values, gdf_old.id.values)) / len(\n np.union1d(gdf_new.id.values, gdf_old.id.values))\n # print(\"\\t\\tNumber of common tweets over all tweets: {}\".format(inter_over_union))\n # print(\"\\t\\told cluster code: {}\".format(gdf_new[gdf_new.l >= 0].l.unique()))\n # print(\"\\t\\tnew cluster code: {}\".format(gdf_old[gdf_old.l >= 0].l.unique()))\n # np.intersect1d(gdf_new[gdf_new.l == n], gdf_old[gdf_old.l == o]).count()\n for n in gdf_new[gdf_new.l >= 0].l.unique():\n max_rel_strength = 0\n max_rel_strength_label = -1\n for o in gdf_old[gdf_old.label >= 0].label.unique():\n rel_strength = len(\n np.intersect1d(gdf_new[gdf_new.l == n].id.values, gdf_old[gdf_old.label == o].id.values)) / len(\n np.union1d(gdf_new[gdf_new.l == n].id.values, gdf_old[gdf_old.label == o].id.values))\n if rel_strength > max_rel_strength:\n max_rel_strength = rel_strength\n max_rel_strength_label = o\n if max_rel_strength > (inter_over_union * linking_coef):\n # gdf_new[gdf_new.l == n].label = o\n changed_labels[gdf_new_label == n] = max_rel_strength_label\n else:\n new_label = np.max(global_lable_codes) + 1\n # gdf_new[gdf_new.l == n].label = new_label\n changed_labels[gdf_new_label == n] = new_label\n global_lable_codes.append(new_label)\n\n gdf_new['label'] = changed_labels\n\n # intersect = [\n # [[n, o, len(np.intersect1d(gdf_new[gdf_new.l == n].id.values, gdf_old[gdf_old.l == o].id.values)) / len(\n # np.union1d(gdf_new[gdf_new.l == n].id.values, gdf_old[gdf_old.l == o].id.values))] for o in\n # gdf_old[gdf_old.l >= 0].l.unique()] for n in gdf_new[gdf_new.l >= 0].l.unique()]\n # for i_n in range(len(intersect)):\n # idx = np.argmax(intersect[i_n])\n # if intersect[i_n][idx] >= inter_over_union * linking_coef:\n # gdf_new[]\n # pprint(intersect)\n changed_label_code = np.unique(changed_labels)\n\n dur = datetime.now() - s_time\n if verbose:\n print('\\tLinking clusters was finished ({} seconds).'.format(dur.seconds))\n return changed_labels, changed_label_code\n\n @staticmethod\n def generate_xy_matrix(gdf):\n xy = np.asarray(\n gdf[['x', 'y']] * 10000) # pd.merge(gdf[geom_col].x, gdf[geom_col].y, left_index=True, right_index=True)\n return xy\n\n @staticmethod\n def generate_xytc_matrix(gdf):\n xy = np.asarray(\n gdf[['x', 'y']] * 10000) # pd.merge(gdf[geom_col].x, gdf[geom_col].y, left_index=True, right_index=True)\n t = np.asarray(gdf[['t']])\n c = np.asarray(gdf['c'])\n vectorizer = TfidfVectorizer()\n c_vect = vectorizer.fit_transform(c)\n c_vect_dense = np.asarray(c_vect.todense())\n xytc = np.concatenate((xy, t, c_vect_dense), axis=1)\n return xytc\n\n @staticmethod\n def generate_xytc_0_1_scaled_matrix(gdf, norm='l2'):\n scaler = MinMaxScaler()\n x = np.asarray(gdf[['x']])\n x_norm = scaler.fit(x).transform(x)\n\n y = np.asarray(gdf[['y']])\n y_norm = scaler.fit(y).transform(y)\n\n t = np.asarray(gdf[['t']])\n t_norm = scaler.fit(t).transform(t)\n\n c = np.asarray(gdf['c'])\n vectorizer = TfidfVectorizer()\n c_vect = vectorizer.fit_transform(c)\n c_vect_dense = np.asarray(c_vect.todense())\n xytc = np.concatenate((x_norm, y_norm, t_norm, c_vect_dense), axis=1)\n return xytc\n\n\nclass ClusteringQuality:\n def __init__(self):\n pass\n\n @classmethod\n def silhouette_coefficient(cls, xytc, labels, label_codes, metric):\n if len(label_codes[label_codes != -1]) <= 1:\n print('$' * 60)\n print('Unable to calculate silhouette coefficient. 
Cluster codes: {}'.format(str(label_codes)))\n print('$' * 60)\n return sys.maxsize, sys.maxsize, sys.maxsize, sys.maxsize\n else:\n dist = pairwise_distances(xytc[labels != -1], metric=metric)\n sillhouete_dist = silhouette_score(xytc[:, 0:2][labels != -1], labels[labels != -1], metric='euclidean')\n sillhouete_time = silhouette_score(xytc[:, 3:4][labels != -1], labels[labels != -1], metric='euclidean')\n sillhouete_content = silhouette_score(xytc[:, 4:][labels != -1], labels[labels != -1], metric='cosine')\n sillhouete_overall = silhouette_score(dist, labels[labels != -1], metric='precomputed')\n # print('\\tSilhouette score: {}'.format(res))\n return sillhouete_dist, sillhouete_time, sillhouete_content, sillhouete_overall\n"},"avg_line_length":{"kind":"number","value":44.4367541766,"string":"44.436754"},"max_line_length":{"kind":"number","value":122,"string":"122"},"alphanum_fraction":{"kind":"number","value":0.6280681025,"string":"0.628068"}}},{"rowIdx":46264,"cells":{"hexsha":{"kind":"string","value":"691c70b9c8ade9bcf9dcf6c0ca515e1e5dcee5f1"},"size":{"kind":"number","value":6419,"string":"6,419"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"hardware/seat_cam/pixy_build/libpixyusb_swig/pixy.py"},"max_stars_repo_name":{"kind":"string","value":"BlueHC/TTHack-2018--Easy-Rider-1"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8cd8f66de88ff80751a1083350c38985ac26914d"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"hardware/seat_cam/pixy_build/libpixyusb_swig/pixy.py"},"max_issues_repo_name":{"kind":"string","value":"BlueHC/TTHack-2018--Easy-Rider-1"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8cd8f66de88ff80751a1083350c38985ac26914d"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"hardware/seat_cam/pixy_build/libpixyusb_swig/pixy.py"},"max_forks_repo_name":{"kind":"string","value":"BlueHC/TTHack-2018--Easy-Rider-1"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8cd8f66de88ff80751a1083350c38985ac26914d"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# This file was automatically generated by SWIG (http://www.swig.org).\n# Version 3.0.10\n#\n# Do not make changes to this file unless you know what you are doing--modify\n# the SWIG interface file instead.\n\n\n\n\n\nfrom sys import version_info as _swig_python_version_info\nif _swig_python_version_info >= (2, 7, 0):\n def swig_import_helper():\n import importlib\n pkg = __name__.rpartition('.')[0]\n mname = '.'.join((pkg, '_pixy')).lstrip('.')\n try:\n return importlib.import_module(mname)\n except ImportError:\n return importlib.import_module('_pixy')\n _pixy = swig_import_helper()\n del swig_import_helper\nelif _swig_python_version_info >= (2, 6, 0):\n def swig_import_helper():\n from 
os.path import dirname\n import imp\n fp = None\n try:\n fp, pathname, description = imp.find_module('_pixy', [dirname(__file__)])\n except ImportError:\n import _pixy\n return _pixy\n if fp is not None:\n try:\n _mod = imp.load_module('_pixy', fp, pathname, description)\n finally:\n fp.close()\n return _mod\n _pixy = swig_import_helper()\n del swig_import_helper\nelse:\n import _pixy\ndel _swig_python_version_info\ntry:\n _swig_property = property\nexcept NameError:\n pass # Python < 2.2 doesn't have 'property'.\n\ntry:\n import builtins as __builtin__\nexcept ImportError:\n import __builtin__\n\ndef _swig_setattr_nondynamic(self, class_type, name, value, static=1):\n if (name == \"thisown\"):\n return self.this.own(value)\n if (name == \"this\"):\n if type(value).__name__ == 'SwigPyObject':\n self.__dict__[name] = value\n return\n method = class_type.__swig_setmethods__.get(name, None)\n if method:\n return method(self, value)\n if (not static):\n if _newclass:\n object.__setattr__(self, name, value)\n else:\n self.__dict__[name] = value\n else:\n raise AttributeError(\"You cannot add attributes to %s\" % self)\n\n\ndef _swig_setattr(self, class_type, name, value):\n return _swig_setattr_nondynamic(self, class_type, name, value, 0)\n\n\ndef _swig_getattr(self, class_type, name):\n if (name == \"thisown\"):\n return self.this.own()\n method = class_type.__swig_getmethods__.get(name, None)\n if method:\n return method(self)\n raise AttributeError(\"'%s' object has no attribute '%s'\" % (class_type.__name__, name))\n\n\ndef _swig_repr(self):\n try:\n strthis = \"proxy of \" + self.this.__repr__()\n except __builtin__.Exception:\n strthis = \"\"\n return \"<%s.%s; %s >\" % (self.__class__.__module__, self.__class__.__name__, strthis,)\n\ntry:\n _object = object\n _newclass = 1\nexcept __builtin__.Exception:\n class _object:\n pass\n _newclass = 0\n\nclass BlockArray(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, BlockArray, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, BlockArray, name)\n __repr__ = _swig_repr\n\n def __init__(self, nelements):\n this = _pixy.new_BlockArray(nelements)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n __swig_destroy__ = _pixy.delete_BlockArray\n __del__ = lambda self: None\n\n def __getitem__(self, index):\n return _pixy.BlockArray___getitem__(self, index)\n\n def __setitem__(self, index, value):\n return _pixy.BlockArray___setitem__(self, index, value)\n\n def cast(self):\n return _pixy.BlockArray_cast(self)\n if _newclass:\n frompointer = staticmethod(_pixy.BlockArray_frompointer)\n else:\n frompointer = _pixy.BlockArray_frompointer\nBlockArray_swigregister = _pixy.BlockArray_swigregister\nBlockArray_swigregister(BlockArray)\n\ndef BlockArray_frompointer(t):\n return _pixy.BlockArray_frompointer(t)\nBlockArray_frompointer = _pixy.BlockArray_frompointer\n\n\ndef pixy_init():\n return _pixy.pixy_init()\npixy_init = _pixy.pixy_init\n\ndef pixy_get_blocks(max_blocks, blocks):\n return _pixy.pixy_get_blocks(max_blocks, blocks)\npixy_get_blocks = _pixy.pixy_get_blocks\n\ndef pixy_close():\n return _pixy.pixy_close()\npixy_close = _pixy.pixy_close\nclass Block(_object):\n __swig_setmethods__ = {}\n __setattr__ = lambda self, name, value: _swig_setattr(self, Block, name, value)\n __swig_getmethods__ = {}\n __getattr__ = lambda self, name: _swig_getattr(self, Block, name)\n __repr__ = _swig_repr\n __swig_setmethods__[\"type\"] = 
_pixy.Block_type_set\n __swig_getmethods__[\"type\"] = _pixy.Block_type_get\n if _newclass:\n type = _swig_property(_pixy.Block_type_get, _pixy.Block_type_set)\n __swig_setmethods__[\"signature\"] = _pixy.Block_signature_set\n __swig_getmethods__[\"signature\"] = _pixy.Block_signature_get\n if _newclass:\n signature = _swig_property(_pixy.Block_signature_get, _pixy.Block_signature_set)\n __swig_setmethods__[\"x\"] = _pixy.Block_x_set\n __swig_getmethods__[\"x\"] = _pixy.Block_x_get\n if _newclass:\n x = _swig_property(_pixy.Block_x_get, _pixy.Block_x_set)\n __swig_setmethods__[\"y\"] = _pixy.Block_y_set\n __swig_getmethods__[\"y\"] = _pixy.Block_y_get\n if _newclass:\n y = _swig_property(_pixy.Block_y_get, _pixy.Block_y_set)\n __swig_setmethods__[\"width\"] = _pixy.Block_width_set\n __swig_getmethods__[\"width\"] = _pixy.Block_width_get\n if _newclass:\n width = _swig_property(_pixy.Block_width_get, _pixy.Block_width_set)\n __swig_setmethods__[\"height\"] = _pixy.Block_height_set\n __swig_getmethods__[\"height\"] = _pixy.Block_height_get\n if _newclass:\n height = _swig_property(_pixy.Block_height_get, _pixy.Block_height_set)\n __swig_setmethods__[\"angle\"] = _pixy.Block_angle_set\n __swig_getmethods__[\"angle\"] = _pixy.Block_angle_get\n if _newclass:\n angle = _swig_property(_pixy.Block_angle_get, _pixy.Block_angle_set)\n\n def __init__(self):\n this = _pixy.new_Block()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this\n __swig_destroy__ = _pixy.delete_Block\n __del__ = lambda self: None\nBlock_swigregister = _pixy.Block_swigregister\nBlock_swigregister(Block)\n\n# This file is compatible with both classic and new-style classes.\n\n\n"},"avg_line_length":{"kind":"number","value":32.5837563452,"string":"32.583756"},"max_line_length":{"kind":"number","value":91,"string":"91"},"alphanum_fraction":{"kind":"number","value":0.6876460508,"string":"0.687646"}}},{"rowIdx":46265,"cells":{"hexsha":{"kind":"string","value":"d6b64253d098b09f24aab1942d7750af45aec81b"},"size":{"kind":"number","value":645,"string":"645"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"plan/urls.py"},"max_stars_repo_name":{"kind":"string","value":"MIXISAMA/MIS-backend"},"max_stars_repo_head_hexsha":{"kind":"string","value":"7aaa1be773718de1beb3ce0080edca7c4114b7ad"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"plan/urls.py"},"max_issues_repo_name":{"kind":"string","value":"MIXISAMA/MIS-backend"},"max_issues_repo_head_hexsha":{"kind":"string","value":"7aaa1be773718de1beb3ce0080edca7c4114b7ad"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"plan/urls.py"},"max_forks_repo_name":{"kind":"string","value":"MIXISAMA/MIS-backend"},"max_forks_repo_head_hexsha":{"kind":"string","value":"7aaa1be773718de1beb3ce0080edca7c4114b7ad"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-04-20T07:26:51.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-04-30T11:51:54.000Z"},"content":{"kind":"string","value":"from django.urls import path\nfrom plan.views import Requirements, RoughRequirements, DetailedRequirements\nfrom plan.views import OfferingCourses, FieldsOfStudy, IndicatorFactors\nfrom plan.views import BasisTemplates\n\nurlpatterns = [\n path(\"requirements/\", Requirements.as_view()),\n path(\"rough_requirements/\", RoughRequirements.as_view()),\n path(\"detailed_requirements/\", DetailedRequirements.as_view()),\n path(\"offering_courses/\", OfferingCourses.as_view()),\n path(\"fields_of_study/\", FieldsOfStudy.as_view()),\n path(\"indicator_factors/\", IndicatorFactors.as_view()),\n path(\"basis_templates/\", BasisTemplates.as_view()),\n]\n"},"avg_line_length":{"kind":"number","value":43,"string":"43"},"max_line_length":{"kind":"number","value":76,"string":"76"},"alphanum_fraction":{"kind":"number","value":0.7705426357,"string":"0.770543"}}},{"rowIdx":46266,"cells":{"hexsha":{"kind":"string","value":"03180b1651d96ec104a136171bca2a414505b752"},"size":{"kind":"number","value":920,"string":"920"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"python/mymail.py"},"max_stars_repo_name":{"kind":"string","value":"EstherLacan/jiangfw"},"max_stars_repo_head_hexsha":{"kind":"string","value":"a449b1925742873c76dc1b3284aedb359204bc76"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-07-29T16:43:46.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2020-07-29T16:43:46.000Z"},"max_issues_repo_path":{"kind":"string","value":"python/mymail.py"},"max_issues_repo_name":{"kind":"string","value":"EstherLacan/jiangfw"},"max_issues_repo_head_hexsha":{"kind":"string","value":"a449b1925742873c76dc1b3284aedb359204bc76"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"python/mymail.py"},"max_forks_repo_name":{"kind":"string","value":"EstherLacan/jiangfw"},"max_forks_repo_head_hexsha":{"kind":"string","value":"a449b1925742873c76dc1b3284aedb359204bc76"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# coding:utf-8\nfrom smtplib import SMTP\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nimport os\n\n\ndef myMail(from_email,passwd,to_email,project,content):\n SMTPSVR = SMTP('smtp.exmail.qq.com')\n to = ','.join(to_email)\n msg = MIMEMultipart('alternatvie')\n msg['Subject'] = Header(project,\"utf-8\")\n msg['From'] = r\"%s \" % Header(\"info@mhealth365.com\",\"utf-8\")\n msg['To'] = to\n content = MIMEText(content,'html', 'utf-8')\n msg.attach(content)\n\t\n sendSvr = 
SMTPSVR\n sendSvr.login(from_email,passwd)\n errs = sendSvr.sendmail(from_email,to_email,msg.as_string())\n sendSvr.quit()\n\nif __name__=='__main__':\n from_email = 'info@mhealth365.com'\n passwd = 'mHealth365Dev'\n to_email = ['jiangfengwei_2@126.com']\n project = 'ecg'\n content = '
hello
'\n    myMail(from_email,passwd,to_email,project,content)\n"},"avg_line_length":{"kind":"number","value":29.6774193548,"string":"29.677419"},"max_line_length":{"kind":"number","value":64,"string":"64"},"alphanum_fraction":{"kind":"number","value":0.6869565217,"string":"0.686957"}}},{"rowIdx":46267,"cells":{"hexsha":{"kind":"string","value":"03187b4948b19bec012b3e0e5d648fdc7537ae5b"},"size":{"kind":"number","value":825,"string":"825"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Python/Sonstige_Uebungen/fractal_like/penta_fractal_turtle.py"},"max_stars_repo_name":{"kind":"string","value":"Apop85/Scripts"},"max_stars_repo_head_hexsha":{"kind":"string","value":"1d8dad316c55e1f1343526eac9e4b3d0909e4873"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Python/Sonstige_Uebungen/fractal_like/penta_fractal_turtle.py"},"max_issues_repo_name":{"kind":"string","value":"Apop85/Scripts"},"max_issues_repo_head_hexsha":{"kind":"string","value":"1d8dad316c55e1f1343526eac9e4b3d0909e4873"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":6,"string":"6"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-12-24T15:15:09.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-01-13T01:58:35.000Z"},"max_forks_repo_path":{"kind":"string","value":"Python/Sonstige_Uebungen/fractal_like/penta_fractal_turtle.py"},"max_forks_repo_name":{"kind":"string","value":"Apop85/Scripts"},"max_forks_repo_head_hexsha":{"kind":"string","value":"1d8dad316c55e1f1343526eac9e4b3d0909e4873"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n###\n# File: penta_fractal_turtle.py\n# Project: Sonstige_Uebungen\n# Created Date: Thursday 28.02.2019, 12:07\n# Author: Apop85\n# -----\n# Last Modified: Friday 01.03.2019, 12:50\n# -----\n# Copyright (c) 2019 Apop85\n# This software is published under the MIT license.\n# Check http://www.opensource.org/licenses/MIT for further information\n# -----\n# Description: An attempt at drawing a pentagram fractal\n###\n\nfrom turtle import *\n\ndef penta_cycle(x):\n    if x < 10:\n        return\n    for i in range(5):\n        forward(x)\n        right(144)\n        penta_cycle(x/2)\n\ndef move_to_start():\n    up()\n    back(250)\n    right(90)\n    back(50)\n    left(90)\n    down()\n    speed()\n\nclear()\ntry:\n    speed(0)\n    move_to_start()\n    penta_cycle(500)\n    input()\nexcept:\n    print('Script 
aborted')"},"avg_line_length":{"kind":"number","value":18.75,"string":"18.75"},"max_line_length":{"kind":"number","value":71,"string":"71"},"alphanum_fraction":{"kind":"number","value":0.623030303,"string":"0.62303"}}},{"rowIdx":46268,"cells":{"hexsha":{"kind":"string","value":"036fdc17306e13cb5a1bba1a15f36adba42f04e9"},"size":{"kind":"number","value":2675,"string":"2,675"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"TreeModelLib/GrowthAndDeathDynamics/Mortality/RandomGrowth/RandomGrowth.py"},"max_stars_repo_name":{"kind":"string","value":"mcwimm/pyMANGA"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6c7b53087e53b116bb02f91c33974f3dfd9a46de"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-03-16T08:35:50.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-03-16T08:35:50.000Z"},"max_issues_repo_path":{"kind":"string","value":"TreeModelLib/GrowthAndDeathDynamics/Mortality/RandomGrowth/RandomGrowth.py"},"max_issues_repo_name":{"kind":"string","value":"mcwimm/pyMANGA"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6c7b53087e53b116bb02f91c33974f3dfd9a46de"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":67,"string":"67"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-11-14T11:29:52.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-09T14:37:11.000Z"},"max_forks_repo_path":{"kind":"string","value":"TreeModelLib/GrowthAndDeathDynamics/Mortality/RandomGrowth/RandomGrowth.py"},"max_forks_repo_name":{"kind":"string","value":"mcwimm/pyMANGA"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6c7b53087e53b116bb02f91c33974f3dfd9a46de"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":6,"string":"6"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-11-12T11:11:41.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-08-12T13:57:22.000Z"},"content":{"kind":"string","value":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@date: 2021-Today\n@author: marie-christin.wimmler@tu-dresden.de\n\"\"\"\n\nimport numpy as np\nfrom TreeModelLib.GrowthAndDeathDynamics.Mortality.Random import Random\n\n\nclass RandomGrowth(Random):\n    def __init__(self, args, case):\n        super(Random, self).__init__(args, case)\n        # Read input parameters from xml file\n        self.getInputParameters(args)\n        # Default values if no inputs are given\n        try:\n            self.k_die\n        except AttributeError:\n            # Calibration factor default: 1e-12\n            self.k_die = 1e-12\n            print(\"NOTE: Use default `k_die`: \" + str(self.k_die) +\n                  \".\")\n\n    def getSurvival(self, args):\n        self.survive = 1\n        # Calculate the probability to die\n        args.delta_volume = args.volume - args.volume_before\n\n        # = dV/dt/V\n        relative_volume_increment = args.delta_volume / (args.time *\n                                                         args.volume)\n        self.p_die = self.k_die / relative_volume_increment\n\n        # Get a random number\n        r = np.random.uniform(0, 1, 1)\n        if r < self.p_die:\n            self.survive = 0\n            print(\"\\t Tree died randomly. 
Random number: \" + str(r[0]) +\n \", p: \" + str(self.p_die))\n\n return self.survive\n\n def getMortalityVariables(self, args, growth_concept_information):\n # Variable to store volume of previous time step (m³)\n try:\n args.volume_before = growth_concept_information[\n \"volume_previous_ts\"]\n\n if args.volume_before == \"NaN\":\n args.volume_before = 0\n except KeyError:\n args.volume_before = 0\n\n def setMortalityVariables(self, args, growth_concept_information):\n # The current tree volume is the volume of t-1 in the next time step\n growth_concept_information[\"volume_previous_ts\"] = \\\n args.volume\n return growth_concept_information\n\n def getInputParameters(self, args):\n # All tags are optional\n missing_tags = [\"type\", \"mortality\", \"k_die\"]\n for arg in args.iterdescendants():\n tag = arg.tag\n if tag == \"k_die\":\n self.k_die = float(args.find(\"k_die\").text)\n elif tag == \"type\":\n case = args.find(\"type\").text\n try:\n missing_tags.remove(tag)\n except ValueError:\n print(\"WARNING: Tag \" + tag +\n \" not specified for \" + super().getConceptName() +\n \" (\" + case + \") \" +\n \"mortality initialisation!\")\n"},"avg_line_length":{"kind":"number","value":34.2948717949,"string":"34.294872"},"max_line_length":{"kind":"number","value":76,"string":"76"},"alphanum_fraction":{"kind":"number","value":0.5592523364,"string":"0.559252"}}},{"rowIdx":46269,"cells":{"hexsha":{"kind":"string","value":"30581b42bd7880006b315eb86b70d6a55f536d33"},"size":{"kind":"number","value":674,"string":"674"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"PMIa/2015/Donkor_A_H/task_6_10.py"},"max_stars_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_stars_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"PMIa/2015/Donkor_A_H/task_6_10.py"},"max_issues_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_issues_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"PMIa/2015/Donkor_A_H/task_6_10.py"},"max_forks_repo_name":{"kind":"string","value":"YukkaSarasti/pythonintask"},"max_forks_repo_head_hexsha":{"kind":"string","value":"eadf4245abb65f4400a3bae30a4256b4658e009c"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Задача 6. Вариант 10.\n# Создайте игру, в которой компьютер загадывает название одной из трех стран, входящих в военно-политический блок \"Тройственный союз\", а игрок должен его угадать.\n \n# Donkor A.H.\n# 14.04.2016\n\nimport random\nx=random.choice(['Германия','Австро-Венгрия','Италия'])\ny=input('Сыграем в игру. 
Я загадываю вам одну из трёх стран входящих в военно-политический блок \"Тройственный союз\", а вы должны угадать, какая именно это страна ? ')\nz=1\nwhile y!=x:\n \n print(\"Вы не угадали!\")\n z+=1\n y=input('Попробуйте снова ')\nelse:\n print(\"Всё верно!Вы угадали!!!\")\n print(\"Число ваших попыток - \"+ str(z))\ninput(\"\\nВведите Enter, чтобы завершить\")"},"avg_line_length":{"kind":"number","value":35.4736842105,"string":"35.473684"},"max_line_length":{"kind":"number","value":166,"string":"166"},"alphanum_fraction":{"kind":"number","value":0.7077151335,"string":"0.707715"}}},{"rowIdx":46270,"cells":{"hexsha":{"kind":"string","value":"063f6a9c7e6d7ee6eab51fa15b7d8a3d503ca147"},"size":{"kind":"number","value":11557,"string":"11,557"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"master.py"},"max_stars_repo_name":{"kind":"string","value":"ameliecordier/IIK"},"max_stars_repo_head_hexsha":{"kind":"string","value":"57b40d6b851a1c2369604049d1820e5b572c6227"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"master.py"},"max_issues_repo_name":{"kind":"string","value":"ameliecordier/IIK"},"max_issues_repo_head_hexsha":{"kind":"string","value":"57b40d6b851a1c2369604049d1820e5b572c6227"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"master.py"},"max_forks_repo_name":{"kind":"string","value":"ameliecordier/IIK"},"max_forks_repo_head_hexsha":{"kind":"string","value":"57b40d6b851a1c2369604049d1820e5b572c6227"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from datahandler import expertPatterns\nfrom datahandler import miningPatterns\nfrom datahandler import analyser as analyser\nfrom matplotlib import pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport os\nimport time\n\ndef plot_two_results(norev, rev, fignum, legend, pp):\n \"\"\"\n Utilitaire d'affichage d'une courbe de comparaison révision / no révision\n \"\"\"\n\n x = list(range(len(norev.results)))\n xrev = list(range(len(rev.results)))\n \n norevList = []\n revList = []\n \n for elt in norev.results:\n norevList.append(elt[\"idxMining\"])\n \n for elt in rev.results:\n revList.append(elt[\"idxMining\"])\n \n\n plt.figure(fignum)\n plt.plot(x, norevList, 'r', linestyle=\"-\", label=\"Sans révision\")\n plt.plot(xrev, revList, 'g', linestyle=\"--\", label=\"Avec révision\")\n plt.xlabel('Itération')\n plt.ylabel('Rang du pattern')\n plt.title(legend)\n plt.legend()\n plt.savefig(pp, format=\"pdf\")\n\n\ndef plot_three_results(rand, freq, cove, figNum, legend, pp):\n \"\"\" \n Utilitaire d'affichage d'une courbe de comparaison des trois méthodes\n \"\"\"\n\n # Génération des graphs\n x = list(range(len(rand.results)))\n y = list(range(len(freq.results)))\n z = list(range(len(cove.results)))\n\n randomList = []\n freqList = []\n covList = 
[]\n\n    for elt in rand.results:\n        randomList.append(elt[\"idxMining\"])\n    for elt in freq.results:\n        freqList.append(elt[\"idxMining\"])\n    for elt in cove.results:\n        covList.append(elt[\"idxMining\"])\n\n    plt.figure(figNum)\n    plt.plot(x, randomList, 'r', linestyle=\"-\", label=\"Random\")\n    plt.plot(y, freqList, 'g', linestyle=\"--\", label=\"Freq\")\n    plt.plot(z, covList, 'b', linestyle=\"-.\", label=\"Cov\")\n    plt.xlabel('Iteration')\n    plt.ylabel('Pattern rank')\n    plt.title(legend)\n    plt.legend()\n    plt.savefig(pp, format=\"pdf\")\n\n\ndef threefold_comp(mining, expert, nameExpe, sortingFreq, sortingCov):\n    \"\"\"\n    Compares random, frequency and event coverage\n    For frequency and coverage, the sort criterion is the one passed as a parameter\n    The results are stored in the nameExpe directory\n    \"\"\"\n\n    # Read the patterns\n    ep = expertPatterns.ExpertPatterns()\n    ep.getPatterns(expert)\n\n    # Create the directory that stores the results\n    try:\n        os.mkdir(\"DATA/\" + nameExpe)\n    except:\n        print(\"Directory already there\")\n\n\n    # Random\n    randnorevbegin = time.time()\n    mpRandNoRev = miningPatterns.Patterns(mining, \";\", 13, 11)\n    fname = \"DATA/\" + nameExpe + \"/no-rev_beforeSortRandom.csv\"\n    mpRandNoRev.toFile(fname)\n    anaRandNoRev = mpRandNoRev.findPatterns(ep)\n    fname = \"DATA/\" + nameExpe + \"/no-rev_analyseRandom.csv\"\n    anaRandNoRev.toFile(fname)\n    fname = \"DATA/\" + nameExpe + \"/no-rev_afterSortRandom.csv\"\n    mpRandNoRev.toFile(fname)\n    del mpRandNoRev\n    randnorevend = time.time()\n    randnorevtime = randnorevend-randnorevbegin\n    print(randnorevtime)\n\n\n    # Freq\n    freqnorevbegin = time.time()\n    mpFreqNoRev = miningPatterns.Patterns(mining, \";\", 13, 11)\n    fname = \"DATA/\" + nameExpe + \"/no-rev_beforeSortFreq.csv\"\n    mpFreqNoRev.toFile(fname)\n    mpFreqNoRev.sortBy(sortingFreq)\n    anaFreqNoRev = mpFreqNoRev.findPatterns(ep)\n    fname = \"DATA/\" + nameExpe + \"/no-rev_analyseFreq.csv\"\n    anaFreqNoRev.toFile(fname)\n    fname = \"DATA/\" + nameExpe + \"/no-rev_afterSortFreq.csv\"\n    mpFreqNoRev.toFile(fname)\n    del mpFreqNoRev\n    freqnorevend = time.time()\n    freqnorevtime = freqnorevend-freqnorevbegin\n    print(freqnorevtime)\n\n\n    # Cov evt\n    covnorevbegin = time.time()\n    mpCoveNoRev = miningPatterns.Patterns(mining, \";\", 13, 11)\n    fname = \"DATA/\" + nameExpe + \"/no-rev_beforeSortCovEvt.csv\"\n    mpCoveNoRev.toFile(fname)\n    mpCoveNoRev.sortBy(sortingCov)\n    anaCoveNoRev = mpCoveNoRev.findPatterns(ep)\n    fname = \"DATA/\" + nameExpe + \"/no-rev_analyseCovEvt.csv\"\n    anaCoveNoRev.toFile(fname)\n    fname = \"DATA/\" + nameExpe + \"/no-rev_afterSortCovEvt.csv\"\n    mpCoveNoRev.toFile(fname)\n    del mpCoveNoRev\n    covnorevend = time.time()\n    covnorevtime = covnorevend-covnorevbegin\n    print(covnorevtime)\n\n    # Random\n    randbegin = time.time()\n    mpRand = miningPatterns.Patterns(mining, \";\", 13, 11)\n    fname = \"DATA/\" + nameExpe + \"/rev_beforeSortRandom.csv\"\n    mpRand.toFile(fname)\n    anaRand = mpRand.findPatternsWithRevision(ep)\n    fname = \"DATA/\" + nameExpe + \"/rev_analyseRandom.csv\"\n    anaRand.toFile(fname)\n    fname = \"DATA/\" + nameExpe + \"/rev_afterSortRandom.csv\"\n    mpRand.toFile(fname)\n    del mpRand\n    randend = time.time()\n    randtime = randend - randbegin\n    print(randtime)\n\n\n    # Freq\n    freqbegin = time.time()\n    mpFreq = miningPatterns.Patterns(mining, \";\", 13, 11)\n    fname = \"DATA/\" + nameExpe + \"/rev_beforeSortFreq.csv\"\n    mpFreq.toFile(fname)\n    mpFreq.sortBy(sortingFreq)\n    anaFreq = mpFreq.findPatternsWithRevision(ep)\n    fname = \"DATA/\" + nameExpe + 
\"/rev_analyseFreq.csv\"\n anaFreq.toFile(fname)\n fname = \"DATA/\" + nameExpe + \"/rev_afterSortFreq.csv\"\n mpFreq.toFile(fname)\n del mpFreq\n freqend = time.time()\n freqtime = freqend - freqbegin\n print(freqtime)\n\n # Cov evt\n covbegin = time.time()\n mpCove = miningPatterns.Patterns(mining, \";\", 13, 11)\n fname = \"DATA/\" + nameExpe + \"/rev_beforeSortCovEvt.csv\"\n mpCove.toFile(fname)\n mpCove.sortBy(sortingCov)\n anaCove = mpCove.findPatternsWithRevision(ep)\n fname = \"DATA/\" + nameExpe + \"/rev_analyseCovEvt.csv\"\n anaCove.toFile(fname)\n fname = \"DATA/\" + nameExpe + \"/rev_afterSortCovEvt.csv\"\n mpCove.toFile(fname)\n del mpCove\n covend = time.time()\n covtime = covend - covbegin\n print(covtime)\n\n\n\n # Génération des graphes résultats \n pdfname = \"DATA/\" + nameExpe + \"/results.pdf\"\n pp = PdfPages(pdfname)\n\n legende = \"Comparaison des résultats sans révision\"\n plot_three_results(anaRandNoRev, anaFreqNoRev, anaCoveNoRev, 1, legende, pp)\n \n legende = \"Comparaison des résultats avec révision\"\n plot_three_results(anaRand, anaFreq, anaCove, 2, legende, pp)\n\n legende = \"Performances de random\"\n plot_two_results(anaRandNoRev, anaRand, 3, legende, pp)\n\n legende = \"Performances de freq\"\n plot_two_results(anaFreqNoRev, anaFreq, 4, legende, pp)\n \n legende = \"Performances de cov evt\"\n plot_two_results(anaCoveNoRev, anaCove, 5, legende, pp)\n\n pp.close()\n\n print(randtime, covtime, freqtime, randnorevtime, freqnorevtime, covnorevtime)\n\ndef threefold_compNoFiles(mining, expert, nameExpe, sortingFreq, sortingCov):\n \"\"\"\n Compare random, fréquence et couverture événementielle\n Pour fréquence et couverture, le critère de tri est celui passé en paramètre\n Les résultats sont stockés dans le répertoire nameExpe\n Spécificité : à part les graphes résultats, pas de fichiers générés\n \"\"\"\n\n # Lecture des patterns\n ep = expertPatterns.ExpertPatterns()\n ep.getPatterns(expert)\n\n # Création du répertoire pour stocker les résultats\n try:\n os.mkdir(\"DATA/\" + nameExpe)\n except:\n print(\"Directory already there\")\n\n # Random\n a = time.time()\n mpRandNoRev = miningPatterns.Patterns(mining, \";\", 13, 11)\n anaRandNoRev = mpRandNoRev.findPatterns(ep)\n b = time.time()\n del mpRandNoRev\n print(b-a)\n\n # Freq\n mpFreqNoRev = miningPatterns.Patterns(mining, \";\", 13, 11)\n mpFreqNoRev.sortBy(sortingFreq)\n anaFreqNoRev = mpFreqNoRev.findPatterns(ep)\n del mpFreqNoRev\n\n # Cov evt\n mpCoveNoRev = miningPatterns.Patterns(mining, \";\", 13, 11)\n mpCoveNoRev.sortBy(sortingCov)\n anaCoveNoRev = mpCoveNoRev.findPatterns(ep)\n del mpCoveNoRev\n\n # Random\n mpRand = miningPatterns.Patterns(mining, \";\", 13, 11)\n anaRand = mpRand.findPatternsWithRevision(ep)\n del mpRand\n\n # Freq\n mpFreq = miningPatterns.Patterns(mining, \";\", 13, 11)\n mpFreq.sortBy(sortingFreq)\n anaFreq = mpFreq.findPatternsWithRevision(ep)\n del mpFreq\n\n # Cov evt\n mpCove = miningPatterns.Patterns(mining, \";\", 13, 11)\n mpCove.sortBy(sortingCov)\n anaCove = mpCove.findPatternsWithRevision(ep)\n del mpCove\n\n # Génération des graphes résultats \n pdfname = \"DATA/\" + nameExpe + \"/results.pdf\"\n pp = PdfPages(pdfname)\n\n legende = \"Comparaison des résultats sans révision\"\n plot_three_results(anaRandNoRev, anaFreqNoRev, anaCoveNoRev, 1, legende, pp)\n \n legende = \"Comparaison des résultats avec révision\"\n plot_three_results(anaRand, anaFreq, anaCove, 2, legende, pp)\n\n legende = \"Performances de random\"\n plot_two_results(anaRandNoRev, anaRand, 3, 
legende, pp)\n\n    legende = \"Performance of freq\"\n    plot_two_results(anaFreqNoRev, anaFreq, 4, legende, pp)\n    \n    legende = \"Performance of cov evt\"\n    plot_two_results(anaCoveNoRev, anaCove, 5, legende, pp)\n\n    pp.close()\n\n\n# Experiment 1: original ibert, simple sort\n# (left uncommented so the main code below has a configuration;\n#  exactly one experiment block should be active at a time)\nmining = \"DATA/v1_ibert_fouille.csv\"\nexpert = \"DATA/v1_ibert_expert.csv\"\nxpname = \"v1_ibert_standard\"\nsortingFreq = [(\"freq\", \"desc\")]\nsortingCov = [(\"cov evt\", \"desc\")]\n\n# Experiment 2: original ibert, sort by length first\n'''\nmining = \"DATA/v1_ibert_fouille.csv\"\nexpert = \"DATA/v1_ibert_expert.csv\"\nxpname = \"v1_ibert_tri_longueur\"\nsortingFreq = [(\"long\", \"desc\"), (\"freq\", \"desc\")]\nsortingCov = [(\"long\", \"desc\"), (\"cov evt\", \"desc\")]\n'''\n\n# Experiment 3: original debussy, simple sort\n'''\nmining = \"DATA/v1_debussy_fouille.csv\"\nexpert = \"DATA/v1_debussy_expert.csv\"\nxpname = \"v1_debussy_standard\"\nsortingFreq = [(\"freq\", \"desc\")]\nsortingCov = [(\"cov evt\", \"desc\")]\n'''\n\n# Experiment 4: original debussy, sort by length first\n'''\nmining = \"DATA/v1_debussy_fouille.csv\"\nexpert = \"DATA/v1_debussy_expert.csv\"\nxpname = \"v1_debussy_tri_longueur\"\nsortingFreq = [(\"long\", \"desc\"), (\"freq\", \"desc\")]\nsortingCov = [(\"long\", \"desc\"), (\"cov evt\", \"desc\")]\n'''\n\n# Experiment 5: original reichert, simple sort\n'''\nmining = \"DATA/v1_reichert_fouille.csv\"\nexpert = \"DATA/v1_reichert_expert.csv\"\nxpname = \"v1_reichert_standard\"\nsortingFreq = [(\"freq\", \"desc\")]\nsortingCov = [(\"cov evt\", \"desc\")]\n'''\n\n# Experiment 6: original reichert, sort by length first\n'''\nmining = \"DATA/v1_reichert_fouille.csv\"\nexpert = \"DATA/v1_reichert_expert.csv\"\nxpname = \"v1_reichert_tri_longueur\"\nsortingFreq = [(\"long\", \"desc\"), (\"freq\", \"desc\")]\nsortingCov = [(\"long\", \"desc\"), (\"cov evt\", \"desc\")]\n'''\n\n# Experiment 7: ibert v2, simple sort\n# Note: for this one the files have not been generated\n# TODO\n'''\nmining = \"DATA/v2_ibert_fouille.csv\"\nexpert = \"DATA/v2_ibert_expert.csv\"\nxpname = \"v2_ibert_standard\"\nsortingFreq = [(\"freq\", \"desc\")]\nsortingCov = [(\"cov evt\", \"desc\")]\n'''\n\n# Experiment 8: ibert v2, sort by length first\n# Very long (2h10)\n'''\nmining = \"DATA/v2_ibert_fouille.csv\"\nexpert = \"DATA/v2_ibert_expert.csv\"\nxpname = \"v2_ibert_tri_longueur\"\nsortingFreq = [(\"long\", \"desc\"), (\"freq\", \"desc\")]\nsortingCov = [(\"long\", \"desc\"), (\"cov evt\", \"desc\")]\n'''\n\n# Experiment 9: debussy v2, simple sort\n'''\nmining = \"DATA/v2_debussy_fouille.csv\"\nexpert = \"DATA/v2_debussy_expert.csv\"\nxpname = \"v2_debussy_standard\"\nsortingFreq = [(\"freq\", \"desc\")]\nsortingCov = [(\"cov evt\", \"desc\")]\n'''\n\n# Experiment 10: debussy v2, sort by length first\n'''\nmining = \"DATA/v2_debussy_fouille.csv\"\nexpert = \"DATA/v2_debussy_expert.csv\"\nxpname = \"v2_debussy_tri_longueur\"\nsortingFreq = [(\"long\", \"desc\"), (\"freq\", \"desc\")]\nsortingCov = [(\"long\", \"desc\"), (\"cov evt\", \"desc\")]\n'''\n\n\n# Main code\na = time.time()\nthreefold_comp(mining, expert, xpname, sortingFreq, sortingCov)\nb = 
time.time()\n\nprint(b-a)\n"},"avg_line_length":{"kind":"number","value":29.9404145078,"string":"29.940415"},"max_line_length":{"kind":"number","value":82,"string":"82"},"alphanum_fraction":{"kind":"number","value":0.6769057714,"string":"0.676906"}}},{"rowIdx":46271,"cells":{"hexsha":{"kind":"string","value":"2335e7284437d794717be84ed886ee2601043ec0"},"size":{"kind":"number","value":804,"string":"804"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/098_encasulamento/main.py"},"max_stars_repo_name":{"kind":"string","value":"pedrohd21/Cursos-Feitos"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b223aad83867bfa45ad161d133e33c2c200d42bd"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/098_encasulamento/main.py"},"max_issues_repo_name":{"kind":"string","value":"pedrohd21/Cursos-Feitos"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b223aad83867bfa45ad161d133e33c2c200d42bd"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Curso_Python/Secao4-Python-introducao-a-programacao-orientada-a-objetos-POO/098_encasulamento/main.py"},"max_forks_repo_name":{"kind":"string","value":"pedrohd21/Cursos-Feitos"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b223aad83867bfa45ad161d133e33c2c200d42bd"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\npublic, protected, private\n_ private/protected (public _)\n__ private (_CLASSNAME__attributename)\n\"\"\"\n\n\nclass BaseDeDados:\n    def __init__(self):\n        self.__dados = {}\n\n    @property\n    def dados(self):\n        return self.__dados\n\n    def inserir_clientes(self, id, nome):\n        if 'clientes' not in self.__dados:\n            self.__dados['clientes'] = {id: nome}\n        else:\n            self.__dados['clientes'].update({id: nome})\n\n    def lista_cliente(self):\n        for id, nome in self.__dados['clientes'].items():\n            print(id, nome)\n\n    def apaga_cliente(self, id):\n        del self.__dados['clientes'][id]\n\n\nbd = BaseDeDados()\nbd.inserir_clientes(1, 'Pedro')\nbd.inserir_clientes(1, 'Pedro')\nbd.inserir_clientes(2, 'Pedro')\nbd.inserir_clientes(3, 
'Pedro')\nprint(bd.dados)\n\n\n"},"avg_line_length":{"kind":"number","value":21.1578947368,"string":"21.157895"},"max_line_length":{"kind":"number","value":57,"string":"57"},"alphanum_fraction":{"kind":"number","value":0.6305970149,"string":"0.630597"}}},{"rowIdx":46272,"cells":{"hexsha":{"kind":"string","value":"cc88b9bd7b332888ec7b1b6dfb9dc3e367f010ef"},"size":{"kind":"number","value":892,"string":"892"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/constants.py"},"max_stars_repo_name":{"kind":"string","value":"ZiningZhu/InfoProbe"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0bf241356c5bfcfc4760a195eecf7c80f75379aa"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-02-06T01:52:00.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-02-06T01:52:00.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/constants.py"},"max_issues_repo_name":{"kind":"string","value":"SPOClab-ca/InfoProbe"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0bf241356c5bfcfc4760a195eecf7c80f75379aa"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/constants.py"},"max_forks_repo_name":{"kind":"string","value":"SPOClab-ca/InfoProbe"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0bf241356c5bfcfc4760a195eecf7c80f75379aa"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\nspacy_model_names = {\n \"en\": \"en_core_web_md\",\n \"fr\": \"fr_core_news_md\",\n \"es\": \"es_core_news_md\"\n}\n\n# 17 for English.\n# 15 for French: replace \"INTJ\" (7 entries) or \"SYM\" with \"X\" (1296 entries).\n# 16 for Spanish: Change \"X\" (1 entry) to \"INTJ\" (27 entries) \nspacy_pos_dict = {\n \"en\": ['ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', 'DET', 'INTJ', 'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB', 'X'],\n \"fr\": ['ADJ', 'ADP', 'ADV', 'AUX', 'CCONJ', 'DET', 'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'VERB', 'X', ''],\n \"es\": ['ADJ', 'ADP', 'ADV', 'AUX', 'CONJ', 'DET', 'INTJ', 'NOUN', 'NUM', 'PART', 'PRON', 'PROPN', 'PUNCT', 'SCONJ', 'SYM', 'VERB']\n}\n\ngensim_fasttext_models = {\n \"en\": \"../data/embeddings/fasttext/cc.en.300.bin\",\n \"fr\": \"../data/embeddings/fasttext/cc.fr.300.bin\",\n \"es\": 
\"../data/embeddings/fasttext/cc.es.300.bin\"\n}"},"avg_line_length":{"kind":"number","value":42.4761904762,"string":"42.47619"},"max_line_length":{"kind":"number","value":141,"string":"141"},"alphanum_fraction":{"kind":"number","value":0.5504484305,"string":"0.550448"}}},{"rowIdx":46273,"cells":{"hexsha":{"kind":"string","value":"4e1ad8de222a2875b9c6d50f8e51a5fd937564f0"},"size":{"kind":"number","value":870,"string":"870"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"TreePrintAllPath.py"},"max_stars_repo_name":{"kind":"string","value":"aertoria/MiscCode"},"max_stars_repo_head_hexsha":{"kind":"string","value":"a2e94d0fe0890e6620972f84adcb7976ca9f1408"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"TreePrintAllPath.py"},"max_issues_repo_name":{"kind":"string","value":"aertoria/MiscCode"},"max_issues_repo_head_hexsha":{"kind":"string","value":"a2e94d0fe0890e6620972f84adcb7976ca9f1408"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"TreePrintAllPath.py"},"max_forks_repo_name":{"kind":"string","value":"aertoria/MiscCode"},"max_forks_repo_head_hexsha":{"kind":"string","value":"a2e94d0fe0890e6620972f84adcb7976ca9f1408"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"class node(object):\n\tdef __init__(self,val):\n\t\tself.val=val\n\t\tself.right=None\n\t\tself.left=None\n\t\t\n\n\n\n\nclass Solution(object):\n\tdef DFS_print(self,root):\n\t\tself.stack=[root]\n\t\tself.DFS(root)\n\t\t#self.stack.pop()\n\t\t\n\tdef DFS(self,node):\n\t\tif node.left == None and node.right == None:\n\t\t\t#go back\n\t\t\tprint map(lambda x:x.val,self.stack)\n\t\t\treturn\n\t\tif node.left <> None: #if node.left<> None and node.left not in self.stack #in case it is a graph\n\t\t\tself.stack.append(node.left)\n\t\t\tself.DFS(node.left)\n\t\t\tself.stack.pop()\n\t\tif node.right<> None:\n\t\t\tself.stack.append(node.right)\n\t\t\tself.DFS(node.right)\n\t\t\tself.stack.pop()\n\t\t\n\n\n\n\n\n\n\n\n##test cases\nnodeA=node('A')\nnodeB=node('B')\nnodeC=node('C')\nnodeD=node('D')\nnodeE=node('E')\nnodeF=node('F')\nnodeA.left = nodeB\nnodeA.right= nodeC\nnodeB.left = nodeD\nnodeC.left = nodeE\nnodeC.right = 
nodeF\n\ns=Solution()\ns.DFS_print(nodeA)\n\n\t"},"avg_line_length":{"kind":"number","value":15.8181818182,"string":"15.818182"},"max_line_length":{"kind":"number","value":99,"string":"99"},"alphanum_fraction":{"kind":"number","value":0.6643678161,"string":"0.664368"}}},{"rowIdx":46274,"cells":{"hexsha":{"kind":"string","value":"d18f8f402ac4ed15e9faea47a484ce782a2eb6cc"},"size":{"kind":"number","value":1947,"string":"1,947"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"data/time_gen.py"},"max_stars_repo_name":{"kind":"string","value":"Janrupf/airport-db-seeding"},"max_stars_repo_head_hexsha":{"kind":"string","value":"768a9373f02ede5bf613d09270d2fbe84de37a97"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"data/time_gen.py"},"max_issues_repo_name":{"kind":"string","value":"Janrupf/airport-db-seeding"},"max_issues_repo_head_hexsha":{"kind":"string","value":"768a9373f02ede5bf613d09270d2fbe84de37a97"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"data/time_gen.py"},"max_forks_repo_name":{"kind":"string","value":"Janrupf/airport-db-seeding"},"max_forks_repo_head_hexsha":{"kind":"string","value":"768a9373f02ede5bf613d09270d2fbe84de37a97"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import functools\nimport random\n\n\nclass TimeGen:\n def __init__(self, hour_min_step, hour_max_step):\n self.hour_min_step = hour_min_step\n self.hour_max_step = hour_max_step\n\n self.current_start_hour = 6\n self.current_start_minute = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.current_start_minute += random.randint(0, 4) * 15\n\n if self.current_start_minute >= 60:\n self.current_start_hour += random.randint(self.hour_min_step, self.hour_max_step)\n self.current_start_minute = 0\n\n if self.current_start_hour >= 22:\n self.current_start_hour = 6\n\n end_hour = self.current_start_hour\n end_minute = self.current_start_minute + 15\n\n if end_minute == 60:\n end_hour += 1\n end_minute = 0\n\n return (\n SimpleTimeValue(self.current_start_hour, self.current_start_minute, 0),\n SimpleTimeValue(end_hour, end_minute, 0)\n )\n\n\n@functools.total_ordering\nclass SimpleTimeValue:\n def __init__(self, hour, minute, second):\n self.hour = hour\n self.minute = minute\n self.second = second\n\n def __gt__(self, other):\n if self.hour < other.hour:\n return False\n elif self.hour > other.hour:\n return True\n\n if self.minute < other.minute:\n return False\n elif self.minute > other.minute:\n return True\n\n if self.second < other.second:\n return False\n elif self.second > other.second:\n return True\n\n return False\n\n def __eq__(self, other):\n return self.hour == other.hour and self.minute == other.minute and self.second == other.second\n\n def to_string(self):\n return f\"{str(self.hour).rjust(2, 
'0')}:{str(self.minute).rjust(2, '0')}:{str(self.second).rjust(2, '0')}\"\n\n def __str__(self):\n return self.to_string()\n"},"avg_line_length":{"kind":"number","value":27.0416666667,"string":"27.041667"},"max_line_length":{"kind":"number","value":114,"string":"114"},"alphanum_fraction":{"kind":"number","value":0.6117103236,"string":"0.61171"}}},{"rowIdx":46275,"cells":{"hexsha":{"kind":"string","value":"d1a180b4c25806487dcb8d5f90f1a3da3b4fe56c"},"size":{"kind":"number","value":13289,"string":"13,289"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/onegov/activity/matching/core.py"},"max_stars_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_stars_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"src/onegov/activity/matching/core.py"},"max_issues_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_issues_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"src/onegov/activity/matching/core.py"},"max_forks_repo_name":{"kind":"string","value":"politbuero-kampagnen/onegov-cloud"},"max_forks_repo_head_hexsha":{"kind":"string","value":"20148bf321b71f617b64376fe7249b2b9b9c4aa9"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"\"\"\" Implements the matching algorithm used to match attendees to occasions.\n\nThe algorithm used is based on Deferred Acceptance. 
The algorithm has a\nquadratic runtime.\n\n\"\"\"\n\nfrom onegov.activity import Attendee, Booking, Occasion, Period\nfrom onegov.activity.matching.score import Scoring\nfrom onegov.activity.matching.utils import overlaps, LoopBudget, hashable\nfrom onegov.activity.matching.utils import booking_order, unblockable\nfrom onegov.core.utils import Bunch\nfrom itertools import groupby, product\nfrom sortedcontainers import SortedSet\nfrom sqlalchemy.orm import joinedload, defer\n\n\nclass AttendeeAgent(hashable('id')):\n \"\"\" Acts on behalf of the attendee with the goal to get a stable booking\n with an occasion.\n\n A booking/occasion pair is considered stable if there exists no other\n such pair which is preferred by both the attendee and the occasion.\n\n In other words, if there's no other occasion that would accept the\n attendee over another attendee.\n\n \"\"\"\n\n __slots__ = ('id', 'wishlist', 'accepted', 'blocked')\n\n def __init__(self, id, bookings, limit=None, minutes_between=0,\n alignment=None):\n self.id = id\n self.limit = limit\n self.wishlist = SortedSet(bookings, key=booking_order)\n self.accepted = set()\n self.blocked = set()\n self.minutes_between = minutes_between\n self.alignment = alignment\n\n def blocks(self, subject, other):\n return overlaps(\n subject, other, self.minutes_between, self.alignment,\n with_anti_affinity_check=True)\n\n def accept(self, booking):\n \"\"\" Accepts the given booking. \"\"\"\n\n self.wishlist.remove(booking)\n self.accepted.add(booking)\n\n if self.limit and len(self.accepted) >= self.limit:\n self.blocked |= self.wishlist\n else:\n self.blocked |= {\n b for b in self.wishlist if self.blocks(booking, b)\n }\n\n self.wishlist -= self.blocked\n\n def deny(self, booking):\n \"\"\" Removes the given booking from the accepted bookings. 
\"\"\"\n\n self.wishlist.add(booking)\n self.accepted.remove(booking)\n\n # remove bookings from the blocked list which are not blocked anymore\n for booking in unblockable(\n self.accepted, self.blocked, with_anti_affinity_check=True):\n\n self.blocked.remove(booking)\n self.wishlist.add(booking)\n\n @property\n def is_valid(self):\n \"\"\" Returns True if the results of this agent are valid.\n\n The algorithm should never get to this stage, so this is an extra\n security measure to make sure there's no bug.\n\n \"\"\"\n for a, b in product(self.accepted, self.accepted):\n if a != b and self.blocks(a, b):\n return False\n\n return True\n\n\nclass OccasionAgent(hashable('id')):\n \"\"\" Represents the other side of the Attendee/Occasion pair.\n\n While the attende agent will try to get the best possible occasion\n according to the wishses of the attendee, the occasion agent will\n try to get the best attendee according to the wishes of the occasion.\n\n These wishes may include hard-coded rules or peferences defined by the\n organiser/admin, who may manually prefer certain attendees over others.\n\n \"\"\"\n\n __slots__ = ('occasion', 'bookings', 'attendees', 'score_function')\n\n def __init__(self, occasion, score_function=None):\n self.id = occasion.id\n self.occasion = occasion\n self.bookings = set()\n self.attendees = {}\n self.score_function = score_function or (lambda b: b.score)\n\n @property\n def full(self):\n return len(self.bookings) >= (self.occasion.max_spots)\n\n def preferred(self, booking):\n \"\"\" Returns the first booking with a lower score than the given\n booking (which indicates that the given booking is preferred over\n the returned item).\n\n If there's no preferred booking, None is returned.\n\n \"\"\"\n return next(\n (\n b for b in self.bookings\n if self.score_function(b) < self.score_function(booking)\n ),\n None\n )\n\n def accept(self, attendee, booking):\n self.attendees[booking] = attendee\n self.bookings.add(booking)\n attendee.accept(booking)\n\n def deny(self, booking):\n self.attendees[booking].deny(booking)\n self.bookings.remove(booking)\n del self.attendees[booking]\n\n def match(self, attendee, booking):\n\n # as long as there are spots, automatically accept new requests\n if not self.full:\n self.accept(attendee, booking)\n\n return True\n\n # if the occasion is already full, accept the booking by throwing\n # another one out, if there exists a better fit\n over = self.preferred(booking)\n\n if over:\n self.deny(over)\n self.accept(attendee, booking)\n\n return True\n\n return False\n\n\ndef deferred_acceptance(bookings, occasions,\n score_function=None,\n validity_check=True,\n stability_check=False,\n hard_budget=True,\n default_limit=None,\n attendee_limits=None,\n minutes_between=0,\n alignment=None,\n sort_bookings=True):\n \"\"\" Matches bookings with occasions.\n\n :score_function:\n A function accepting a booking and returning a score. Occasions prefer\n bookings with a higher score over bookings with a lower score, if and\n only if the occasion is not yet full.\n\n The score function is meant to return a constant value for each\n booking during the run of the algorithm. If this is not the case,\n the algorithm might not halt.\n\n :validity_check:\n Ensures that the algorithm doesn't lead to any overlapping bookings.\n Runs in O(b) time, where b is the number of bookings per period.\n\n :stability_check:\n Ensures that the result does not contain any blocking pairs, that is\n it checks that the result is stable. 
This runs in O(b^3) time, so\n        do not run this in production (it's more of a testing tool).\n\n    :hard_budget:\n        Makes sure that the algorithm halts eventually by raising an exception\n        if the runtime budget of O(a*b) is reached (number of attendees\n        times the number of bookings).\n\n        Feel free to prove that this can't happen and then remove the check ;)\n\n    :default_limit:\n        The maximum number of bookings which should be accepted for each\n        attendee.\n\n    :attendee_limits:\n        The maximum number of bookings which should be accepted for each\n        attendee. Keyed by the attendee id, this dictionary contains\n        per-attendee limits. Those fall back to the default_limit.\n\n    :minutes_between:\n        The minutes between each booking that should be considered\n        transfer-time. That is the time it takes to get from one booking\n        to another. Basically acts as a suffix to each booking, extending\n        its end time by n minutes.\n\n    :alignment:\n        Align the date range to the given value. Currently only 'day' is\n        supported. When an alignment is active, all bookings are internally\n        stretched to at least cover the alignment.\n\n        For example, if 'day' is given, a booking that lasts 4 hours is\n        considered to last the whole day and it will block out bookings\n        on the same day.\n\n        Note that the ``minutes_between`` parameter is independent of this.\n        That is if there's 90 minutes between bookings and the bookings are\n        aligned to the day, there can only be a booking every other day::\n\n            10:00 - 19:00 becomes 00:00 - 24:00 + 90mins.\n\n        Usually you probably do not want minutes_between combined with\n        an alignment.\n\n    \"\"\"\n    assert alignment in (None, 'day')\n\n    if sort_bookings:\n        bookings = sorted(bookings, key=lambda b: b.attendee_id)\n\n    attendee_limits = attendee_limits or {}\n\n    # pre-calculate the booking scores\n    score_function = score_function or Scoring()\n\n    for booking in bookings:\n        booking.score = score_function(booking)\n\n    # after the booking score has been calculated, the scoring function\n    # should no longer be used for performance reasons\n    score_function = None\n\n    occasions = {o.id: OccasionAgent(o) for o in occasions}\n\n    attendees = {\n        aid: AttendeeAgent(\n            aid,\n            limit=attendee_limits.get(aid, default_limit),\n            bookings=bookings,\n            minutes_between=minutes_between,\n            alignment=alignment\n        )\n        for aid, bookings in groupby(bookings, key=lambda b: b.attendee_id)\n    }\n\n    # I haven't proven yet that the following loop will always end. 
Until I\n # do there's a fallback check to make sure that we'll stop at some point\n budget = LoopBudget(max_ticks=len(bookings) * len(attendees))\n\n # while there are attendees with entries in a wishlist\n while next((a for a in attendees.values() if a.wishlist), None):\n\n if budget.limit_reached(as_exception=hard_budget):\n break\n\n candidates = [a for a in attendees.values() if a.wishlist]\n matched = 0\n\n # match attendees to courses\n while candidates:\n candidate = candidates.pop()\n\n for booking in candidate.wishlist:\n if occasions[booking.occasion_id].match(candidate, booking):\n matched += 1\n break # required because the wishlist has been changed\n\n # if no matches were possible the situation can't be improved\n if not matched:\n break\n\n # make sure the algorithm didn't make any mistakes\n if validity_check:\n for a in attendees.values():\n assert a.is_valid\n\n # make sure the result is stable\n if stability_check:\n assert is_stable(attendees.values(), occasions.values())\n\n return Bunch(\n open=set(b for a in attendees.values() for b in a.wishlist),\n accepted=set(b for a in attendees.values() for b in a.accepted),\n blocked=set(b for a in attendees.values() for b in a.blocked)\n )\n\n\ndef deferred_acceptance_from_database(session, period_id, **kwargs):\n period = session.query(Period).filter(Period.id == period_id).one()\n\n b = session.query(Booking)\n b = b.options(joinedload(Booking.occasion))\n b = b.filter(Booking.period_id == period_id)\n b = b.filter(Booking.state != 'cancelled')\n b = b.filter(Booking.created >= period.created)\n b = b.order_by(Booking.attendee_id)\n\n o = session.query(Occasion)\n o = o.filter(Occasion.period_id == period_id)\n o = o.options(\n defer('meeting_point'),\n defer('note'),\n defer('cost')\n )\n\n if period.max_bookings_per_attendee:\n default_limit = period.max_bookings_per_attendee\n attendee_limits = None\n else:\n default_limit = None\n attendee_limits = {\n a.id: a.limit for a in\n session.query(Attendee.id, Attendee.limit)\n }\n\n # fetch it here as it'll be reused multiple times\n bookings = list(b)\n\n results = deferred_acceptance(\n bookings=bookings, occasions=o,\n default_limit=default_limit, attendee_limits=attendee_limits,\n minutes_between=period.minutes_between, alignment=period.alignment,\n sort_bookings=False, **kwargs)\n\n # write the changes to the database\n def update_bookings(targets, state):\n q = session.query(Booking)\n q = q.filter(Booking.state != state)\n q = q.filter(Booking.state != 'cancelled')\n q = q.filter(Booking.period_id == period_id)\n q = q.filter(Booking.id.in_(t.id for t in targets))\n\n for booking in q:\n booking.state = state\n\n with session.no_autoflush:\n update_bookings(results.open, 'open')\n update_bookings(results.accepted, 'accepted')\n update_bookings(results.blocked, 'blocked')\n\n\ndef is_stable(attendees, occasions):\n \"\"\" Returns true if the matching between attendees and occasions is\n stable.\n\n This runs in O(n^4) time, where n is the combination of\n bookings and occasions. 
So this is a testing tool, not something to\n run in production.\n\n \"\"\"\n\n for attendee in attendees:\n for booking in attendee.accepted:\n for occasion in occasions:\n\n # the booking was actually accepted, skip\n if booking in occasion.bookings:\n continue\n\n # if the current occasion prefers the given booking..\n over = occasion.preferred(booking)\n\n if over:\n for o in occasions:\n if o == occasion:\n continue\n\n # ..and another occasion prefers the loser..\n switch = o.preferred(over)\n\n # .. we have an unstable matching\n if switch and occasion.preferred(switch):\n return False\n\n return True\n"},"avg_line_length":{"kind":"number","value":33.8142493639,"string":"33.814249"},"max_line_length":{"kind":"number","value":78,"string":"78"},"alphanum_fraction":{"kind":"number","value":0.6363157499,"string":"0.636316"}}},{"rowIdx":46276,"cells":{"hexsha":{"kind":"string","value":"88171ad87dc7b1072f2705b938c97bec9545041c"},"size":{"kind":"number","value":10749,"string":"10,749"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"AustinBot/all_cogs/boardgame.py"},"max_stars_repo_name":{"kind":"string","value":"austinmh12/DiscordBots"},"max_stars_repo_head_hexsha":{"kind":"string","value":"55550b68a7ad6423de55e62dbbff93fd88f08ff2"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"AustinBot/all_cogs/boardgame.py"},"max_issues_repo_name":{"kind":"string","value":"austinmh12/DiscordBots"},"max_issues_repo_head_hexsha":{"kind":"string","value":"55550b68a7ad6423de55e62dbbff93fd88f08ff2"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"AustinBot/all_cogs/boardgame.py"},"max_forks_repo_name":{"kind":"string","value":"austinmh12/DiscordBots"},"max_forks_repo_head_hexsha":{"kind":"string","value":"55550b68a7ad6423de55e62dbbff93fd88f08ff2"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from . import log, BASE_PATH, Page, MyCog, chunk\nfrom discord import File\nfrom discord.ext import commands, tasks\nimport asyncio\nfrom PIL import Image, ImageDraw, ImageFont\nfrom random import randint\nimport typing\nfrom . 
import boardgameFunctions as BGF\nfrom .boardgameFunctions import yahtzee\n\n# Version\nversion = '1.0.0'\n\n# Constants\n\n# Functions\n\n# Classes\nclass BoardGameCog(MyCog):\n\tavailable_games = ['yahtzee']\n\n\tdef __init__(self, bot):\n\t\tsuper().__init__(bot)\n\t\tself.yahtzee_game = None\n\t\tself.iniatited_games = {\n\t\t\t'yahtzee': {'owner': None, 'players': []}\n\t\t}\n\n\t# Functions\n\tdef initiate_game(self, game, user_id):\n\t\tself.iniatited_games[game]['owner'] = user_id\n\t\tself.iniatited_games[game]['players'].append(user_id)\n\n\tdef add_player(self, game, user_id):\n\t\tself.iniatited_games[game]['players'].append(user_id)\n\n\tdef remove_player(self, game, user_id):\n\t\tself.iniatited_games[game]['players'].pop(self.iniatited_games[game]['players'].index(user_id))\n\t\tif user_id == self.iniatited_games[game]['owner']:\n\t\t\tif self.iniatited_games[game]['players']:\n\t\t\t\tself.iniatited_games[game]['owner'] = self.iniatited_games[game]['players'][0]\n\t\t\telse:\n\t\t\t\tself.iniatited_games[game]['owner'] = None\n\n\t# Commands\n\t@commands.command(name='games',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='View all the available board games',\n\t\t\t\t\tbrief='View board games')\n\tasync def games(self, ctx):\n\t\tdesc = 'Welcome to the Board Game Plaza! Here you can view all the available\\n'\n\t\tdesc += 'board games. To initiate, or join, a board game, use **.<game name>**\\n'\n\t\tdesc += 'Once all the players who want to play have joined, the **owner** of\\n'\n\t\tdesc += 'the game instance can start the game with **.<game name> start**\\n\\n'\n\t\tfor game in __class__.available_games:\n\t\t\tdesc += f'***{game}***\\n'\n\t\treturn await self.paginated_embeds(ctx, Page('Board Games Plaza', desc))\n\n\t###########\n\t# Yahtzee #\n\t###########\n\t@commands.group(name='yahtzee',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tinvoke_without_command=True,\n\t\t\t\t\tdescription='Initiate a game of Yahtzee',\n\t\t\t\t\tbrief='Yahtzee',\n\t\t\t\t\taliases=['y'])\n\tasync def yahtzee_main(self, ctx):\n\t\tif self.yahtzee_game:\n\t\t\treturn await ctx.send('A game of Yahtzee is already ongoing.')\n\t\tyahtzee_info = self.iniatited_games['yahtzee']\n\t\tif not yahtzee_info['owner']:\n\t\t\tself.initiate_game('yahtzee', ctx.author.id)\n\t\t\treturn await ctx.send(f'A game of Yahtzee has been initiated by <@{ctx.author.id}>')\n\t\tif ctx.author.id in yahtzee_info['players']:\n\t\t\tself.remove_player('yahtzee', ctx.author.id)\n\t\t\tawait ctx.send('You have left the game.')\n\t\t\tif not yahtzee_info['owner']:\n\t\t\t\treturn await ctx.send('The game of Yahtzee has been canceled.')\n\t\t\t# stop here so that leaving the game does not immediately re-join it\n\t\t\treturn\n\t\tself.add_player('yahtzee', ctx.author.id)\n\t\treturn await ctx.send('You have joined the game of Yahtzee')\n\n\t@yahtzee_main.command(name='start',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Start a game that you initiated',\n\t\t\t\t\tbrief='Starts the game')\n\tasync def yahtzee_start(self, ctx):\n\t\tif self.yahtzee_game:\n\t\t\treturn await ctx.send('A game of Yahtzee is already ongoing.')\n\t\tyahtzee_info = self.iniatited_games['yahtzee']\n\t\tif yahtzee_info['owner'] == ctx.author.id:\n\t\t\tself.yahtzee_game = yahtzee.YahtzeeGame(yahtzee_info['players'])\n\t\t\treturn await ctx.send('The game of Yahtzee has started')\n\t\treturn await ctx.send('You didn\\'t initiate a game of Yahtzee.')\n\n\t@yahtzee_main.command(name='end',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='End a game that you initiated',\n\t\t\t\t\tbrief='Ends the game')\n\tasync def yahtzee_end(self, 
ctx):\n\t\tyahtzee_info = self.iniatited_games['yahtzee']\n\t\tif yahtzee_info['owner'] == ctx.author.id:\n\t\t\tself.yahtzee_game = None\n\t\t\tself.iniatited_games['yahtzee'] = {'owner': None, 'players': []}\n\t\t\treturn await ctx.send('The game of Yahtzee has been ended')\n\t\treturn await ctx.send('You didn\\'t initiate this game.')\n\n\t@yahtzee_main.command(name='roll',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Rolls the yahtzee dice',\n\t\t\t\t\tbrief='Rolls yahtzee dice')\n\tasync def yahtzee_roll(self, ctx):\n\t\tif not self.yahtzee_game:\n\t\t\treturn await ctx.send('There is no Yahtzee game ongoing.')\n\t\tif ctx.author.id != self.yahtzee_game.current_player.id:\n\t\t\treturn await ctx.send('It is not your turn.')\n\t\tif self.yahtzee_game.current_player.remaining_rolls == 0:\n\t\t\treturn await ctx.send('You have no rolls left, use **.yahtzee score <category>**')\n\t\tdice_str = f'{5 - len(self.yahtzee_game.current_player.held_dice)}d6'\n\t\troll_results = BGF.roll_dice(dice_str)\n\t\tself.yahtzee_game.current_player.last_roll = roll_results\n\t\tself.yahtzee_game.current_player.remaining_rolls -= 1\n\t\tself.yahtzee_game.current_player.held_this_turn = False\n\t\treturn await ctx.send(f'You rolled:\\n{\" \".join([str(r) for r in roll_results])}')\n\n\t@yahtzee_main.command(name='hold',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Holds yahtzee dice',\n\t\t\t\t\tbrief='Holds yahtzee dice')\n\tasync def yahtzee_hold(self, ctx, *positions):\n\t\tif not self.yahtzee_game:\n\t\t\treturn await ctx.send('There is no Yahtzee game ongoing.')\n\t\tif ctx.author.id != self.yahtzee_game.current_player.id:\n\t\t\treturn await ctx.send('It is not your turn.')\n\t\tif self.yahtzee_game.current_player.held_this_turn:\n\t\t\treturn await ctx.send('You held dice this turn already, roll again with **.yahtzee roll**')\n\t\tif not positions:\n\t\t\tmsg = f'Your last roll was {\" \".join([str(r) for r in self.yahtzee_game.current_player.last_roll])}\\n'\n\t\t\tmsg += f'Your current held dice are {\" \".join([str(r) for r in self.yahtzee_game.current_player.held_dice])}'\n\t\t\treturn await ctx.send(msg)\n\t\tpositions = list(positions)\n\t\tpositions.sort(reverse=True)\n\t\tfor position in positions:\n\t\t\tif int(position) == 0:\n\t\t\t\tbreak\n\t\t\tself.yahtzee_game.current_player.held_dice.append(self.yahtzee_game.current_player.last_roll[int(position) - 1])\n\t\t\tself.yahtzee_game.current_player.last_roll.pop(int(position) - 1)\n\t\tself.yahtzee_game.current_player.held_this_turn = True\n\t\treturn await ctx.send(f'You hold {\" \".join([str(r) for r in self.yahtzee_game.current_player.held_dice])}')\n\n\t@yahtzee_main.command(name='score',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score(self, ctx, category: typing.Optional[str] = ''):\n\t\tif not self.yahtzee_game:\n\t\t\treturn await ctx.send('There is no Yahtzee game ongoing.')\n\t\tif ctx.author.id != self.yahtzee_game.current_player.id:\n\t\t\treturn await ctx.send('It is not your turn.')\n\t\tif category not in yahtzee.top_categories and category not in yahtzee.bottom_categories:\n\t\t\tf = self.yahtzee_game.current_player.get_board()\n\t\t\tcats = ' '.join([f'***{c}***' for c in self.yahtzee_game.current_player.unscored_categories])\n\t\t\tawait ctx.send(f'These are the categories that you haven\\'t used\\n{cats}', file=f)\n\t\t\treturn 
f.close()\n\t\tself.yahtzee_game.current_player.calculate_score(category)\n\t\tf = self.yahtzee_game.current_player.get_board()\n\t\tawait ctx.send('Here is your score card', file=f)\n\t\tf.close()\n\t\tself.yahtzee_game.next_player()\n\t\tif self.yahtzee_game.game_done:\n\t\t\treturn await ctx.send(f'Game over! <@{self.yahtzee_game.winner.id}> wins!')\n\t\treturn await ctx.send(f'It\\'s now <@{self.yahtzee_game.current_player.id}>\\'s turn!')\n\n\t@yahtzee_main.command(name='1s',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_1s(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, '1s')\n\n\t@yahtzee_main.command(name='2s',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_2s(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, '2s')\n\n\t@yahtzee_main.command(name='3s',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_3s(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, '3s')\n\n\t@yahtzee_main.command(name='4s',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_4s(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, '4s')\n\n\t@yahtzee_main.command(name='5s',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_5s(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, '5s')\n\n\t@yahtzee_main.command(name='6s',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_6s(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, '6s')\n\n\t@yahtzee_main.command(name='3kind',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_3kind(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, '3kind')\n\n\t@yahtzee_main.command(name='4kind',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_4kind(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, '4kind')\n\n\t@yahtzee_main.command(name='fullhouse',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_fullhouse(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, 'fullhouse')\n\n\t@yahtzee_main.command(name='small',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_small(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, 'small')\n\n\t@yahtzee_main.command(name='large',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the 
category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_large(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, 'large')\n\n\t@yahtzee_main.command(name='yahtzee',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_yahtzee(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, 'yahtzee')\n\n\t@yahtzee_main.command(name='chance',\n\t\t\t\t\tpass_context=True,\n\t\t\t\t\tdescription='Calculates the score for the category chosen using your held dice.',\n\t\t\t\t\tbrief='Scores your held dice')\n\tasync def yahtzee_score_chance(self, ctx):\n\t\treturn await self.yahtzee_score(ctx, 'chance')"},"avg_line_length":{"kind":"number","value":40.8707224335,"string":"40.870722"},"max_line_length":{"kind":"number","value":115,"string":"115"},"alphanum_fraction":{"kind":"number","value":0.7246255466,"string":"0.724626"}}},{"rowIdx":46277,"cells":{"hexsha":{"kind":"string","value":"888f2477c245e43c2f99afc8f694b43d8280fa8a"},"size":{"kind":"number","value":627,"string":"627"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"DataCollections/Twitter/CTweets.py"},"max_stars_repo_name":{"kind":"string","value":"moasgh/BumbleBee"},"max_stars_repo_head_hexsha":{"kind":"string","value":"2b0aae7970ab316c7b8b12dd4032b41ee1772aad"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":7,"string":"7"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-03-06T05:53:43.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-01-30T17:31:18.000Z"},"max_issues_repo_path":{"kind":"string","value":"DataCollections/Twitter/CTweets.py"},"max_issues_repo_name":{"kind":"string","value":"moasgh/BumbleBee"},"max_issues_repo_head_hexsha":{"kind":"string","value":"2b0aae7970ab316c7b8b12dd4032b41ee1772aad"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"DataCollections/Twitter/CTweets.py"},"max_forks_repo_name":{"kind":"string","value":"moasgh/BumbleBee"},"max_forks_repo_head_hexsha":{"kind":"string","value":"2b0aae7970ab316c7b8b12dd4032b41ee1772aad"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import re\r\n\r\n\r\ndef clean(text):\r\n ctext = clean_urls(text)\r\n ctext = clean_references(ctext)\r\n ctext = clean_nonechars(ctext)\r\n ctext = ctext.strip()\r\n return ctext\r\n\r\n\r\ndef clean_urls(text):\r\n return re.sub('https?://[A-Za-z0-9./]+', '', text)\r\n\r\n\r\ndef clean_references(text):\r\n return re.sub('@[A-Za-z0-9]+', '', text)\r\n\r\n\r\ndef clean_nonechars(text):\r\n return re.sub('[^a-zA-Z0-9|\\s|#]', '', text)\r\n\r\n\r\ndef clean_hashtags(text):\r\n return re.sub('#', '', text)\r\n\r\n\r\ndef clean_numbers(text):\r\n return re.sub('[0-9]+','',text)\r\n\r\n\r\ndef clean_retweets_char(text):\r\n return re.sub('rt 
','',text)\r\n"},"avg_line_length":{"kind":"number","value":18.4411764706,"string":"18.441176"},"max_line_length":{"kind":"number","value":55,"string":"55"},"alphanum_fraction":{"kind":"number","value":0.5789473684,"string":"0.578947"}}},{"rowIdx":46278,"cells":{"hexsha":{"kind":"string","value":"88a218ca3c0616a3dd8b446bc63c37ba8e4a019b"},"size":{"kind":"number","value":3201,"string":"3,201"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"backend/app/crud/crud_base_task.py"},"max_stars_repo_name":{"kind":"string","value":"jinnn-dev/patholearn"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-11-04T17:06:07.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-04T17:06:07.000Z"},"max_issues_repo_path":{"kind":"string","value":"backend/app/crud/crud_base_task.py"},"max_issues_repo_name":{"kind":"string","value":"JamesNeumann/learning-by-annotations"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c2b5e4b653eeb1c973aa5a7dad35ac8be18cb1ad"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":21,"string":"21"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-11-01T10:13:56.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-12-02T10:02:13.000Z"},"max_forks_repo_path":{"kind":"string","value":"backend/app/crud/crud_base_task.py"},"max_forks_repo_name":{"kind":"string","value":"jinnn-dev/patholearn"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-12-16T18:20:55.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-12-16T18:20:55.000Z"},"content":{"kind":"string","value":"from typing import List, Optional\r\n\r\nfrom sqlalchemy.orm import Session\r\n\r\nfrom app.crud.base import CRUDBase\r\nfrom app.models.base_task import BaseTask\r\nfrom app.schemas.base_task import BaseTaskUpdate, BaseTaskCreate\r\n\r\n\r\nclass CRUDBaseTask(CRUDBase[BaseTask, BaseTaskCreate, BaseTaskUpdate]):\r\n def get_multi_by_task_group(\r\n self, db: Session, *, task_group_id: int\r\n ) -> List[BaseTask]:\r\n \"\"\"\r\n Returns all BaseTasks to the given TaskGroup.\r\n\r\n :param db: DB-Session\r\n :param task_group_id: id of the TaskGroup\r\n :return: All found TaskGroups\r\n \"\"\"\r\n return (\r\n db.query(self.model).filter(BaseTask.task_group_id == task_group_id).all()\r\n )\r\n\r\n def get_multi_with_no_task_group(\r\n self, db: Session, *, course_id: int\r\n ) -> List[BaseTask]:\r\n \"\"\"\r\n Returns all BaseTasks without a TaskGroup to the given Course.\r\n\r\n :param db: DB-Session\r\n :param course_id: id of the Course\r\n :return: All found Courses\r\n \"\"\"\r\n return (\r\n db.query(self.model)\r\n .filter(BaseTask.course_id == course_id)\r\n .filter(BaseTask.task_group_id.is_(None))\r\n .all()\r\n )\r\n\r\n def get_by_short_name(self, db: Session, *, short_name: str) -> BaseTask:\r\n \"\"\"\r\n 
Returns the BaseTask with the given short name.\r\n\r\n        :param db: DB-Session\r\n        :param short_name: short name of the BaseTask\r\n        :return: The found BaseTask\r\n        \"\"\"\r\n        return db.query(self.model).filter(BaseTask.short_name == short_name).first()\r\n\r\n    def create_with_slide_id(self, db: Session, *, task_in: BaseTaskCreate) -> BaseTask:\r\n        \"\"\"\r\n        Creates a new BaseTask.\r\n\r\n        :param db: DB-Session\r\n        :param task_in: contains all information to create a new BaseTask\r\n        :return: the created BaseTask\r\n        \"\"\"\r\n        db_obj = BaseTask()\r\n        db_obj.task_group_id = task_in.task_group_id\r\n        db_obj.slide_id = task_in.slide_id\r\n        db_obj.name = task_in.name\r\n        db.add(db_obj)\r\n        db.commit()\r\n        # Session.refresh() returns None, so do not assign its result\r\n        db.refresh(db_obj)\r\n        return db_obj\r\n\r\n    def get_by_name(\r\n        self, db: Session, name: str, task_group_id: int\r\n    ) -> Optional[BaseTask]:\r\n        \"\"\"\r\n        Returns the BaseTask with the given name to a TaskGroup\r\n\r\n        :param db: DB-Session\r\n        :param name: Name of the BaseTask\r\n        :param task_group_id: Id of the TaskGroup\r\n        :return: the found BaseTask\r\n        \"\"\"\r\n        return (\r\n            db.query(self.model)\r\n            .filter(BaseTask.name == name)\r\n            .filter(BaseTask.task_group_id == task_group_id)\r\n            .first()\r\n        )\r\n\r\n    def base_task_uses_slide(self, db: Session, slide_id: str) -> bool:\r\n        \"\"\"\r\n        Checks if a base task uses the given slide\r\n\r\n        :param db: DB-Session\r\n        :param slide_id: ID of the Slide\r\n        :return: If the slide is used by any base task\r\n        \"\"\"\r\n        return (\r\n            db.query(self.model).filter(BaseTask.slide_id == slide_id).first()\r\n            is not None\r\n        )\r\n\r\n\r\ncrud_base_task = CRUDBaseTask(BaseTask)\r\n"},"avg_line_length":{"kind":"number","value":31.3823529412,"string":"31.382353"},"max_line_length":{"kind":"number","value":89,"string":"89"},"alphanum_fraction":{"kind":"number","value":0.5870040612,"string":"0.587004"}}},{"rowIdx":46279,"cells":{"hexsha":{"kind":"string","value":"14fac4dfc6d2a571f44cfcd35f51402f4d2edf5f"},"size":{"kind":"number","value":722,"string":"722"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"ANN/past_works/trapezoidal.py"},"max_stars_repo_name":{"kind":"string","value":"joao-frohlich/BCC"},"max_stars_repo_head_hexsha":{"kind":"string","value":"9ed74eb6d921d1280f48680677a2140c5383368d"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n    \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":10,"string":"10"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-12-08T20:18:15.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-06-07T20:00:07.000Z"},"max_issues_repo_path":{"kind":"string","value":"ANN/past_works/trapezoidal.py"},"max_issues_repo_name":{"kind":"string","value":"joao-frohlich/BCC"},"max_issues_repo_head_hexsha":{"kind":"string","value":"9ed74eb6d921d1280f48680677a2140c5383368d"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_issues_count":{"kind":"number","value":2,"string":"2"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-06-28T03:42:13.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-06-28T16:53:13.000Z"},"max_forks_repo_path":{"kind":"string","value":"ANN/past_works/trapezoidal.py"},"max_forks_repo_name":{"kind":"string","value":"joao-frohlich/BCC"},"max_forks_repo_head_hexsha":{"kind":"string","value":"9ed74eb6d921d1280f48680677a2140c5383368d"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-01-14T19:59:20.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-06-15T11:53:21.000Z"},"content":{"kind":"string","value":"from math import tan\n\n\"\"\"\nFuncionamento\n O algoritmo implementado funciona da seguinte maneira\n É necessário definir a função, os intervalos inferioes e superiores e por fim\n quantidade de iterações\n\n a = limite inferior\n b = limite superior\n n = iterações\n f(x) = função\n\"\"\"\n\n\ndef calcula_integral(f, a, b, n):\n h = (b - a) / n\n s = (f(a) + f(b)) / 2\n for i in range(1, n):\n s += f(a + i * h)\n return h * s\n\n\ndef f(x):\n res = x + tan(tan(abs(1 / 4 * x - 7 / 4))) # alterar funcao\n return res\n\n\na = 3\nb = 11\nn = 19\nresultado = calcula_integral(f, a, b, n)\nprint(\"Integral de f(x) = FUNCAO_AQUI, de %d até %d\" % (a, b))\nprint(\"I ~ %.2f\" % resultado)\nprint(\"%d Iterações\" % n)\n"},"avg_line_length":{"kind":"number","value":20.0555555556,"string":"20.055556"},"max_line_length":{"kind":"number","value":81,"string":"81"},"alphanum_fraction":{"kind":"number","value":0.5831024931,"string":"0.583102"}}},{"rowIdx":46280,"cells":{"hexsha":{"kind":"string","value":"092e558882814fb270ae1b63b10c91a2f1f27c7c"},"size":{"kind":"number","value":10513,"string":"10,513"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"generate_plots.py"},"max_stars_repo_name":{"kind":"string","value":"silberzwiebel/klimawatch"},"max_stars_repo_head_hexsha":{"kind":"string","value":"fbb5b98e70080581c40821aa8f112c041c853bad"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"generate_plots.py"},"max_issues_repo_name":{"kind":"string","value":"silberzwiebel/klimawatch"},"max_issues_repo_head_hexsha":{"kind":"string","value":"fbb5b98e70080581c40821aa8f112c041c853bad"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"generate_plots.py"},"max_forks_repo_name":{"kind":"string","value":"silberzwiebel/klimawatch"},"max_forks_repo_head_hexsha":{"kind":"string","value":"fbb5b98e70080581c40821aa8f112c041c853bad"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-02-07T09:21:59.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2020-02-07T09:21:59.000Z"},"content":{"kind":"string","value":"# plots\nimport plotly.graph_objects as go\n# make it easier with numeric values\nimport pandas\nimport numpy as np\n# for computing the trend\nfrom scipy.stats import linregress\n# reading command line arguments\nimport sys\n# writing json\nimport json\n# wrapping long lines\nimport textwrap\n# possibility to delete files\nimport os\n\n# read data\nif(len(sys.argv) == 1):\n print(\"No city given, plotting data for Münster ('data/muenster.csv')\")\n city = \"muenster\"\n df = pandas.read_csv(\"data/muenster.csv\")\nelse:\n print(\"Plotting data for\", sys.argv[1])\n city = sys.argv[1]\n try:\n df = pandas.read_csv(\"data/\" + city + \".csv\")\n except:\n print(\"File not found. Does the file data/\", city + \".csv\", \"exist?\")\n exit();\n\n# create plot\nfig = go.Figure()\n\nemission_1990 = {}\n\n# compute category-wise percentage (compared to 1990)\nfor cat in set(df.category):\n if(cat != \"Einwohner\"):\n emission_1990[str(cat)] = float(df[(df.year == 1990) & (df.category == cat) & (df.type == \"real\")].value)\n\n df.loc[df.category == cat, 'percentage'] = df[df.category == cat].value.astype(float) / emission_1990[str(cat)]\n\n# set() only lists unique values\n# this loop plots all categories present in the csv, if type is either \"real\" or \"geplant\"\nfor cat in set(df.category):\n subdf = df[(df.category == cat) & (df.type != \"Einwohner\")]\n\n subdf_real = subdf[subdf.type == \"real\"]\n\n fig.add_trace(go.Scatter(x = subdf_real.year, y = subdf_real.value,\n name = cat + \", real\", mode = \"lines+markers\",\n legendgroup = cat,\n text = subdf_real.percentage,\n hovertemplate =\n \"tatsächliche Emissionen, Kategorie: \" + cat +\n \" Jahr: %{x} \" +\n \"CO2-Emissionen (tausend Tonnen): %{y:.1f} \" +\n \"Prozent von Emissionen 1990: \" + \"%{text:.0%}\" +\n \"\") # no additional legend text in tooltip\n )\n\n subdf_planned = subdf[subdf.type == \"geplant\"]\n fig.add_trace(go.Scatter(x = subdf_planned.year, y = subdf_planned.value, name = cat + \", geplant\",\n mode = \"lines+markers\", line = dict(dash = \"dash\"),\n legendgroup = cat,\n text = subdf_planned.percentage,\n hovertemplate =\n \"geplante Emissionen, Kategorie: \" + cat +\n \" Jahr: %{x} \" +\n \"CO2-Emissionen (tausend Tonnen): %{y:.1f} \" +\n \"Prozent von Emissionen 1990: \" + \"%{text:.0%}\" +\n \"\") # no additional legend text in tooltip\n )\n\n# compute trend based on current data\nsubdf = df[df.category == \"Gesamt\"]\nsubdf_real = subdf[subdf.type == \"real\"]\n\n# variables to write to JSON later on\nyears_past_total_real = list(subdf_real.year)\nvalues_past_total_real = list(subdf_real.value)\n\nslope, intercept, r, p, stderr = linregress(subdf_real.year, subdf_real.value)\n# print info about trend\nprint(\"linearer Trend: Steigung: \", slope, \"Y-Achsenabschnitt: \", intercept, \"R^2: \", r)\n\n# plot trend\nfig.add_trace(go.Scatter(x = subdf.year, y = slope * subdf.year + intercept, name = \"Trend\",\n mode = \"lines\", line = dict(dash = \"dot\"),\n legendgroup = \"future\",\n text = (slope * subdf.year + intercept) / emission_1990[\"Gesamt\"],\n hovertemplate =\n \"bisheriger Trend\" +\n \" Jahr: %{x} \" +\n \"CO2-Emissionen (tausend Tonnen): %{y:.1f} \" +\n \"Prozent von Emissionen 1990: \" + 
\"%{text:.0%}\" +\n \"\") # no additional legend text in tooltip\n )\n\n\n# compute remaining paris budget\nlast_emissions = np.array(df[df.note == \"last_emissions\"].value)\n# see https://scilogs.spektrum.de/klimalounge/wie-viel-co2-kann-deutschland-noch-ausstossen/\nparis_budget_germany_2019 = 7300000\ninhabitants_germany = 83019213\nparis_budget_per_capita_2019 = paris_budget_germany_2019 / inhabitants_germany\nparis_budget_full_city_2019 = paris_budget_per_capita_2019 * np.array(df[df.type == \"Einwohner\"].value)\n# substract individual CO2 use; roughly 40%, see https://uba.co2-rechner.de/\nparis_budget_wo_individual_city_2019 = paris_budget_full_city_2019 * 0.6\n# substract already emitted CO2 from 2019 onwards; assume last measured budget is 2019 emission\nparis_budget_wo_individual_city_2020 = paris_budget_wo_individual_city_2019 - last_emissions\n\n# compute slope for linear reduction of paris budget\nparis_slope = (-pow(last_emissions, 2)) / (2 * paris_budget_wo_individual_city_2020)\nyears_to_climate_neutral = - last_emissions / paris_slope\nfull_years_to_climate_neutral = int(np.round(years_to_climate_neutral))\n\n# plot paris line\nfuture = list(range(0, full_years_to_climate_neutral, 1)) # from 2020 to 2050\nfuture.append(float(years_to_climate_neutral))\n\n# TODO: make df instead of (double) calculation inline?\nfig.add_trace(go.Scatter(x = np.array(future) + 2020, y = paris_slope * np.array(future) + last_emissions,\n name = \"Paris berechnet\",\n mode = \"lines+markers\", line = dict(dash = \"dash\"),\n legendgroup = \"future\",\n text = (paris_slope * np.array(future) + last_emissions) / emission_1990[\"Gesamt\"],\n hovertemplate =\n \"Paris-Budget\" +\n \" Jahr: %{x:.0f} \" +\n \"CO2-Emissionen (tausend Tonnen): %{y:.1f} \" +\n \"Prozent von Gesamt-Emissionen 1990: \" + \"%{text:.0%}\" +\n \"\") # no additional legend text in tooltip\n )\n\nfig.add_trace(go.Scatter(\n x = [2020],\n y = [emission_1990[\"Gesamt\"] + (emission_1990[\"Gesamt\"] / 30)],\n mode = \"text\",\n text = \"heute\",\n hoverinfo=\"none\",\n showlegend = False)\n)\n\n# horizontal legend; vertical line at 2020\nfig.update_layout(\n title = \"Realität und Ziele\",\n yaxis_title = \"CO2 in tausend Tonnen\",\n xaxis_title = \"Jahr\",\n # horizontal legend\n legend_orientation = \"h\",\n # put legend above plot to avoid overlapping-bug\n legend_xanchor = \"center\",\n legend_y = -0.25,\n legend_x = 0.5,\n legend_font_size = 10,\n # disable dragmode for better mobile experience\n dragmode = False,\n # German number separators\n separators = \",.\",\n # vertical \"today\" line\n shapes = [\n go.layout.Shape(\n type = \"line\",\n x0 = 2020,\n y0 = 0,\n x1 = 2020,\n y1 = emission_1990[\"Gesamt\"],\n )]\n )\n\n# write plot to file\nfig.write_html(\"hugo/layouts/shortcodes/paris_\" + city + \".html\", include_plotlyjs = False,\n config = {'displayModeBar': False}, full_html = False, auto_open = True)\n\n# write computed Paris budget to JSON file for you-draw-it\n\nparis_data = { }\n\nparis_data['chart_id'] = 'you-draw-it'\n\nparis_data['chart'] = {\n 'heading': 'Wie sollte sich der CO2-Ausstoß entwickeln?',\n 'lastPointShownAt': 2020,\n 'y_unit': 't. T.',\n 'data': [] }\n\n# past data\n\npast = range(1990, 2020, 5)\n\nfor y in past:\n try:\n yidx = years_past_total_real.index(y)\n paris_data[\"chart\"][\"data\"].append({\n y: values_past_total_real[yidx]\n })\n except ValueError:\n print(\"You-draw-it-chart: Emissions for\", y, \"unknown. 
Estimating from the trend.\")\n paris_data[\"chart\"][\"data\"].append({\n y: slope * y + intercept\n })\n\n# years with remaining budget\nparis_years = list(np.array(future[:-1]) + 2020)\nbudget_per_year = list(paris_slope * np.array(future[:-1]) + last_emissions)\n\nfor y in range(len(paris_years)):\n if y % 5 == 0: # print only every 5th year\n paris_data[\"chart\"][\"data\"].append({\n int(paris_years[y]): budget_per_year[y]\n })\n\nclimate_neutral_by = int(np.round(max(paris_years)))\n# range every climate-neutral year, because\n# we don't know the climate-neutral year and can't do 5-year steps\nyears_after_budget = range(climate_neutral_by + 1, 2051, 1)\n\nfor y in years_after_budget:\n if y % 5 == 0: # print only every 5th year\n paris_data[\"chart\"][\"data\"].append({\n y: 0\n })\n\nwith open(\"hugo/data/you_draw_it_\" + city + \".json\", \"w\", encoding='utf8') as outfile:\n json.dump(paris_data, outfile, indent = 2, ensure_ascii=False)\n\n## visualisation of status of modules of Klimaschutzkonzepte\n\ntry:\n modules_df = pandas.read_csv(\"data/\" + city + \"_sachstand.csv\")\nexcept:\n print(\"Sachstand file for \" + city + \" (data/\" + city + \"_sachstand.csv) not found. Not creating module plot.\")\n exit();\n\n# find unique overarching categories (here: first character of ID)\ncategories = set()\nfor c in modules_df[\"id\"]:\n categories.add(c[0:1])\n\n## create a single treemap plot for every overarching category\n\n# delete old plot file\nos.remove(\"hugo/layouts/shortcodes/modules_\" + city + \".html\")\nmodules_plot_file = open(\"hugo/layouts/shortcodes/modules_\" + city + \".html\", \"a\")\n\n\nfor cat in categories:\n\n modules_onecat = modules_df[modules_df.id.str.startswith(cat)]\n\n fig_modules = go.Figure(go.Treemap(\n branchvalues = \"remainder\",\n ids = modules_onecat[\"id\"],\n labels = \"\" + modules_onecat[\"title\"] + \" (\" + modules_onecat[\"id\"] + \")\",\n parents = modules_onecat[\"parent\"],\n values = modules_onecat[\"priority\"],\n marker_colors = modules_onecat[\"assessment\"],\n text = (modules_onecat[\"text\"]).apply(lambda txt: ' '.join(textwrap.wrap(txt, width = 100))),\n textinfo = \"label+text\",\n hovertext = (modules_onecat[\"text\"] + \" (\" + modules_onecat[\"id\"] + \")\"\n \" Priorität: \" + (modules_onecat[\"priority\"]).astype(str) +\n \" Potential: \" + (modules_onecat[\"potential\"]).astype(str)).apply(lambda txt: ' '.join(textwrap.wrap(txt, width = 100))),\n hoverinfo = \"text\",\n pathbar = {\"visible\": True},\n insidetextfont = {\"size\": 75}\n )\n )\n\n fig_modules.update_layout(\n margin = dict(r=10, l=10)\n # ~ height = 750\n )\n\n modules_plot_file.write(fig_modules.to_html(include_plotlyjs = False,\n config={'displayModeBar': False}, full_html = False))\n\n\nmodules_plot_file.close()\n"},"avg_line_length":{"kind":"number","value":37.5464285714,"string":"37.546429"},"max_line_length":{"kind":"number","value":139,"string":"139"},"alphanum_fraction":{"kind":"number","value":0.6126700276,"string":"0.61267"}}},{"rowIdx":46281,"cells":{"hexsha":{"kind":"string","value":"117a2a307d601c9f7de185e43945911010ddb280"},"size":{"kind":"number","value":1573,"string":"1,573"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"test/test_cashflow.py"},"max_stars_repo_name":{"kind":"string","value":"scuervo91/dcapy"},"max_stars_repo_head_hexsha":{"kind":"string","value":"46c9277e607baff437e5707167476d5f7e2cf80c"},"max_stars_repo_licenses":{"kind":"list 
like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-05-21T13:26:10.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-15T17:17:01.000Z"},"max_issues_repo_path":{"kind":"string","value":"test/test_cashflow.py"},"max_issues_repo_name":{"kind":"string","value":"scuervo91/dcapy"},"max_issues_repo_head_hexsha":{"kind":"string","value":"46c9277e607baff437e5707167476d5f7e2cf80c"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"test/test_cashflow.py"},"max_forks_repo_name":{"kind":"string","value":"scuervo91/dcapy"},"max_forks_repo_head_hexsha":{"kind":"string","value":"46c9277e607baff437e5707167476d5f7e2cf80c"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import unittest\nimport numpy as np\nfrom datetime import date\nfrom pandas.testing import assert_frame_equal\nimport pandas as pd\n\nfrom dcapy.cashflow import CashFlow, CashFlowModel\n\nclass TestCashFlow(unittest.TestCase):\n def test_npv(self):\n oil_sell = CashFlow(\n name = 'oil_sell',\n const_value= [10000,5000,8000,12000,30000],\n start = date(2021,1,1),\n end = date(2021,5,1),\n freq_input = 'M'\n )\n oil_capex = CashFlow(\n name = 'oil_capex',\n const_value= [-50000],\n start = date(2021,1,1),\n end = date(2021,1,1),\n freq_input = 'M'\n )\n cm = CashFlowModel(\n name = 'Example Cashflow Model',\n income=[oil_sell],\n capex=[oil_capex]\n )\n \n assert_frame_equal(cm.npv(0.08), pd.DataFrame({'npv':3065.22267}, index=[0.08])) \n \n def test_irr(self):\n oil_sell = CashFlow(\n name = 'oil_sell',\n const_value= [40,39,59,55,20],\n start = date(2021,1,1),\n end = date(2021,5,1),\n freq_input = 'M'\n )\n oil_capex = CashFlow(\n name = 'oil_capex',\n const_value= [-140],\n start = date(2021,1,1),\n end = date(2021,1,1),\n freq_input = 'M'\n )\n cm = CashFlowModel(\n name = 'Example Cashflow Model',\n income=[oil_sell],\n capex=[oil_capex]\n )\n print(cm.irr())\n assert 0.28095 == round(cm.irr(),5)"},"avg_line_length":{"kind":"number","value":29.1296296296,"string":"29.12963"},"max_line_length":{"kind":"number","value":89,"string":"89"},"alphanum_fraction":{"kind":"number","value":0.5060394151,"string":"0.506039"}}},{"rowIdx":46282,"cells":{"hexsha":{"kind":"string","value":"11812f21c9c596dffd83d94ca429f45ff1c17050"},"size":{"kind":"number","value":92,"string":"92"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"python/python_backup/PRAC_PYTHON/5_for.py"},"max_stars_repo_name":{"kind":"string","value":"SayanGhoshBDA/code-backup"},"max_stars_repo_head_hexsha":{"kind":"string","value":"8b6135facc0e598e9686b2e8eb2d69dd68198b80"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_stars_count":{"kind":"number","value":16,"string":"16"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-11-26T08:39:42.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2019-05-08T10:09:52.000Z"},"max_issues_repo_path":{"kind":"string","value":"python/python_backup/PRAC_PYTHON/5_for.py"},"max_issues_repo_name":{"kind":"string","value":"SayanGhoshBDA/code-backup"},"max_issues_repo_head_hexsha":{"kind":"string","value":"8b6135facc0e598e9686b2e8eb2d69dd68198b80"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":8,"string":"8"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-05-04T06:29:26.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-02-12T05:33:16.000Z"},"max_forks_repo_path":{"kind":"string","value":"python/python_backup/PRAC_PYTHON/5_for.py"},"max_forks_repo_name":{"kind":"string","value":"SayanGhoshBDA/code-backup"},"max_forks_repo_head_hexsha":{"kind":"string","value":"8b6135facc0e598e9686b2e8eb2d69dd68198b80"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":5,"string":"5"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-02-11T16:02:21.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-02-05T07:48:30.000Z"},"content":{"kind":"string","value":"words=[\"Jimut\",\"Python\",\"c\"]\r\nfor w in words: #for just simple for loop\r\n\tprint(w,len(w))\r\n\t"},"avg_line_length":{"kind":"number","value":23,"string":"23"},"max_line_length":{"kind":"number","value":42,"string":"42"},"alphanum_fraction":{"kind":"number","value":0.6304347826,"string":"0.630435"}}},{"rowIdx":46283,"cells":{"hexsha":{"kind":"string","value":"0101f564eff3c78e8c68b76331f82e617d38803b"},"size":{"kind":"number","value":820,"string":"820"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"longest-common-prefix/longest-common-prefix.py"},"max_stars_repo_name":{"kind":"string","value":"hyeseonko/LeetCode"},"max_stars_repo_head_hexsha":{"kind":"string","value":"48dfc93f1638e13041d8ce1420517a886abbdc77"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-12-05T14:29:06.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-01-01T05:46:13.000Z"},"max_issues_repo_path":{"kind":"string","value":"longest-common-prefix/longest-common-prefix.py"},"max_issues_repo_name":{"kind":"string","value":"hyeseonko/LeetCode"},"max_issues_repo_head_hexsha":{"kind":"string","value":"48dfc93f1638e13041d8ce1420517a886abbdc77"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"longest-common-prefix/longest-common-prefix.py"},"max_forks_repo_name":{"kind":"string","value":"hyeseonko/LeetCode"},"max_forks_repo_head_hexsha":{"kind":"string","value":"48dfc93f1638e13041d8ce1420517a886abbdc77"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"class Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n if len(strs)==1:\n return strs[0]\n minlen = min([len(each) for each in strs])\n result=\"\"\n for i in range(minlen):\n basis = strs[-1][i]\n for j in range(len(strs)-1):\n if basis!=strs[j][i]:\n return result\n result+=basis\n return result\n \n # case: long common string (not prefix)\n # result=\"\"\n # for s1 in strs[0]:\n # nolook=False\n # for s2 in strs[1:]:\n # if result+s1 not in s2:\n # nolook=True\n # break\n # print(s1, s2, result, nolook)\n # if nolook==False:\n # result+=s1\n # return result"},"avg_line_length":{"kind":"number","value":31.5384615385,"string":"31.538462"},"max_line_length":{"kind":"number","value":58,"string":"58"},"alphanum_fraction":{"kind":"number","value":0.4390243902,"string":"0.439024"}}},{"rowIdx":46284,"cells":{"hexsha":{"kind":"string","value":"6df7257d5375f69e3ed61983628b9ca3d0df1d9f"},"size":{"kind":"number","value":2607,"string":"2,607"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"official/cv/srcnn/src/dataset.py"},"max_stars_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_stars_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-07-08T13:10:42.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-11-08T02:48:57.000Z"},"max_issues_repo_path":{"kind":"string","value":"official/cv/srcnn/src/dataset.py"},"max_issues_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_issues_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"official/cv/srcnn/src/dataset.py"},"max_forks_repo_name":{"kind":"string","value":"leelige/mindspore"},"max_forks_repo_head_hexsha":{"kind":"string","value":"5199e05ba3888963473f2b07da3f7bca5b9ef6dc"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"number","value":2,"string":"2"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-09-01T06:17:04.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2019-10-04T08:39:45.000Z"},"content":{"kind":"string","value":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing 
permissions and\n# limitations under the License.\n# ============================================================================\n\nimport glob\nimport numpy as np\nimport PIL.Image as pil_image\n\nimport mindspore.dataset as ds\n\nfrom src.config import srcnn_cfg as config\nfrom src.utils import convert_rgb_to_y\n\nclass EvalDataset:\n def __init__(self, images_dir):\n self.images_dir = images_dir\n scale = config.scale\n self.lr_group = []\n self.hr_group = []\n for image_path in sorted(glob.glob('{}/*'.format(images_dir))):\n hr = pil_image.open(image_path).convert('RGB')\n hr_width = (hr.width // scale) * scale\n hr_height = (hr.height // scale) * scale\n hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)\n lr = hr.resize((hr_width // scale, hr_height // scale), resample=pil_image.BICUBIC)\n lr = lr.resize((lr.width * scale, lr.height * scale), resample=pil_image.BICUBIC)\n hr = np.array(hr).astype(np.float32)\n lr = np.array(lr).astype(np.float32)\n hr = convert_rgb_to_y(hr)\n lr = convert_rgb_to_y(lr)\n\n self.lr_group.append(lr)\n self.hr_group.append(hr)\n\n def __len__(self):\n return len(self.lr_group)\n\n def __getitem__(self, idx):\n return np.expand_dims(self.lr_group[idx] / 255., 0), np.expand_dims(self.hr_group[idx] / 255., 0)\n\ndef create_train_dataset(mindrecord_file, batch_size=1, shard_id=0, num_shard=1, num_parallel_workers=4):\n data_set = ds.MindDataset(mindrecord_file, columns_list=[\"lr\", \"hr\"], num_shards=num_shard,\n shard_id=shard_id, num_parallel_workers=num_parallel_workers, shuffle=True)\n data_set = data_set.batch(batch_size, drop_remainder=True)\n return data_set\n\ndef create_eval_dataset(images_dir, batch_size=1):\n dataset = EvalDataset(images_dir)\n data_set = ds.GeneratorDataset(dataset, [\"lr\", \"hr\"], shuffle=False)\n data_set = data_set.batch(batch_size, drop_remainder=True)\n return data_set\n"},"avg_line_length":{"kind":"number","value":41.380952381,"string":"41.380952"},"max_line_length":{"kind":"number","value":105,"string":"105"},"alphanum_fraction":{"kind":"number","value":0.666283084,"string":"0.666283"}}},{"rowIdx":46285,"cells":{"hexsha":{"kind":"string","value":"0994508b38aca6fd6cbebc7f68d9e6a357639caa"},"size":{"kind":"number","value":424,"string":"424"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pyScript/custom_src/GlobalAccess.py"},"max_stars_repo_name":{"kind":"string","value":"Shirazbello/Pyscriptining"},"max_stars_repo_head_hexsha":{"kind":"string","value":"0f2c80a9bb10477d65966faeccc7783f20385c1b"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"pyScript/custom_src/GlobalAccess.py"},"max_issues_repo_name":{"kind":"string","value":"Shirazbello/Pyscriptining"},"max_issues_repo_head_hexsha":{"kind":"string","value":"0f2c80a9bb10477d65966faeccc7783f20385c1b"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"pyScript/custom_src/GlobalAccess.py"},"max_forks_repo_name":{"kind":"string","value":"Shirazbello/Pyscriptining"},"max_forks_repo_head_hexsha":{"kind":"string","value":"0f2c80a9bb10477d65966faeccc7783f20385c1b"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"class GlobalStorage:\n storage = {'design style': 'dark std',\n 'debugging': False}\n\n def debug(*args):\n s = ''\n for arg in args:\n s += ' '+str(arg)\n if GlobalStorage.storage['debugging']:\n print(' --> DEBUG:', s)\n\n # yyep, that's it....\n # you must be kidding...\n # you MUST be\n # it's actually true....\n # that's ridiculous.\n # indeed.\n"},"avg_line_length":{"kind":"number","value":23.5555555556,"string":"23.555556"},"max_line_length":{"kind":"number","value":46,"string":"46"},"alphanum_fraction":{"kind":"number","value":0.4787735849,"string":"0.478774"}}},{"rowIdx":46286,"cells":{"hexsha":{"kind":"string","value":"09f6606add82e1f791fea814ebb69a0d1607b4b5"},"size":{"kind":"number","value":6412,"string":"6,412"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"src/visitpy/visit_utils/tests/test_encoding.py"},"max_stars_repo_name":{"kind":"string","value":"visit-dav/vis"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c08bc6e538ecd7d30ddc6399ec3022b9e062127e"},"max_stars_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_stars_count":{"kind":"number","value":226,"string":"226"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2018-12-29T01:13:49.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-30T19:16:31.000Z"},"max_issues_repo_path":{"kind":"string","value":"src/visitpy/visit_utils/tests/test_encoding.py"},"max_issues_repo_name":{"kind":"string","value":"visit-dav/vis"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c08bc6e538ecd7d30ddc6399ec3022b9e062127e"},"max_issues_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_issues_count":{"kind":"number","value":5100,"string":"5,100"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2019-01-14T18:19:25.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T23:08:36.000Z"},"max_forks_repo_path":{"kind":"string","value":"src/visitpy/visit_utils/tests/test_encoding.py"},"max_forks_repo_name":{"kind":"string","value":"visit-dav/vis"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c08bc6e538ecd7d30ddc6399ec3022b9e062127e"},"max_forks_repo_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n \"BSD-3-Clause\"\n]"},"max_forks_count":{"kind":"number","value":84,"string":"84"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2019-01-24T17:41:50.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-10T10:01:46.000Z"},"content":{"kind":"string","value":"# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt\n# Project developers. 
See the top-level LICENSE file for dates and other\n# details. No copyright assignment is required to contribute to VisIt.\n\n\"\"\"\n file: test_encoding.py\n author: Cyrus Harrison (cyrush@llnl.gov)\n created: 4/09/2010\n description:\n Unit tests for movie encoding helpers.\n\n\"\"\"\n\nimport unittest\nimport os\nimport sys\nimport glob\n\nfrom visit_utils import encoding\nfrom visit_utils.common import VisItException\n\nfrom os.path import join as pjoin\n\niframes_dir = pjoin(os.path.split(__file__)[0],\"_data\")\niframes = pjoin(iframes_dir,\"wave.movie.%04d.png\")\niframes_short_a = pjoin(iframes_dir,\"wave.movie.%03d.png\")\niframes_short_b = pjoin(iframes_dir,\"wave.movie.%d.png\")\niframes_stereo = pjoin(iframes_dir,\"wave.movie.stereo.%04d.png\")\noutput_dir = pjoin(os.path.split(__file__)[0],\"_output\")\n\ndef lst_slnks():\n return glob.glob(pjoin(iframes_dir,\"_encode.lnk.*\"))\n\ndef clean_slnks():\n slnks = lst_slnks()\n for slnk in slnks:\n os.remove(slnk)\n\ndef check_encoded_file(path):\n if os.path.isfile(path):\n # make sure the file isn't empty\n st = os.stat(path)\n return st.st_size > 0\n return False\n\nclass TestEncoding(unittest.TestCase):\n def setUp(self):\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n def test_encoders(self):\n encoders = encoding.encoders()\n if len(encoders) > 0:\n self.assertTrue(\"mpg\" in encoders)\n self.assertTrue(\"wmv\" in encoders)\n def test_ffmpeg_encoders(self):\n for enc in [\"wmv\",\"mpg\",\"divx\",\"mov\",\"swf\",\"mp4\",\"avi\"]:\n if enc in encoding.encoders():\n ofile = pjoin(output_dir,\"wave.movie.%s\" % enc)\n encoding.encode(iframes,ofile)\n self.assertTrue(check_encoded_file(ofile))\n ofile = pjoin(output_dir,\"wave.movie.slow.%s\" % enc)\n encoding.encode(iframes,ofile,2)\n self.assertTrue(check_encoded_file(ofile))\n def test_sm(self):\n if \"sm\" in encoding.encoders():\n ofile = pjoin(output_dir,\"wave.movie.sm\")\n encoding.encode(iframes,ofile)\n self.assertTrue(check_encoded_file(ofile))\n clean_slnks()\n ofile = pjoin(output_dir,\"wave.movie.slow.sm\")\n encoding.encode(iframes,ofile,2)\n self.assertEqual(0,len(lst_slnks()))\n self.assertTrue(check_encoded_file(ofile))\n def test_unsupported(self):\n self.assertRaises(VisItException, encoding.encode, iframes,\"wave.movie.bad_ext\")\n def test_sm_stereo(self):\n if \"sm\" in encoding.encoders():\n clean_slnks()\n ofile = pjoin(output_dir,\"wave.movie.stereo.sm\")\n encoding.encode(iframes_stereo,ofile,stereo=True)\n self.assertEqual(0,len(lst_slnks()))\n self.assertTrue(check_encoded_file(ofile))\n clean_slnks()\n ofile = pjoin(output_dir,\"wave.movie.stereo.slow.sm\")\n encoding.encode(iframes_stereo,ofile,2,stereo=True)\n self.assertEqual(0,len(lst_slnks()))\n self.assertTrue(check_encoded_file(ofile))\n def test_stereo_uneven_frames_error(self):\n self.assertRaises(VisItException, encoding.encode, iframes,\n pjoin(output_dir,\"wave.movie.stereo.bad.sm\"),\n stereo=True)\n def test_extract(self):\n if \"mpg\" in encoding.encoders():\n eframes = pjoin(output_dir,\"extract_out_%04d.png\")\n encoding.encode(iframes,pjoin(output_dir,\"wave.movie.mpg\"))\n encoding.extract(pjoin(output_dir,\"wave.movie.mpg\"),eframes)\n ofile = pjoin(output_dir,\"wave.movie.extract.and.reencode.mpg\")\n encoding.encode(eframes,ofile)\n self.assertTrue(check_encoded_file(ofile))\n def test_pre_lr_stereo(self):\n if \"divx\" in encoding.encoders():\n iframes = pjoin(iframes_dir,\"noise.stereo.left.right.1080p.%04d.png\")\n ofile = 
pjoin(output_dir,\"noise.movie.stereo.pre.left.right.avi\")\n encoding.encode(iframes,ofile,etype=\"divx\")\n self.assertTrue(check_encoded_file(ofile))\n def test_short_symlinks(self):\n if \"mpg\" in encoding.encoders():\n clean_slnks()\n ofile = pjoin(output_dir,\"wave.movie.test.seq.pattern.03d.mpg\")\n encoding.encode(iframes_short_a,ofile,3)\n self.assertEqual(0,len(lst_slnks()))\n self.assertTrue(check_encoded_file(ofile))\n clean_slnks()\n ofile = pjoin(output_dir,\"wave.movie.test.seq.pattern.d.mpg\")\n encoding.encode(iframes_short_b,ofile,5)\n self.assertEqual(0,len(lst_slnks()))\n self.assertTrue(check_encoded_file(ofile))\n def test_ffmpeg_input_frame_rate(self):\n for enc in [\"wmv\",\"mpg\",\"divx\",\"mov\",\"swf\",\"mp4\"]:\n if enc in encoding.encoders():\n clean_slnks()\n ofile = pjoin(output_dir,\"wave.movie.input_frame_rate.%s\" % enc)\n encoding.encode(iframes,ofile,input_frame_rate=5)\n self.assertEqual(0,len(lst_slnks()))\n self.assertTrue(check_encoded_file(ofile))\n def test_ffmpeg_input_and_output_frame_rate(self):\n for enc in [\"wmv\",\"mov\"]:\n if enc in encoding.encoders():\n clean_slnks()\n ofile = pjoin(output_dir,\"wave.movie.input_and_output_frame_rate.%s\" % enc)\n encoding.encode(iframes,ofile,input_frame_rate=5,output_frame_rate=30)\n self.assertEqual(0,len(lst_slnks()))\n self.assertTrue(check_encoded_file(ofile))\n def test_ffmpeg_reencode_new_format(self):\n encoders = encoding.encoders() \n if \"mpg\" in encoders and \"wmv\" in encoders:\n clean_slnks()\n ofile_src = pjoin(output_dir,\"wave.movie.reencode.src.mpg\")\n ofile_des = pjoin(output_dir,\"wave.movie.reencode.src.wmv\")\n encoding.encode(iframes,ofile_src)\n encoding.encode(ofile_src,ofile_des)\n self.assertEqual(0,len(lst_slnks()))\n self.assertTrue(check_encoded_file(ofile_src))\n self.assertTrue(check_encoded_file(ofile_des))\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n"},"avg_line_length":{"kind":"number","value":42.4635761589,"string":"42.463576"},"max_line_length":{"kind":"number","value":109,"string":"109"},"alphanum_fraction":{"kind":"number","value":0.6305364941,"string":"0.630536"}}},{"rowIdx":46287,"cells":{"hexsha":{"kind":"string","value":"a33b8e97edb34bea127681c20fb239450206556d"},"size":{"kind":"number","value":3732,"string":"3,732"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Packs/ExpanseV2/Scripts/ExpanseAggregateAttributionUser/ExpanseAggregateAttributionUser.py"},"max_stars_repo_name":{"kind":"string","value":"diCagri/content"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":799,"string":"799"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2016-08-02T06:43:14.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T11:10:11.000Z"},"max_issues_repo_path":{"kind":"string","value":"Packs/ExpanseV2/Scripts/ExpanseAggregateAttributionUser/ExpanseAggregateAttributionUser.py"},"max_issues_repo_name":{"kind":"string","value":"diCagri/content"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_issues_count":{"kind":"number","value":9317,"string":"9,317"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2016-08-07T19:00:51.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2022-03-31T21:56:04.000Z"},"max_forks_repo_path":{"kind":"string","value":"Packs/ExpanseV2/Scripts/ExpanseAggregateAttributionUser/ExpanseAggregateAttributionUser.py"},"max_forks_repo_name":{"kind":"string","value":"diCagri/content"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c532c50b213e6dddb8ae6a378d6d09198e08fc9f"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":1297,"string":"1,297"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2016-08-04T13:59:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-31T23:43:06.000Z"},"content":{"kind":"string","value":"\"\"\"ExpanseAggregateAttributionUser\n\n\"\"\"\n\nimport demistomock as demisto\nfrom CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import\nfrom CommonServerUserPython import * # noqa\n\nfrom typing import Dict, List, Any, Tuple, Optional\nimport traceback\n\n\n''' STANDALONE FUNCTION '''\n\n\ndef deconstruct_entry(entry: Dict[str, str],\n username_fields: List[str],\n sightings_fields: List[str]) -> Tuple[Optional[str],\n Optional[str],\n Optional[int]]:\n \"\"\"\n deconstruct_entry\n Extracts device relevant fields from a log entry.\n\n :type entry: ``Dict[str, str]``\n :param entry: Log entry as dictionary of fields.\n\n :type sightings_fields: ``List[str]``\n :param sightings_fields: List of possible field names in log entry to be considered as number of occurences.\n\n :type username_fields: ``List[str]``\n :param username_fields: List of possible field names in log entry to be considered as username.\n\n :return: Tuple where the first element is the username or None, the second is the domain extracted from the\n username field and the third element is the number of occurences of the event.\n :rtype: ``Tuple[Optional[str], Optional[str], Optional[int]]``\n \"\"\"\n username = next((entry[field] for field in username_fields if field in entry), None)\n sightings = next((int(entry[field]) for field in sightings_fields if field in entry), 1)\n\n domain = None\n if username is not None and \"\\\\\" in username:\n domain, username = username.split(\"\\\\\", 1)\n\n return username, domain, sightings\n\n\n''' COMMAND FUNCTION '''\n\n\ndef aggregate_command(args: Dict[str, Any]) -> CommandResults:\n input_list = argToList(args.get('input', []))\n current_list = argToList(args.get('current', []))\n\n username_fields = argToList(args.get('username_fields', \"source_user,srcuser,user\"))\n sightings_fields = argToList(args.get('sightings_fields', \"count\"))\n\n current_users = {\n f\"{d['username']}::{d['domain']}\": d\n for d in current_list if d is not None\n }\n\n for entry in input_list:\n if not isinstance(entry, dict):\n continue\n\n username, domain, sightings = deconstruct_entry(\n entry,\n username_fields=username_fields,\n sightings_fields=sightings_fields\n )\n\n if username is None:\n continue\n if domain is None:\n domain = \"\"\n\n user_key = f\"{username}::{domain}\"\n current_state = current_users.get(user_key, None)\n if current_state is None:\n current_state = {\n 'username': username,\n 'domain': domain,\n 'sightings': 0,\n 'groups': [],\n 'description': None,\n }\n current_users[user_key] = current_state\n\n if 
sightings is not None:\n current_state['sightings'] += sightings\n\n markdown = '## ExpanseAggregateAttributionUser'\n outputs = list(current_users.values())\n\n return CommandResults(\n readable_output=markdown,\n outputs=outputs or None,\n outputs_prefix=\"Expanse.AttributionUser\",\n outputs_key_field=[\"username\", \"domain\"]\n )\n\n\n''' MAIN FUNCTION '''\n\n\ndef main():\n try:\n return_results(aggregate_command(demisto.args()))\n except Exception as ex:\n demisto.error(traceback.format_exc()) # print the traceback\n return_error(f'Failed to execute ExpanseAggregateAttributionUser. Error: {str(ex)}')\n\n\n''' ENTRY POINT '''\n\n\nif __name__ in ('__main__', '__builtin__', 'builtins'):\n main()\n"},"avg_line_length":{"kind":"number","value":31.1,"string":"31.1"},"max_line_length":{"kind":"number","value":112,"string":"112"},"alphanum_fraction":{"kind":"number","value":0.6205787781,"string":"0.620579"}}},{"rowIdx":46288,"cells":{"hexsha":{"kind":"string","value":"a3a247716b05181eb2ad340dbb31ef8d5f76c0d0"},"size":{"kind":"number","value":5288,"string":"5,288"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"V1/utils/models.py"},"max_stars_repo_name":{"kind":"string","value":"marsXyr/GESRL"},"max_stars_repo_head_hexsha":{"kind":"string","value":"3d60dfd4ffa1e0ae24d64b09f431d8ee0a9b5c01"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"V1/utils/models.py"},"max_issues_repo_name":{"kind":"string","value":"marsXyr/GESRL"},"max_issues_repo_head_hexsha":{"kind":"string","value":"3d60dfd4ffa1e0ae24d64b09f431d8ee0a9b5c01"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"V1/utils/models.py"},"max_forks_repo_name":{"kind":"string","value":"marsXyr/GESRL"},"max_forks_repo_head_hexsha":{"kind":"string","value":"3d60dfd4ffa1e0ae24d64b09f431d8ee0a9b5c01"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"import numpy as np\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Adam\nimport torch.nn.functional as F\n\n\n#USE_CUDA = torch.cuda.is_available()\nUSE_CUDA = False\nif USE_CUDA:\n FloatTensor = torch.cuda.FloatTensor\nelse:\n FloatTensor = torch.FloatTensor\n\n\ndef to_numpy(var):\n return var.cpu().data.numpy()\n\n\ndef to_tensor(x):\n return torch.FloatTensor(x)\n\n\ndef soft_update(target, source, tau):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)\n\ndef hard_update(target, source):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)\n\n\nclass RLNN(nn.Module):\n\n def __init__(self, state_dim, action_dim):\n super(RLNN, self).__init__()\n self.state_dim = state_dim\n 
self.action_dim = action_dim\n\n def set_params(self, w):\n for i, param in enumerate(self.parameters()):\n param.data.copy_(torch.from_numpy(w).view(param.size()))\n\n def get_params(self):\n params = [to_numpy(v) for v in self.parameters()]\n return deepcopy(params[0])\n\n def get_grads(self):\n grads = [to_numpy(v.grad) for v in self.parameters()]\n return deepcopy(grads[0])\n\n def get_size(self):\n return self.get_params().shape[0]\n\n def load_model(self, filename, net_name):\n if filename is None:\n return\n\n self.load_state_dict(\n torch.load('{}/{}.pkl'.format(filename, net_name), map_location=lambda storage, loc: storage))\n\n def save_model(self, output, net_name):\n torch.save(self.state_dict(), '{}/{}.pkl'.format(output, net_name))\n\n\nclass LinearPolicy(RLNN):\n \"\"\"\n Linear policy class that computes action as .\n \"\"\"\n\n def __init__(self, state_dim, action_dim, max_action, args):\n super(LinearPolicy, self).__init__(state_dim, action_dim)\n\n self.l1 = nn.Linear(self.state_dim, self.action_dim, bias=False)\n\n self.optimizer = Adam(self.parameters(), lr=args.actor_lr)\n self.tau = args.tau\n # self.theta = args['theta']\n self.max_action = max_action\n if USE_CUDA:\n self.cuda()\n\n def forward(self, x):\n\n out = self.l1(x)\n\n # abs_out = torch.abs(out)\n # abs_out_sum = torch.sum(abs_out).view(-1, 1)\n # abs_out_mean = abs_out_sum / self.action_dim / self.theta\n # ones = torch.ones(abs_out_mean.size())\n # ones = ones.cuda()\n # mod = torch.where(abs_out_mean >= 1, abs_out_mean, ones)\n # out = out / mod\n #\n out = self.max_action * torch.tanh(out)\n\n return out\n\n def update(self, memory, batch_size, critic, policy_t):\n # Sample replay buffer\n states, _, _, _, _ = memory.sample(batch_size)\n\n # Compute actor loss\n policy_loss = -critic(states, self(states)).mean()\n\n # Optimize the policy\n self.optimizer.zero_grad()\n policy_loss.backward()\n grads = self.get_grads() # Get policy gradients\n self.optimizer.step()\n\n # Update the frozen target models\n for param, target_param in zip(self.parameters(), policy_t.parameters()):\n target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n\n return grads\n\n\nclass Critic(RLNN):\n\n def __init__(self, state_dim, action_dim, args):\n super(Critic, self).__init__(state_dim, action_dim)\n self.l1 = nn.Linear(state_dim + action_dim, 64)\n self.l2 = nn.Linear(64, 64)\n self.l3 = nn.Linear(64, 1)\n\n if args.layer_norm:\n self.n1 = nn.LayerNorm(64)\n self.n2 = nn.LayerNorm(64)\n self.layer_norm = args.layer_norm\n self.optimizer = Adam(self.parameters(), lr=args.critic_lr)\n self.tau = args.tau\n self.discount = args.discount\n if USE_CUDA:\n self.cuda()\n\n def forward(self, x, u):\n if not self.layer_norm:\n x = F.leaky_relu(self.l1(torch.cat([x, u], 1)))\n x = F.leaky_relu(self.l2(x))\n x = self.l3(x)\n\n else:\n x = F.leaky_relu(self.n1(self.l1(torch.cat([x, u], 1))))\n x = F.leaky_relu(self.n2(self.l2(x)))\n x = self.l3(x)\n\n return x\n\n def update(self, memory, batch_size, policy, critic_t):\n # Sample replay buffer\n states, n_states, actions, rewards, dones = memory.sample(batch_size)\n\n # Q target = reward + discount * Q(next_state, pi(next_state))\n with torch.no_grad():\n target_Q = critic_t(n_states, policy(n_states))\n target_Q = rewards + (1 - dones) * self.discount * target_Q\n\n # Get current Q estimate\n current_Q = self.forward(states, actions)\n\n # Compute critic loss\n critic_loss = nn.MSELoss()(current_Q, target_Q)\n\n # Optimize the critic\n 
self.optimizer.zero_grad()\n critic_loss.backward()\n self.optimizer.step()\n\n # Update the frozen target models\n for param, target_param in zip(self.parameters(), critic_t.parameters()):\n target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)\n"},"avg_line_length":{"kind":"number","value":30.2171428571,"string":"30.217143"},"max_line_length":{"kind":"number","value":106,"string":"106"},"alphanum_fraction":{"kind":"number","value":0.6172465961,"string":"0.617247"}}},{"rowIdx":46289,"cells":{"hexsha":{"kind":"string","value":"6e55b0a9a232735b589386d0f72b1b9579ed262b"},"size":{"kind":"number","value":460,"string":"460"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"packages/watchmen-dqc/src/watchmen_dqc/monitor/rule/disabled_rules.py"},"max_stars_repo_name":{"kind":"string","value":"Indexical-Metrics-Measure-Advisory/watchmen"},"max_stars_repo_head_hexsha":{"kind":"string","value":"c54ec54d9f91034a38e51fd339ba66453d2c7a6d"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"packages/watchmen-dqc/src/watchmen_dqc/monitor/rule/disabled_rules.py"},"max_issues_repo_name":{"kind":"string","value":"Indexical-Metrics-Measure-Advisory/watchmen"},"max_issues_repo_head_hexsha":{"kind":"string","value":"c54ec54d9f91034a38e51fd339ba66453d2c7a6d"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"packages/watchmen-dqc/src/watchmen_dqc/monitor/rule/disabled_rules.py"},"max_forks_repo_name":{"kind":"string","value":"Indexical-Metrics-Measure-Advisory/watchmen"},"max_forks_repo_head_hexsha":{"kind":"string","value":"c54ec54d9f91034a38e51fd339ba66453d2c7a6d"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from typing import List\n\nfrom watchmen_model.dqc import MonitorRuleCode\n\ndisabled_rules: List[MonitorRuleCode] = [\n\tMonitorRuleCode.RAW_MISMATCH_STRUCTURE, # ignored now\n\tMonitorRuleCode.FACTOR_MISMATCH_DATE_TYPE, # should be detected on pipeline run\n\tMonitorRuleCode.FACTOR_USE_CAST, # should be detected on pipeline run\n\tMonitorRuleCode.FACTOR_BREAKS_MONOTONE_INCREASING, # ignored now\n\tMonitorRuleCode.FACTOR_BREAKS_MONOTONE_DECREASING # ignored 
now\n]"},"avg_line_length":{"kind":"number","value":38.3333333333,"string":"38.333333"},"max_line_length":{"kind":"number","value":81,"string":"81"},"alphanum_fraction":{"kind":"number","value":0.8391304348,"string":"0.83913"}}},{"rowIdx":46290,"cells":{"hexsha":{"kind":"string","value":"287189638a67b6c452ceb33fb318213a3f4059c3"},"size":{"kind":"number","value":1049,"string":"1,049"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pyventskalender/tag15_loesung.py"},"max_stars_repo_name":{"kind":"string","value":"kopp/pyventskalender"},"max_stars_repo_head_hexsha":{"kind":"string","value":"6f6455f3c1db07f65a772b2716e4be95fbcd1804"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"pyventskalender/tag15_loesung.py"},"max_issues_repo_name":{"kind":"string","value":"kopp/pyventskalender"},"max_issues_repo_head_hexsha":{"kind":"string","value":"6f6455f3c1db07f65a772b2716e4be95fbcd1804"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"pyventskalender/tag15_loesung.py"},"max_forks_repo_name":{"kind":"string","value":"kopp/pyventskalender"},"max_forks_repo_head_hexsha":{"kind":"string","value":"6f6455f3c1db07f65a772b2716e4be95fbcd1804"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"from typing import Set, List, Optional\n\ntry:\n    from pyventskalender.tag14_loesung import VERLOREN_BEI_SO_VIELEN_FEHLERN\nexcept ImportError:\n    from tag14_loesung import VERLOREN_BEI_SO_VIELEN_FEHLERN\n\n\ndef ist_buchstabe(eingabe_von_nutzer: str) -> bool:\n    if len(eingabe_von_nutzer) != 1:\n        return False\n    return True\n\n\ndef ist_aufgeben(eingabe_von_nutzer: str) -> bool:\n    return eingabe_von_nutzer.lower() == \"ich gebe auf\"\n\n\ndef bewerte_geratenen_buchstaben(\n        buchstabe: str,\n        noch_gesuchte_buchstaben: Set[str],\n        falsch_geratene_buchstaben: List[str]\n    ) -> str:\n    if buchstabe in noch_gesuchte_buchstaben:\n        noch_gesuchte_buchstaben.remove(buchstabe)\n        if len(noch_gesuchte_buchstaben) == 0:\n            return \"gewonnen\"\n        else:\n            return \"richtig-geraten\"\n    else:\n        falsch_geratene_buchstaben.append(buchstabe)\n        if len(falsch_geratene_buchstaben) >= VERLOREN_BEI_SO_VIELEN_FEHLERN:\n            return \"verloren\"\n        else:\n            return 
\"falsch-geraten\"\n"},"avg_line_length":{"kind":"number","value":29.1388888889,"string":"29.138889"},"max_line_length":{"kind":"number","value":77,"string":"77"},"alphanum_fraction":{"kind":"number","value":0.7073403241,"string":"0.70734"}}},{"rowIdx":46291,"cells":{"hexsha":{"kind":"string","value":"c6e2e5f03e48ea9792a6dde89b285bc867447f51"},"size":{"kind":"number","value":702,"string":"702"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"pacman-arch/test/pacman/tests/replace110.py"},"max_stars_repo_name":{"kind":"string","value":"Maxython/pacman-for-termux"},"max_stars_repo_head_hexsha":{"kind":"string","value":"3b208eb9274cbfc7a27fca673ea8a58f09ebad47"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":23,"string":"23"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-05-21T19:11:06.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T18:14:20.000Z"},"max_issues_repo_path":{"kind":"string","value":"source/pacman-6.0.1/test/pacman/tests/replace110.py"},"max_issues_repo_name":{"kind":"string","value":"Scottx86-64/dotfiles-1"},"max_issues_repo_head_hexsha":{"kind":"string","value":"51004b1e2b032664cce6b553d2052757c286087d"},"max_issues_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_issues_count":{"kind":"number","value":11,"string":"11"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2021-05-21T12:08:44.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-12-21T08:30:08.000Z"},"max_forks_repo_path":{"kind":"string","value":"source/pacman-6.0.1/test/pacman/tests/replace110.py"},"max_forks_repo_name":{"kind":"string","value":"Scottx86-64/dotfiles-1"},"max_forks_repo_head_hexsha":{"kind":"string","value":"51004b1e2b032664cce6b553d2052757c286087d"},"max_forks_repo_licenses":{"kind":"list like","value":["Unlicense"],"string":"[\n \"Unlicense\"\n]"},"max_forks_count":{"kind":"number","value":1,"string":"1"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2021-09-26T08:44:40.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-09-26T08:44:40.000Z"},"content":{"kind":"string","value":"self.description = \"Replace a package with a file in 'backup' (local modified)\"\n# FS#24543\n\nlp = pmpkg(\"dummy\")\nlp.files = [\"etc/dummy.conf*\", \"bin/dummy\"]\nlp.backup = [\"etc/dummy.conf\"]\nself.addpkg2db(\"local\", lp)\n\nsp = pmpkg(\"replacement\")\nsp.replaces = [\"dummy\"]\nsp.files = [\"etc/dummy.conf\", \"bin/dummy*\"]\nsp.backup = [\"etc/dummy.conf\"]\nself.addpkg2db(\"sync\", sp)\n\nself.args = \"-Su\"\n\nself.addrule(\"!PKG_EXIST=dummy\")\nself.addrule(\"PKG_EXIST=replacement\")\n\nself.addrule(\"FILE_EXIST=etc/dummy.conf\")\nself.addrule(\"!FILE_MODIFIED=etc/dummy.conf\")\nself.addrule(\"!FILE_PACNEW=etc/dummy.conf\")\nself.addrule(\"!FILE_PACSAVE=etc/dummy.conf\")\n\nself.addrule(\"FILE_EXIST=bin/dummy\")\n\nself.expectfailure = 
True\n"},"avg_line_length":{"kind":"number","value":25.0714285714,"string":"25.071429"},"max_line_length":{"kind":"number","value":79,"string":"79"},"alphanum_fraction":{"kind":"number","value":0.7108262108,"string":"0.710826"}}},{"rowIdx":46292,"cells":{"hexsha":{"kind":"string","value":"05e635c35d7e288bb6171b843f14ec6a962152da"},"size":{"kind":"number","value":923,"string":"923"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Python/zzz_training_challenge/Python_Challenge/solutions/ch02_math/solutions/ex08_combinatorics.py"},"max_stars_repo_name":{"kind":"string","value":"Kreijeck/learning"},"max_stars_repo_head_hexsha":{"kind":"string","value":"eaffee08e61f2a34e01eb8f9f04519aac633f48c"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"Python/zzz_training_challenge/Python_Challenge/solutions/ch02_math/solutions/ex08_combinatorics.py"},"max_issues_repo_name":{"kind":"string","value":"Kreijeck/learning"},"max_issues_repo_head_hexsha":{"kind":"string","value":"eaffee08e61f2a34e01eb8f9f04519aac633f48c"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Python/zzz_training_challenge/Python_Challenge/solutions/ch02_math/solutions/ex08_combinatorics.py"},"max_forks_repo_name":{"kind":"string","value":"Kreijeck/learning"},"max_forks_repo_head_hexsha":{"kind":"string","value":"eaffee08e61f2a34e01eb8f9f04519aac633f48c"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# Example program for the book \"Python Challenge\"\r\n#\r\n# Copyright 2020 by Michael Inden\r\n\r\nimport math\r\n\r\n\r\ndef solve_quadratic_simple():\r\n    for a in range(1, 100):\r\n        for b in range(1, 100):\r\n            for c in range(1, 100):\r\n                # if a ** 2 + b ** 2 == c ** 2:\r\n                if a * a + b * b == c * c:\r\n                    print(\"a =\", a, \"/ b =\", b, \"/ c =\", c)\r\n\r\n\r\ndef solve_quadratic():\r\n    for a in range(1, 100):\r\n        for b in range(1, 100):\r\n            c = int(math.sqrt(a * a + b * b))\r\n            if c < 100 and a * a + b * b == c * c:\r\n                print(\"a =\", a, \"/ b =\", b, \"/ c =\", c)\r\n\r\n\r\ndef solve_quadratic_shorter():\r\n    return [(a, b, c) for a in range(1, 100) for b in range(1, 100)\r\n            for c in range(1, 100) if a * a + b * b == c * c]\r\n\r\n\r\ndef main():\r\n    solve_quadratic_simple()\r\n    solve_quadratic()\r\n\r\n    print(solve_quadratic_shorter())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    
main()\n"},"avg_line_length":{"kind":"number","value":23.6666666667,"string":"23.666667"},"max_line_length":{"kind":"number","value":67,"string":"67"},"alphanum_fraction":{"kind":"number","value":0.4842903575,"string":"0.48429"}}},{"rowIdx":46293,"cells":{"hexsha":{"kind":"string","value":"af6aa1df80b339741689942f942062f889d05ae8"},"size":{"kind":"number","value":8733,"string":"8,733"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Liquid-optimizer/rf.py"},"max_stars_repo_name":{"kind":"string","value":"PasaLab/YAO"},"max_stars_repo_head_hexsha":{"kind":"string","value":"2e70203197cd79f9522d65731ee5dc0eb236b005"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2021-08-30T14:12:09.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-01-20T02:14:22.000Z"},"max_issues_repo_path":{"kind":"string","value":"Liquid-optimizer/rf.py"},"max_issues_repo_name":{"kind":"string","value":"PasaLab/YAO"},"max_issues_repo_head_hexsha":{"kind":"string","value":"2e70203197cd79f9522d65731ee5dc0eb236b005"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"Liquid-optimizer/rf.py"},"max_forks_repo_name":{"kind":"string","value":"PasaLab/YAO"},"max_forks_repo_head_hexsha":{"kind":"string","value":"2e70203197cd79f9522d65731ee5dc0eb236b005"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# _*_coding:utf-8_*_\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\n\r\n\r\ndef load_data(trainfile, testfile):\r\n\ttraindata = pd.read_csv(trainfile)\r\n\ttestdata = pd.read_csv(testfile)\r\n\tfeature_data = traindata.iloc[:, 1:-1]\r\n\tlabel_data = traindata.iloc[:, -1]\r\n\ttest_feature = testdata.iloc[:, 1:-1]\r\n\ttest_label = testdata.iloc[:, -1]\r\n\treturn feature_data, label_data, test_feature, test_label\r\n\r\n\r\ndef random_forest_train(feature_data, label_data, test_feature):\r\n\tfrom sklearn.ensemble import RandomForestRegressor\r\n\tfrom sklearn.model_selection import train_test_split\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\r\n\tX_train, X_test, y_train, y_test = train_test_split(feature_data, label_data, test_size=0.01)\r\n\tparams = {\r\n\t\t'n_estimators': 70,\r\n\t\t'max_depth': 13,\r\n\t\t'min_samples_split': 10,\r\n\t\t'min_samples_leaf': 5, # 10\r\n\t\t'max_features': len(X_train.columns)\r\n\t}\r\n\t# print(X_test)\r\n\tmodel = RandomForestRegressor(**params)\r\n\tmodel.fit(X_train, y_train)\r\n\t# Predict on the test set\r\n\ty_pred = model.predict(X_test)\r\n\t# Compute the accuracy\r\n\tMSE = mean_squared_error(y_test, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\t# print(abs(y_test - y_pred) / y_test)\r\n\t# print(RMSE)\r\n\t'''\r\n\tsubmit = pd.read_csv(submitfile)\r\n\tprint(submit)\r\n\tsubmit['CPU'] = model.predict(test_feature)\r\n\tsubmit.to_csv('my_random_forest_prediction1.csv', 
index=False)\r\n\tprint(submit)\r\n\tprint(model.predict(test_feature))\r\n\t'''\r\n\treturn model.predict(test_feature)\r\n\r\n\r\ndef linear_regression_train(feature_data, label_data, test_feature):\r\n\tfrom sklearn.linear_model import LinearRegression\r\n\tfrom sklearn.model_selection import train_test_split\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\r\n\tX_train, X_test, y_train, y_test = train_test_split(feature_data, label_data, test_size=0.01)\r\n\tparams = {}\r\n\t# print(X_test)\r\n\tmodel = LinearRegression(**params)\r\n\tmodel.fit(X_train, y_train)\r\n\t# Predict on the test set\r\n\ty_pred = model.predict(X_test)\r\n\t# Compute the accuracy\r\n\tMSE = mean_squared_error(y_test, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\t# print(abs(y_test - y_pred) / y_test)\r\n\t# print(RMSE)\r\n\treturn model.predict(test_feature)\r\n\r\n\r\ndef adaboost_train(feature_data, label_data, test_feature):\r\n\tfrom sklearn.ensemble import AdaBoostRegressor\r\n\tfrom sklearn.model_selection import train_test_split\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\r\n\tX_train, X_test, y_train, y_test = train_test_split(feature_data, label_data, test_size=0.01)\r\n\tparams = {}\r\n\t# print(X_test)\r\n\tmodel = AdaBoostRegressor(**params)\r\n\tmodel.fit(X_train, y_train)\r\n\t# Predict on the test set\r\n\ty_pred = model.predict(X_test)\r\n\t# Compute the accuracy\r\n\tMSE = mean_squared_error(y_test, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\t# print(abs(y_test - y_pred) / y_test)\r\n\t# print(RMSE)\r\n\treturn model.predict(test_feature)\r\n\r\n\r\ndef gbdt_train(feature_data, label_data, test_feature):\r\n\tfrom sklearn.ensemble import GradientBoostingRegressor\r\n\tfrom sklearn.model_selection import train_test_split\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\r\n\tX_train, X_test, y_train, y_test = train_test_split(feature_data, label_data, test_size=0.01)\r\n\tparams = {\r\n\t\t'loss': 'ls',\r\n\t\t'n_estimators': 70,\r\n\t\t'max_depth': 13,\r\n\t\t'min_samples_split': 10,\r\n\t\t'min_samples_leaf': 5, # 10\r\n\t\t'max_features': len(X_train.columns)\r\n\t}\r\n\t# print(X_test)\r\n\tmodel = GradientBoostingRegressor(**params)\r\n\tmodel.fit(X_train, y_train)\r\n\t# Predict on the test set\r\n\ty_pred = model.predict(X_test)\r\n\t# Compute the accuracy\r\n\tMSE = mean_squared_error(y_test, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\t# print(abs(y_test - y_pred) / y_test)\r\n\t# print(RMSE)\r\n\treturn model.predict(test_feature)\r\n\r\n\r\ndef decision_tree_train(feature_data, label_data, test_feature):\r\n\tfrom sklearn.tree import DecisionTreeRegressor\r\n\tfrom sklearn.model_selection import train_test_split\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\r\n\tX_train, X_test, y_train, y_test = train_test_split(feature_data, label_data, test_size=0.01)\r\n\tparams = {\r\n\t\t'max_depth': 13,\r\n\t}\r\n\t# print(X_test)\r\n\tmodel = DecisionTreeRegressor(**params)\r\n\tmodel.fit(X_train, y_train)\r\n\t# Predict on the test set\r\n\ty_pred = model.predict(X_test)\r\n\t# Compute the accuracy\r\n\tMSE = mean_squared_error(y_test, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\t# print(abs(y_test - y_pred) / y_test)\r\n\t# print(RMSE)\r\n\treturn model.predict(test_feature)\r\n\r\n\r\ndef random_forest_parameter_tuning1(feature_data, label_data):\r\n\tfrom sklearn.ensemble import RandomForestRegressor\r\n\tfrom sklearn.model_selection import train_test_split\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\tfrom sklearn.model_selection import GridSearchCV\r\n\r\n\tX_train, X_test, y_train, y_test = train_test_split(feature_data, label_data, test_size=0.01)\r\n\tparam_test1 = 
{\r\n\t\t'n_estimators': range(10, 71, 10)\r\n\t}\r\n\tmodel = GridSearchCV(estimator=RandomForestRegressor(\r\n\t\tmin_samples_split=100, min_samples_leaf=20, max_depth=8, max_features='sqrt',\r\n\t\trandom_state=10), param_grid=param_test1, cv=5\r\n\t)\r\n\tmodel.fit(X_train, y_train)\r\n\t# Predict on the test set\r\n\ty_pred = model.predict(X_test)\r\n\t# Compute the accuracy\r\n\tMSE = mean_squared_error(y_test, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\tprint(RMSE)\r\n\treturn model.best_score_, model.best_params_\r\n\r\n\r\ndef random_forest_parameter_tuning2(feature_data, label_data):\r\n\tfrom sklearn.ensemble import RandomForestRegressor\r\n\tfrom sklearn.model_selection import train_test_split\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\tfrom sklearn.model_selection import GridSearchCV\r\n\r\n\tX_train, X_test, y_train, y_test = train_test_split(feature_data, label_data, test_size=0.01)\r\n\tparam_test2 = {\r\n\t\t'max_depth': range(3, 14, 2),\r\n\t\t'min_samples_split': range(50, 201, 20)\r\n\t}\r\n\tmodel = GridSearchCV(estimator=RandomForestRegressor(\r\n\t\tn_estimators=70, min_samples_leaf=20, max_features='sqrt', oob_score=True,\r\n\t\trandom_state=10), param_grid=param_test2, cv=5\r\n\t)\r\n\tmodel.fit(X_train, y_train)\r\n\t# Predict on the test set\r\n\ty_pred = model.predict(X_test)\r\n\t# Compute the accuracy\r\n\tMSE = mean_squared_error(y_test, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\tprint(RMSE)\r\n\treturn model.best_score_, model.best_params_\r\n\r\n\r\ndef random_forest_parameter_tuning3(feature_data, label_data, test_feature):\r\n\tfrom sklearn.ensemble import RandomForestRegressor\r\n\tfrom sklearn.model_selection import train_test_split\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\tfrom sklearn.model_selection import GridSearchCV\r\n\r\n\tX_train, X_test, y_train, y_test = train_test_split(feature_data, label_data, test_size=0.01)\r\n\tparam_test3 = {\r\n\t\t'min_samples_split': range(10, 90, 20),\r\n\t\t'min_samples_leaf': range(10, 60, 10),\r\n\t}\r\n\tmodel = GridSearchCV(estimator=RandomForestRegressor(\r\n\t\tn_estimators=70, max_depth=13, max_features='sqrt', oob_score=True,\r\n\t\trandom_state=10), param_grid=param_test3, cv=5\r\n\t)\r\n\tmodel.fit(X_train, y_train)\r\n\t# Predict on the test set\r\n\ty_pred = model.predict(X_test)\r\n\t# Compute the accuracy\r\n\tMSE = mean_squared_error(y_test, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\tprint(RMSE)\r\n\treturn model.best_score_, model.best_params_\r\n\r\n\r\ndef random_forest_parameter_tuning4(feature_data, label_data, test_feature):\r\n\tfrom sklearn.ensemble import RandomForestRegressor\r\n\tfrom sklearn.model_selection import train_test_split\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\tfrom sklearn.model_selection import GridSearchCV\r\n\r\n\tX_train, X_test, y_train, y_test = train_test_split(feature_data, label_data, test_size=0.01)\r\n\tparam_test4 = {\r\n\t\t'max_features': range(3, 9, 2)\r\n\t}\r\n\tmodel = GridSearchCV(estimator=RandomForestRegressor(\r\n\t\tn_estimators=70, max_depth=13, min_samples_split=10, min_samples_leaf=10, oob_score=True,\r\n\t\trandom_state=10), param_grid=param_test4, cv=5\r\n\t)\r\n\tmodel.fit(X_train, y_train)\r\n\t# Predict on the test set\r\n\ty_pred = model.predict(X_test)\r\n\t# Compute the accuracy\r\n\tMSE = mean_squared_error(y_test, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\tprint(RMSE)\r\n\treturn model.best_score_, model.best_params_\r\n\r\n\r\nif __name__ == '__main__':\r\n\talgorithm = os.getenv('algorithm', 'rf')\r\n\ttrainfile = 'data/train.csv'\r\n\ttestfile = 'data/test.csv'\r\n\tfeature_data, label_data, test_feature, test_label = 
load_data(trainfile, testfile)\r\n\tif algorithm == 'lr':\r\n\t\ty_pred = linear_regression_train(feature_data, label_data, test_feature)\r\n\telif algorithm == 'ada':\r\n\t\ty_pred = adaboost_train(feature_data, label_data, test_feature)\r\n\telif algorithm == 'gbdt':\r\n\t\ty_pred = gbdt_train(feature_data, label_data, test_feature)\r\n\telif algorithm == 'dt':\r\n\t\ty_pred = decision_tree_train(feature_data, label_data, test_feature)\r\n\telse:\r\n\t\ty_pred = random_forest_train(feature_data, label_data, test_feature)\r\n\r\n\tfrom sklearn.metrics import mean_squared_error\r\n\r\n\tMSE = mean_squared_error(test_label, y_pred)\r\n\tRMSE = np.sqrt(MSE)\r\n\tvar = np.var(test_label)\r\n\tr2 = 1 - MSE / var\r\n\t# print(abs(test_label - y_pred) / test_label)\r\n\tprint(RMSE, r2)\r\n\tdisplay_diff = os.getenv('display_diff', '0')\r\n\tif display_diff == '1':\r\n\t\tfor i in range(20):\r\n\t\t\tprint(\"{},{},{}\".format(test_label[i], y_pred[i], (y_pred[i] - test_label[i]) / test_label[i]))\r\n"},"avg_line_length":{"kind":"number","value":32.8308270677,"string":"32.830827"},"max_line_length":{"kind":"number","value":99,"string":"99"},"alphanum_fraction":{"kind":"number","value":0.7467078896,"string":"0.746708"}}},{"rowIdx":46294,"cells":{"hexsha":{"kind":"string","value":"59049206796b7df07f42606a9f63a197e9b7c8cc"},"size":{"kind":"number","value":2393,"string":"2,393"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"andinopy/interfaces/rfid_keyboard_interface.py"},"max_stars_repo_name":{"kind":"string","value":"andino-systems/andinopy"},"max_stars_repo_head_hexsha":{"kind":"string","value":"28fc09fbdd67dd690b9b3f80f03a05c342c777e1"},"max_stars_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_stars_count":{"kind":"null"},"max_stars_repo_stars_event_min_datetime":{"kind":"null"},"max_stars_repo_stars_event_max_datetime":{"kind":"null"},"max_issues_repo_path":{"kind":"string","value":"andinopy/interfaces/rfid_keyboard_interface.py"},"max_issues_repo_name":{"kind":"string","value":"andino-systems/andinopy"},"max_issues_repo_head_hexsha":{"kind":"string","value":"28fc09fbdd67dd690b9b3f80f03a05c342c777e1"},"max_issues_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"andinopy/interfaces/rfid_keyboard_interface.py"},"max_forks_repo_name":{"kind":"string","value":"andino-systems/andinopy"},"max_forks_repo_head_hexsha":{"kind":"string","value":"28fc09fbdd67dd690b9b3f80f03a05c342c777e1"},"max_forks_repo_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"max_forks_count":{"kind":"null"},"max_forks_repo_forks_event_min_datetime":{"kind":"null"},"max_forks_repo_forks_event_max_datetime":{"kind":"null"},"content":{"kind":"string","value":"# _ _ _\n# / \\ _ __ __| (_)_ __ ___ _ __ _ _\n# / _ \\ | '_ \\ / _` | | '_ \\ / _ \\| '_ \\| | | |\n# / ___ \\| | | | (_| | | | | | (_) | |_) | |_| |\n# /_/ \\_\\_| |_|\\__,_|_|_| |_|\\___/| .__/ \\__, |\n# |_| |___/\n# by Jakob Groß\nimport abc\nfrom andinopy import andinopy_logger\n\n\nclass rfid_keyboard_interface(abc.ABC):\n on_rfid_string = None\n on_function_button = None\n on_keyboard_button = None\n\n def __init__(self):\n self._rfid_buffer: str = \"\"\n 
self._rfid_mode: bool = False\n\n @abc.abstractmethod\n def start(self) -> None:\n raise NotImplementedError(\"meta class method not overwritten\")\n\n @abc.abstractmethod\n def stop(self) -> None:\n raise NotImplementedError(\"meta class method not overwritten\")\n\n @abc.abstractmethod\n def buzz_display(self, duration_ms: int) -> None:\n raise NotImplementedError(\"meta class method not overwritten\")\n\n @abc.abstractmethod\n def _send_to_controller(self, value: str) -> None:\n raise NotImplementedError(\"meta class method not overwritten\")\n\n def _on_char_received(self, char_received: str):\n if char_received != ' ' and char_received != '':\n andinopy_logger.debug(f\"received char from display: {char_received}\")\n if char_received == ':':\n if self._rfid_mode:\n self._rfid_mode = False\n if self.on_rfid_string is not None:\n self.on_rfid_string(self._rfid_buffer)\n self._rfid_buffer = \"\"\n\n else:\n self._rfid_mode = True\n elif self._rfid_mode:\n self._rfid_buffer += char_received\n elif 'a' <= char_received <= 'f':\n if self.on_function_button is not None:\n self.on_function_button(\"F\" + str(ord(char_received) - 96))\n elif '0' <= char_received <= '9':\n if self.on_keyboard_button is not None:\n self.on_keyboard_button(char_received)\n else:\n function_match = {\n '+': \"UP\",\n '-': \"DOWN\",\n 'o': \"OK\",\n 'x': \"ESC\",\n '<': \"DEL\",\n }\n self.on_function_button(function_match[char_received])\n"},"avg_line_length":{"kind":"number","value":36.2575757576,"string":"36.257576"},"max_line_length":{"kind":"number","value":81,"string":"81"},"alphanum_fraction":{"kind":"number","value":0.5173422482,"string":"0.517342"}}},{"rowIdx":46295,"cells":{"hexsha":{"kind":"string","value":"a731cb8565ca1f559b567aac3fbbbf4f9002bf3a"},"size":{"kind":"number","value":818,"string":"818"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"Hackerrank_problems/Organizing_container_of_balls/solution.py"},"max_stars_repo_name":{"kind":"string","value":"gbrls/CompetitiveCode"},"max_stars_repo_head_hexsha":{"kind":"string","value":"b6f1b817a655635c3c843d40bd05793406fea9c6"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":165,"string":"165"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2020-10-03T08:01:11.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2022-03-31T02:42:08.000Z"},"max_issues_repo_path":{"kind":"string","value":"Hackerrank_problems/Organizing_container_of_balls/solution.py"},"max_issues_repo_name":{"kind":"string","value":"gbrls/CompetitiveCode"},"max_issues_repo_head_hexsha":{"kind":"string","value":"b6f1b817a655635c3c843d40bd05793406fea9c6"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"number","value":383,"string":"383"},"max_issues_repo_issues_event_min_datetime":{"kind":"string","value":"2020-10-03T07:39:11.000Z"},"max_issues_repo_issues_event_max_datetime":{"kind":"string","value":"2021-11-20T07:06:35.000Z"},"max_forks_repo_path":{"kind":"string","value":"Hackerrank_problems/Organizing_container_of_balls/solution.py"},"max_forks_repo_name":{"kind":"string","value":"gbrls/CompetitiveCode"},"max_forks_repo_head_hexsha":{"kind":"string","value":"b6f1b817a655635c3c843d40bd05793406fea9c6"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"max_forks_count":{"kind":"number","value":380,"string":"380"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2020-10-03T08:05:04.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2022-03-19T06:56:59.000Z"},"content":{"kind":"string","value":"# Logic\n# The required solution can be obtained by simply sorting the arrays. After sorting check if the arrays are exactly same or not.\n# If the arrays are same, it's possible to obtain the desired configuration, otherwise it's impossible.\n\ndef organizingContainers(container):\n rows = [sum(x) for x in container]\n cols = [sum(y) for y in zip(*container)]\n rows, cols = sorted(rows), sorted(cols)\n if(all(x == y for x, y in zip(rows, cols))):\n return \"Possible\"\n else:\n return \"Impossible\"\n\n\nif __name__ == '__main__':\n\n q = int(input())\n for q_itr in range(q):\n n = int(input())\n container = []\n\n for _ in range(n):\n container.append(list(map(int, input().rstrip().split())))\n\n result = organizingContainers(container)\n\n print(result)\n"},"avg_line_length":{"kind":"number","value":29.2142857143,"string":"29.214286"},"max_line_length":{"kind":"number","value":128,"string":"128"},"alphanum_fraction":{"kind":"number","value":0.6332518337,"string":"0.633252"}}},{"rowIdx":46296,"cells":{"hexsha":{"kind":"string","value":"abb1a975a2d32cd0ef7a733a84328f95433e4943"},"size":{"kind":"number","value":1078,"string":"1,078"},"ext":{"kind":"string","value":"py"},"lang":{"kind":"string","value":"Python"},"max_stars_repo_path":{"kind":"string","value":"devices/device.py"},"max_stars_repo_name":{"kind":"string","value":"MiaranaDIY/Salamigal"},"max_stars_repo_head_hexsha":{"kind":"string","value":"44ac98fa6463d46694e1f9343a0ebc788e7a88f8"},"max_stars_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"max_stars_repo_stars_event_min_datetime":{"kind":"string","value":"2017-08-02T12:26:34.000Z"},"max_stars_repo_stars_event_max_datetime":{"kind":"string","value":"2021-01-13T01:06:26.000Z"},"max_issues_repo_path":{"kind":"string","value":"devices/device.py"},"max_issues_repo_name":{"kind":"string","value":"MiaranaDIY/Salamigal"},"max_issues_repo_head_hexsha":{"kind":"string","value":"44ac98fa6463d46694e1f9343a0ebc788e7a88f8"},"max_issues_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_issues_count":{"kind":"null"},"max_issues_repo_issues_event_min_datetime":{"kind":"null"},"max_issues_repo_issues_event_max_datetime":{"kind":"null"},"max_forks_repo_path":{"kind":"string","value":"devices/device.py"},"max_forks_repo_name":{"kind":"string","value":"MiaranaDIY/Salamigal"},"max_forks_repo_head_hexsha":{"kind":"string","value":"44ac98fa6463d46694e1f9343a0ebc788e7a88f8"},"max_forks_repo_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"max_forks_count":{"kind":"number","value":3,"string":"3"},"max_forks_repo_forks_event_min_datetime":{"kind":"string","value":"2017-02-14T22:10:00.000Z"},"max_forks_repo_forks_event_max_datetime":{"kind":"string","value":"2021-01-02T14:26:43.000Z"},"content":{"kind":"string","value":"import time\n\nclass Device:\n#Global variable\n instant_count = 0\n def __init__(self):\n #Increment instant counter\n Device.instant_count += 1\n #load watt for power usage calculation and device property\n self.load_watt = 0\n self.name = 'Device'\n self.location = 'Location'\n self.group = 'Group'\n self.streaming = 0\n self.state = 1\n 
        self.started_time = time.time()

    #Set device load watt
    def set_watt(self, lw = 0):
        try:
            self.load_watt = int(lw)
            return lw
        except Exception as err:
            return None
        pass

    #Get device ON time to calculate power usage (Hours)
    def get_ontime(self):
        if(self.state):
            return (int(time.time()) - int(self.started_time)) / 60 / 60
        else:
            return 0

    #Calculate power usage in Wh
    def get_usage(self):
        try:
            return self.get_ontime() * int(self.load_watt)
        except Exception as err:
            return None
        pass

# experimental/BibTeX/tst_BibTeX_grammar.py (jecki/DHParser, Apache-2.0)

#!/usr/bin/env python3

"""tst_BibTeX_grammar.py - runs the unit tests for the BibTeX grammar

Author: Eckhart Arnold

Copyright 2017 Bavarian Academy of Sciences and Humanities

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import os
import sys

scriptpath = os.path.dirname(__file__) or '.'
for path in (os.path.join('../../examples', '..'), '.'):
    fullpath = os.path.abspath(os.path.join(scriptpath, path))
    if fullpath not in sys.path:
        sys.path.append(fullpath)

try:
    from DHParser import configuration
    import DHParser.dsl
    from DHParser import testing
except ModuleNotFoundError:
    print('Could not import DHParser. Please adjust sys.path in file '
          '"%s" manually' % __file__)
    sys.exit(1)


if __name__ == "__main__":
    configuration.access_presets()
    configuration.set_preset_value('test_parallelization', True)
    configuration.finalize_presets()
    if not DHParser.dsl.recompile_grammar('BibTeX.ebnf', force=False):  # recompiles Grammar only if it has changed
        print('\nErrors while recompiling "BibTeX.ebnf":\n--------------------------------------\n\n')
        with open('BibTeX_ebnf_ERRORS.txt') as f:
            print(f.read())
        sys.exit(1)

    sys.path.append('')
    # must be appended after module creation, because otherwise an ImportError is raised under Windows

    from BibTeXParser import get_grammar, get_transformer

    error_report = testing.grammar_suite('test_grammar', get_grammar,
                                         get_transformer, report='REPORT', verbose=True)
    if error_report:
        print('\n')
        print(error_report)
        sys.exit(1)
    else:
        print('\nSUCCESS! All tests passed :-)')

# Packs/CommonScripts/Scripts/URLEncode/URLEncode.py (diCagri/content, MIT)

import demistomock as demisto
from CommonServerPython import *
from urllib.parse import quote, unquote

''' MAIN FUNCTION '''


def main(args):
    value = args.get('value')
    decoded_value = unquote(value)
    return quote(decoded_value)


if __name__ in ('__main__', '__builtin__', 'builtins'):
    try:
        return_results(main(demisto.args()))
    except Exception as exc:
        return_error(str(exc), error=exc)

# python/requests/advanced_usage/retry_on_failure.py (zeroam/TIL, MIT)

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry


retry_strategy = Retry(
    total=3,
    status_forcelist=[429, 500, 502, 503, 504],
    method_whitelist=['HEAD', 'GET', 'OPTIONS'],
    backoff_factor=1
)

adapter = HTTPAdapter(max_retries=retry_strategy)

http = requests.Session()
http.mount('https://', adapter)
http.mount('http://', adapter)

response = http.get('https://en.wikipedia.org/w/api.php')
print(response)
#Elliptic curve basics, tools for finding rational points, and ECDSA implementation.
#Brendan Cordy, 2015
from fractions import Fraction
from math import ceil, sqrt
from random import SystemRandom, randrange
from hashlib import sha256
from time import time
import pyotp
import datetime
#Affine Point (+Infinity) on an Elliptic Curve ---------------------------------------------------
class Point(object):
#Construct a point with two given coordinates.
def __init__(self, x, y):
self.x, self.y = x, y
self.inf = False
#Construct the point at infinity.
@classmethod
def atInfinity(cls):
P = cls(0, 0)
P.inf = True
return P
#The secp256k1 generator.
@classmethod
def secp256k1(cls):
return cls(55066263022277343669578718895168534326250603453777594175500187360389116729240,
32670510020758816978083085130507043184471273380659243275938904335757337482424)
def __str__(self):
if self.inf:
return 'Inf'
else:
return '(' + str(self.x) + ',' + str(self.y) + ')'
def __eq__(self,other):
if self.inf:
return other.inf
elif other.inf:
return self.inf
else:
return self.x == other.x and self.y == other.y
def is_infinite(self):
return self.inf
#Elliptic Curves over any Field ------------------------------------------------------------------
class Curve(object):
#Set attributes of a general Weierstrass cubic y^2 = x^3 + ax^2 + bx + c over any field.
def __init__(self, a, b, c, char, exp):
self.a, self.b, self.c = a, b, c
self.char, self.exp = char, exp
print(self)
f = open("otp_seed.txt", "r")
seed = f.readline().strip()
f.close()
self.hotp = pyotp.HOTP(seed)
def __str__(self):
#Cases for 0, 1, -1, and general coefficients in the x^2 term.
if self.a == 0:
aTerm = ''
elif self.a == 1:
aTerm = ' + x^2'
elif self.a == -1:
aTerm = ' - x^2'
elif self.a < 0:
aTerm = " - " + str(-self.a) + 'x^2'
else:
aTerm = " + " + str(self.a) + 'x^2'
#Cases for 0, 1, -1, and general coefficients in the x term.
if self.b == 0:
bTerm = ''
elif self.b == 1:
bTerm = ' + x'
elif self.b == -1:
bTerm = ' - x'
elif self.b < 0:
bTerm = " - " + str(-self.b) + 'x'
else:
bTerm = " + " + str(self.b) + 'x'
#Cases for 0, 1, -1, and general coefficients in the constant term.
if self.c == 0:
cTerm = ''
elif self.c < 0:
cTerm = " - " + str(-self.c)
else:
cTerm = " + " + str(self.c)
#Write out the nicely formatted Weierstrass equation.
self.eq = 'y^2 = x^3' + aTerm + bTerm + cTerm
#Print prettily.
if self.char == 0:
return self.eq + ' over Q'
elif self.exp == 1:
return self.eq + ' over ' + 'F_' + str(self.char)
else:
return self.eq + ' over ' + 'F_' + str(self.char) + '^' + str(self.exp)
#Compute the discriminant.
def discriminant(self):
a, b, c = self.a, self.b, self.c
return -4*a*a*a*c + a*a*b*b + 18*a*b*c - 4*b*b*b - 27*c*c
#Compute the order of a point on the curve.
def order(self, P):
Q = P
orderP = 1
#Add P to Q repeatedly until obtaining the identity (point at infinity).
while not Q.is_infinite():
Q = self.add(P,Q)
orderP += 1
return orderP
#List all multiples of a point on the curve.
def generate(self, P):
Q = P
orbit = [str(Point.atInfinity())]
#Repeatedly add P to Q, appending each (pretty printed) result.
while not Q.is_infinite():
orbit.append(str(Q))
Q = self.add(P,Q)
return orbit
#Double a point on the curve.
def double(self, P):
return self.add(P,P)
#Add P to itself k times.
def mult(self, P, k):
if P.is_infinite():
return P
elif k == 0:
return Point.atInfinity()
elif k < 0:
return self.mult(self.invert(P), -k)
else:
#Convert k to a bitstring and use peasant multiplication to compute the product quickly.
b = bin(k)[2:]
return self.repeat_additions(P, b, 1)
#Add efficiently by repeatedly doubling the given point, and adding the result to a running
#total when, after the ith doubling, the ith digit in the bitstring b is a one.
def repeat_additions(self, P, b, n):
if b == '0':
return Point.atInfinity()
elif b == '1':
return P
elif b[-1] == '0':
return self.repeat_additions(self.double(P), b[:-1], n+1)
elif b[-1] == '1':
return self.add(P, self.repeat_additions(self.double(P), b[:-1], n+1))
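    #For example, with k = 13 the bitstring is b = '1101', and the recursion
    #evaluates 13P as P + (4P + 8P), using three doublings and two additions
    #in place of twelve successive additions.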
#Returns a pretty printed list of points.
def show_points(self):
return [str(P) for P in self.get_points()]
#Generate a secure OTP based on minutes and seconds with a 10 second slack
def getRandomOTP(self):
now = datetime.datetime.now()
return int(self.hotp.at( (now.minute + 1) * (now.second // 10) ))
#Elliptic Curves over Prime Order Fields ---------------------------------------------------------
class CurveOverFp(Curve):
#Construct a Weierstrass cubic y^2 = x^3 + ax^2 + bx + c over Fp.
def __init__(self, a, b, c, p):
Curve.__init__(self, a, b, c, p, 1)
#The secp256k1 curve.
@classmethod
def secp256k1(cls):
return cls(0, 0, 7, 2**256-2**32-2**9-2**8-2**7-2**6-2**4-1)
def contains(self, P):
if P.is_infinite():
return True
else:
# print('\t', (P.y*P.y) % self.char)
# print('\t', (P.x*P.x*P.x + self.a*P.x*P.x + self.b*P.x + self.c) % self.char )
return (P.y*P.y) % self.char == (P.x*P.x*P.x + self.a*P.x*P.x + self.b*P.x + self.c) % self.char
def get_points(self):
#Start with the point at infinity.
points = [Point.atInfinity()]
#Just brute force the rest.
for x in range(self.char):
for y in range(self.char):
P = Point(x,y)
if (y*y) % self.char == (x*x*x + self.a*x*x + self.b*x + self.c) % self.char:
points.append(P)
return points
def invert(self, P):
if P.is_infinite():
return P
else:
return Point(P.x, -P.y % self.char)
def add(self, P_1, P_2):
        #Adding points over Fp can be done in exactly the same way as adding over Q,
        #but with all of the arithmetic now happening in Fp.
y_diff = (P_2.y - P_1.y) % self.char
x_diff = (P_2.x - P_1.x) % self.char
if P_1.is_infinite():
return P_2
elif P_2.is_infinite():
return P_1
elif x_diff == 0 and y_diff != 0:
return Point.atInfinity()
elif x_diff == 0 and y_diff == 0:
if P_1.y == 0:
return Point.atInfinity()
else:
ld = ((3*P_1.x*P_1.x + 2*self.a*P_1.x + self.b) * mult_inv(2*P_1.y, self.char)) % self.char
else:
ld = (y_diff * mult_inv(x_diff, self.char)) % self.char
nu = (P_1.y - ld*P_1.x) % self.char
x = (ld*ld - self.a - P_1.x - P_2.x) % self.char
y = (-ld*x - nu) % self.char
return Point(x,y)
#Extended Euclidean algorithm.
def euclid(sml, big):
#When the smaller value is zero, it's done, gcd = b = 0*sml + 1*big.
if sml == 0:
return (big, 0, 1)
else:
#Repeat with sml and the remainder, big%sml.
g, y, x = euclid(big % sml, sml)
#Backtrack through the calculation, rewriting the gcd as we go. From the values just
#returned above, we have gcd = y*(big%sml) + x*sml, and rewriting big%sml we obtain
#gcd = y*(big - (big//sml)*sml) + x*sml = (x - (big//sml)*y)*sml + y*big.
return (g, x - (big//sml)*y, y)
#Compute the multiplicative inverse mod n of a with 0 < a < n.
def mult_inv(a, n):
g, x, y = euclid(a, n)
#If gcd(a,n) is not one, then a has no multiplicative inverse.
if g != 1:
raise ValueError('multiplicative inverse does not exist')
#If gcd(a,n) = 1, and gcd(a,n) = x*a + y*n, x is the multiplicative inverse of a.
else:
return x % n
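#For example, euclid(3, 7) returns (1, -2, 1), since gcd(3,7) = 1 = -2*3 + 1*7,
#and so mult_inv(3, 7) = -2 % 7 = 5; indeed (3*5) % 7 == 1.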
#ECDSA functions ---------------------------------------------------------------------------------
#Use sha256 to hash a message, and return the hash value as an interger.
def hash(message):
# return int(sha256(message).hexdigest(), 16)
return int(sha256(str(message).encode('utf-8')).hexdigest(), 16)
#Hash the message and return an integer whose binary representation is the L leftmost bits
#of the hash value, where L is the bit length of n.
def hash_and_truncate(message, n):
h = hash(message)
b = bin(h)[2:len(bin(n))]
return int(b, 2)
#Generate a keypair using the point P of order n on the given curve. The private key is a
#positive integer d smaller than n, and the public key is Q = dP.
def generate_keypair(curve, P, n):
sysrand = SystemRandom()
d = sysrand.randrange(1, n)
Q = curve.mult(P, d)
return (d, Q)
#Create a digital signature for the string message using a given curve with a distinguished
#point P which generates a prime order subgroup of size n.
def sign(message, curve, P, n, keypair):
#Extract the private and public keys, and compute z by hashing the message.
d, Q = keypair # 130,(1341,1979)
z = hash_and_truncate(message, n) # 0xfb
#Choose a randomly selected secret point kP then compute r and s.
r, s = 0, 0
while r == 0 or s == 0:
k = curve.getRandomOTP()
R = curve.mult(P, k)
r = R.x % n
s = (mult_inv(k, n) * (z + r*d)) % n
print('ECDSA sig of \"' + message+ '\" : (Q, r, s) = (' + str(Q) + ', ' + str(r) + ', ' + str(s) + ')')
return (Q, r, s)
#Verify the string message is authentic, given an ECDSA signature generated using a curve with
#a distinguished point P that generates a prime order subgroup of size n.
def verify(message, curve, P, n, sig):
Q, r, s = sig
#Confirm that Q is on the curve.
if Q.is_infinite() or not curve.contains(Q):
return False
#Confirm that Q has order that divides n.
if not curve.mult(Q,n).is_infinite():
return False
#Confirm that r and s are at least in the acceptable range.
if r > n or s > n:
return False
#Compute z in the same manner used in the signing procedure,
#and verify the message is authentic.
z = hash_and_truncate(message, n)
w = mult_inv(s, n) % n
u_1, u_2 = z * w % n, r * w % n
C_1, C_2 = curve.mult(P, u_1), curve.mult(Q, u_2)
C = curve.add(C_1, C_2)
return r % n == C.x % n
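
#Example usage (a sketch: constructing a Curve reads a base32 OTP seed from the
#first line of 'otp_seed.txt', so that file must exist, and n below is the
#standard order of the secp256k1 generator):
#
# curve = CurveOverFp.secp256k1()
# P = Point.secp256k1()
# n = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
# keypair = generate_keypair(curve, P, n)
# sig = sign('Attack at dawn', curve, P, n, keypair)
# print(verify('Attack at dawn', curve, P, n, sig))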
# src/onegov/feriennet/redirects.py (politbuero-kampagnen/onegov-cloud, MIT)
from onegov.feriennet import FeriennetApp
from onegov.core.redirect import Redirect
@FeriennetApp.path(path='/angebote')
class AngeboteRedirect(Redirect):
to = '/activities'
@FeriennetApp.path(path='/angebot', absorb=True)
class AngebotRedirect(Redirect):
to = '/activity'
@FeriennetApp.path(path='/durchfuehrungen', absorb=True)
class DurchfuehrungenRedirect(Redirect):
to = '/occasions'
@FeriennetApp.path(path='/perioden')
class PeriodenRedirect(Redirect):
to = '/periods'
@FeriennetApp.path(path='/periode', absorb=True)
class PeriodeRedirect(Redirect):
to = '/period'
@FeriennetApp.path(path='/meine-buchungen')
class MeineBuchungenRedirect(Redirect):
to = '/my-bookings'
@FeriennetApp.path(path='/buchung', absorb=True)
class BuchungRedirect(Redirect):
to = '/booking'
@FeriennetApp.path(path='/zuteilungen')
class ZuteilungenRedirect(Redirect):
to = '/matching'
@FeriennetApp.path(path='/rechnungen')
class RechnungenRedirect(Redirect):
to = '/billing'
@FeriennetApp.path(path='/rechnungsaktion', absorb=True)
class RechnungsAktionRedirect(Redirect):
to = '/invoice-action'
@FeriennetApp.path(path='/meine-rechnungen')
class MeineRechnungenRedirect(Redirect):
to = '/my-bills'
@FeriennetApp.path(path='/teilnehmer')
class TeilnehmerRedirect(Redirect):
to = '/attendees'
@FeriennetApp.path(path='/mitteilungen')
class MitteilungenRedirect(Redirect):
to = '/notifications'
@FeriennetApp.path(path='/mitteilung', absorb=True)
class MitteilungRedirect(Redirect):
to = '/notification'
# modules/fadb/adbBin.py (Bym24v/FAS, MIT)
import os, threading
from subprocess import Popen, PIPE
from modules.fmongodb.fClient import FASClient
instFas = FASClient()
def runAdb():
# Path
path = os.getcwd() + "/tools/adb/./adb"
arg = "logcat"
arg2 = "-v"
arg3 = "brief"
# Open
p = Popen([path, arg, arg2, arg3], stdout=PIPE)
# Loop
for line in iter(p.stdout.readline, b''):
line = " ".join(line.split())
priority = line[0]
tag = line[2:line.find('(')]
pid = line[line.find('(')+1:line.find(')')]
msg = line[line.find(':')+2:: ]
#print 'PRIORITY: ' + priority + '\t TAG: ' + tag + '\t PID: ' + pid + '\t MESSAGE: ' + msg
#instFas.FasSaveRawData(priority, tag, pid, msg)
p.terminate()
def FASGetDevices():
# Path
path = os.getcwd() + "/tools/adb/./adb"
arg = "devices"
# Open
p = Popen([path, arg], stdout=PIPE)
# Loop
for line in iter(p.stdout.readline, b''):
        print(line)
#line = " ".join(line.split())
#priority = line[0]
#tag = line[2:line.find('(')]
#pid = line[line.find('(')+1:line.find(')')]
#msg = line[line.find(':')+2:: ]
#print 'PRIORITY: ' + priority + '\t TAG: ' + tag + '\t PID: ' + pid + '\t MESSAGE: ' + msg
#instFas.FasSaveRawData(priority, tag, pid, msg)
p.terminate()
def FASDevices():
# New Thread
t = threading.Thread(target=FASGetDevices)
t.start()
# Start adb
def FAStartADB():
# New Thread
t = threading.Thread(target=runAdb)
t.start()
from proxy.http.parser import HttpParser, httpParserTypes, httpParserStates
from proxy.http.proxy import HttpProxyBasePlugin
class PluginBase(HttpProxyBasePlugin):
'''Modified HttpProxyBasePlugin from proxy.py. Provides more functionality'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.request = HttpParser(httpParserTypes.REQUEST_PARSER)
self.response = HttpParser(httpParserTypes.RESPONSE_PARSER)
def before_upstream_connection(self, connection):
return self.intercept_connection(connection)
def handle_client_request(self, request):
rtval = self.intercept_request(request)
self.request = rtval
return rtval
def handle_upstream_chunk(self, chunk):
self.response.parse(chunk.tobytes())
if self.response.state == httpParserStates.COMPLETE:
self.intercept_response(self.response)
return memoryview(self.intercept_chunk(chunk.tobytes()))
def on_upstream_connection_close(self):
self.close_connection()
def intercept_connection(self, conn):
'''
Intercept Connection. The `conn` argument represents
HttpParser class from proxy.http.parser.
        Intercept the initial connection state and inspect
        the data before the connection is actually established.
        For TLS interception, it would most probably be a
        CONNECT request. You can check the host, port and other
        initial details in this section.
        Modified/Original `conn` must be returned in this function.
Returning `None` will drop the connection.
'''
return conn
def intercept_request(self, request):
'''
Intercept Request. The `request` argument represents
HttpParser class from proxy.http.parser
        Intercept the original request sent to the server. The
request can be modified at this stage.
You will see GET/POST/HEAD and other method requests
here. You can modify the request body and other
necessary details here.
        Modified/Original `request` must be returned in this function.
Returning `None` will not send the request to the server.
'''
return request
def intercept_chunk(self, chunk):
'''
Intercept chunks received from the server. The `chunk`
argument represents built-in `bytes` object from
python.
        Intercept the response in chunks as received from the server.
        It is not in its final form yet, so you can modify the received
        response here.
        You will see mostly plain bytes in this section as the data
        comes in. The chunk returned from this function is then appended
        to the response in `intercept_response`.
        Modified/Original `chunk` must be returned in this function.
Empty bytes object will append nothing to response
Return `None` will cancel the response at that very moment.
'''
return chunk
def intercept_response(self, response):
'''
Intercept response received from the server. The `response`
argument represents HttpParser from proxy.http.parser.
Intercept the response combined from the chunks received in
`intercept_chunk` method. The response can only be viewed
at this stage. It is not to be modified here
        NOTE: Changing or returning the response here won't have any effect
              on the original response. For modifying the response see the
              `intercept_chunk` method.
This function doesn't return anything.
'''
return response
def close_connection(self):
'''
        This method gets called when the connection has been closed.
        It takes no arguments.
The request and response both can be analyzed at this part
and can be accessed at `self.request` and `self.response`
respectively.
        Modifications made in here won't have any effect, but you can
        deduce other results in this section.
This function doesn't return anything and basically used for
analysis.
'''
pass
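

# A minimal illustrative subclass (a sketch built only on the hooks defined
# above; `request.host` is populated by HttpParser for proxied requests):
# log each outgoing request and pass it through unchanged.
class LoggingPlugin(PluginBase):
    def intercept_request(self, request):
        print('Request for:', request.host)
        return request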
# tool_discovery/ontology.py (FAIRplus/WP3_FAIR_tooling, Apache-2.0)
import biotools_API_querying as bAq
import pandas as pd
topics = ["ontology_annotation", "ontology_management","ontology_engineering","ontology_mapping"]
for topic in topics:
print(topic+"/n/n")
# parsing zooma results
terms_file='keywords/'+topic+'_EDAM_curated.csv'
ontology_edam_terms, terms_label_ontology, free_terms = bAq.parse_zooma_results(terms_file)
# queries
ontology_edam_results_general, ontology_edam_results_detailed = bAq.query_for_terms(ontology_edam_terms, True)
ontology_free_results_general, ontology_free_results_detailed = bAq.query_for_terms(free_terms, False)
# join results
ontology_joint_results = bAq.join_results(ontology_edam_results_detailed, ontology_free_results_detailed)
all_ontology_tools = bAq.merge_tools_lists([ontology_edam_results_general, ontology_free_results_general])
# arrange by term
tools_per_term_ontology = bAq.tools_per_term(ontology_edam_results_general)
tools_per_term_ontology = {terms_label_ontology[term]:tools_per_term_ontology[term] for term in tools_per_term_ontology.keys()}
tools_per_term_free_ontology = bAq.tools_per_term(ontology_free_results_general)
tools_per_term_free_ontology = {term:tools_per_term_free_ontology[term] for term in tools_per_term_free_ontology.keys()}
# ontology related annotations count. Free text and EDAM queries
matches_tools_ontology = bAq.count_matches_edam_free(tools_per_term_ontology, tools_per_term_free_ontology, all_ontology_tools)
ontology_annot_count_df = pd.DataFrame(list(matches_tools_ontology.items()), columns= ['tool',topic+'_count']).sort_values(topic+'_count', ascending=False)
#ontology_annot_count_df.to_csv("outputs/20210122_ontology_ranked_by_counts.csv")
# Rank tools using keywords and weight
ontology_ranked = "keywords/"+topic+"_EDAM_ranked.csv"
ranked_keys = bAq.read_ranking(ontology_ranked)
max_matches = max(ontology_annot_count_df[topic+'_count'])
df_ranked_tools = bAq.rank_tools(all_ontology_tools,ranked_keys, tools_per_term_ontology, tools_per_term_free_ontology, ontology_joint_results, max_matches)
df_ranked_tools.to_csv('outputs/ontology/'+topic+'_ranked_tools.csv', index=False)
# Aggregator/agg_gmap_transit_score.py (socialdistancingdashboard/virushack, Apache-2.0)
from coords_to_kreis import coords_convert
import boto3
import json
import time
from datetime import date, timedelta
import pandas as pd
import csv
import numpy as np
import settings
def aggregate(date):
s3_client = boto3.client('s3')
#date = date.today() - timedelta(days = 1)
#print(date)
data = pd.DataFrame()
#clientFirehose = boto3.client('firehose')
for x in range(9,19):
try:
response = s3_client.get_object(Bucket=settings.BUCKET, Key='googleplaces/{}/{}/{}/{}'.format(str(date.year).zfill(4), str(date.month).zfill(2), str(date.day).zfill(2), str(x).zfill(2)))
result = pd.DataFrame(json.loads(response["Body"].read()))
result["date"] = date
result["hour"] = x
data = data.append(result)
except Exception as e:
print("No gmap data for " + str(date) + " " + str(e))
return
def normal_popularity(row):
return row["populartimes"][row["date"].weekday()]["data"][row["hour"]]
def to_data(landkreis, date, relative_popularity, airquality_score,hystreet_score,cycle_score):
#['id', 'name', 'date', 'gmap_score', 'hystreet_score', 'cycle_score']
return {
'name': landkreis,
# todo time from request
'date': date,
'gmap_score' : relative_popularity
#"airquality_score" : airquality_score
#'hystreet_score' : hystreet_score
# 'cycle_score' : cycle_score
}
import ast
data["normal_popularity"] = data.apply(normal_popularity, axis = 1, result_type = "reduce")
data["relative_popularity"] = data["current_popularity"] / data["normal_popularity"]
data["coordinates"] = data["coordinates"].astype(str)
lat = []
lon = []
for index, row in data.iterrows():
lat.append(ast.literal_eval(row["coordinates"])["lat"])
lon.append(ast.literal_eval(row["coordinates"])["lng"])
data["lat"] = lat
data["lon"] = lon
#print(data)
data["ags"] = coords_convert(data)
data
data2 = data.loc[data["ags"].notna()]
result = data2.groupby("ags").apply(lambda x: np.average(x.relative_popularity, weights=x.normal_popularity))
result = pd.DataFrame(result)
result = result.reset_index()
result.columns = ["ags", "relative_popularity"]
list_results = []
for index, row in result.iterrows():
landkreis = row['ags']
relative_popularity = row['relative_popularity']
data_index = {
"landkreis": landkreis,
# todo time from request
#'date': str(date),
'gmap_score' : relative_popularity
#"airquality_score" : airquality_score
#'hystreet_score' : hystreet_score
# 'cycle_score' : cycle_score
}
list_results.append(data_index)
#print (data_index)
# clientFirehose.put_record(DeliveryStreamName='sdd-kinese-aggregator', Record={'Data':data_index })
#print(input)
return list_results
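
#Example invocation (a sketch; requires AWS credentials with read access to
#settings.BUCKET and Google Places snapshots for the chosen day):
#
# from datetime import date, timedelta
# scores = aggregate(date.today() - timedelta(days=1))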
# Copyright (C) 2015-2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from . import cfg
from . import objectmanager
from . import utils
from .cfg import BUS_NAME, BASE_INTERFACE, BASE_OBJ_PATH, MANAGER_OBJ_PATH
import threading
from . import cmdhandler
import time
import signal
import dbus
import dbus.mainloop.glib
from . import lvmdb
# noinspection PyUnresolvedReferences
from gi.repository import GLib
from .fetch import StateUpdate
from .manager import Manager
import traceback
import queue
from . import udevwatch
from .utils import log_debug, log_error
import argparse
import os
import sys
from .cmdhandler import LvmFlightRecorder
from .request import RequestEntry
class Lvm(objectmanager.ObjectManager):
def __init__(self, object_path):
super(Lvm, self).__init__(object_path, BASE_INTERFACE)
def process_request():
while cfg.run.value != 0:
# noinspection PyBroadException
try:
req = cfg.worker_q.get(True, 5)
log_debug(
"Running method: %s with args %s" %
(str(req.method), str(req.arguments)))
req.run_cmd()
log_debug("Method complete ")
except queue.Empty:
pass
except Exception:
st = traceback.format_exc()
utils.log_error("process_request exception: \n%s" % st)
def check_bb_size(value):
v = int(value)
if v < 0:
raise argparse.ArgumentTypeError(
"positive integers only ('%s' invalid)" % value)
return v
def install_signal_handlers():
# Because of the glib main loop stuff the python signal handler code is
# apparently not usable and we need to use the glib calls instead
signal_add = None
if hasattr(GLib, 'unix_signal_add'):
signal_add = GLib.unix_signal_add
elif hasattr(GLib, 'unix_signal_add_full'):
signal_add = GLib.unix_signal_add_full
if signal_add:
signal_add(GLib.PRIORITY_HIGH, signal.SIGHUP, utils.handler, signal.SIGHUP)
signal_add(GLib.PRIORITY_HIGH, signal.SIGINT, utils.handler, signal.SIGINT)
signal_add(GLib.PRIORITY_HIGH, signal.SIGUSR1, utils.handler, signal.SIGUSR1)
else:
log_error("GLib.unix_signal_[add|add_full] are NOT available!")
def main():
start = time.time()
# Add simple command line handling
parser = argparse.ArgumentParser()
parser.add_argument(
"--udev", action='store_true',
help="Use udev for updating state",
default=False,
dest='use_udev')
parser.add_argument(
"--debug", action='store_true',
help="Dump debug messages", default=False,
dest='debug')
parser.add_argument(
"--nojson", action='store_false',
help="Do not use LVM JSON output (disables lvmshell)", default=True,
dest='use_json')
parser.add_argument(
"--lvmshell", action='store_true',
help="Use the lvm shell, not fork & exec lvm",
default=False,
dest='use_lvm_shell')
parser.add_argument(
"--blackboxsize",
help="Size of the black box flight recorder, 0 to disable",
default=10,
type=check_bb_size,
dest='bb_size')
use_session = os.getenv('LVMDBUSD_USE_SESSION', False)
# Ensure that we get consistent output for parsing stdout/stderr
os.environ["LC_ALL"] = "C"
cfg.args = parser.parse_args()
cfg.create_request_entry = RequestEntry
# We create a flight recorder in cmdhandler too, but we replace it here
# as the user may be specifying a different size. The default one in
# cmdhandler is for when we are running other code with a different main.
cfg.blackbox = LvmFlightRecorder(cfg.args.bb_size)
if cfg.args.use_lvm_shell and not cfg.args.use_json:
log_error("You cannot specify --lvmshell and --nojson")
sys.exit(1)
# List of threads that we start up
thread_list = []
install_signal_handlers()
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
dbus.mainloop.glib.threads_init()
cmdhandler.set_execution(cfg.args.use_lvm_shell)
if use_session:
cfg.bus = dbus.SessionBus()
else:
cfg.bus = dbus.SystemBus()
# The base name variable needs to exist for things to work.
# noinspection PyUnusedLocal
base_name = dbus.service.BusName(BUS_NAME, cfg.bus)
cfg.om = Lvm(BASE_OBJ_PATH)
cfg.om.register_object(Manager(MANAGER_OBJ_PATH))
cfg.db = lvmdb.DataStore(cfg.args.use_json)
# Using a thread to process requests, we cannot hang the dbus library
# thread that is handling the dbus interface
thread_list.append(threading.Thread(target=process_request,
name='process_request'))
# Have a single thread handling updating lvm and the dbus model so we
# don't have multiple threads doing this as the same time
updater = StateUpdate()
thread_list.append(updater.thread)
cfg.load = updater.load
cfg.loop = GLib.MainLoop()
for thread in thread_list:
        thread.daemon = True
thread.start()
# Add udev watching
if cfg.args.use_udev:
log_debug('Utilizing udev to trigger updates')
# In all cases we are going to monitor for udev until we get an
# ExternalEvent. In the case where we get an external event and the user
# didn't specify --udev we will stop monitoring udev
udevwatch.add()
end = time.time()
log_debug(
'Service ready! total time= %.4f, lvm time= %.4f count= %d' %
(end - start, cmdhandler.total_time, cmdhandler.total_count),
'bg_black', 'fg_light_green')
try:
if cfg.run.value != 0:
cfg.loop.run()
udevwatch.remove()
for thread in thread_list:
thread.join()
except KeyboardInterrupt:
# If we are unable to register signal handler, we will end up here when
# the service gets a ^C or a kill -2 <parent pid>
utils.handler(signal.SIGINT)
return 0
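
# A conventional entry point (a sketch; upstream ships a separate launcher
# script that calls main(), so this is illustrative only):
#
# if __name__ == '__main__':
#     sys.exit(main())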
# Copyright (C) Microsoft Corporation. All rights reserved.
# This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ipmicmd_library import *
from bladeinfo_lib import *
from bladethermal_lib import fan_sub_parser
def get_server_health(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
ipmi_cmd = 'ocsoem redfish health'
cmdinterface = interface + ' ' + ipmi_cmd
output = call_ipmi(cmdinterface, "Server health")
healthrsp = {}
if completion_code.cc_key in output:
healthrsp[completion_code.cc_key] = completion_code.failure
healthrsp[completion_code.desc] = "get server health ipmi call error "
return healthrsp
if(output['status_code'] == 0) or (output['stdout']):
healthrsp = parse_health_response(output['stdout'])
return healthrsp
else:
errorData = output['stderr'].split('\n')
errorData = filter(None, errorData)
healthrsp[completion_code.cc_key] = completion_code.failure
for data in errorData:
if "Error" in data:
healthrsp[completion_code.desc] = data.split(":")[-1].strip()
elif completion_code.cc_key in data:
healthrsp[completion_code.ipmi_code] = data.split(":")[-1].strip()
else:
healthrsp[completion_code.desc] = data.strip()
break
if healthrsp[completion_code.desc] == "":
                healthrsp[completion_code.desc] = " ".join(errorData).strip()
return healthrsp
except Exception,e:
#log.exception("Exception error is: %s " %e)
healthrsp[completion_code.cc_key] = completion_code.failure
healthrsp[completion_code.desc] = "Get server health, Exception: ", e
return healthrsp
def parse_health_response(output):
try:
completionstate = True
healthrsp = {}
healthrsp[" Server Information"] = {}
healthrsp["CPU Information"] = {}
healthrsp["Memory Information"] = {}
healthrsp["PCIE Information"] = {}
healthrsp["Temperature Information"] = {}
healthrsp["FRU Information"] = {}
healthrsp["Fan Information"] = {}
healthrsp["Sensor Information"] = {}
        #split the combined health output into per-category sections
health = output.split('$')
healthdata = filter(None, health) #Remove empty data
if len(healthdata) == 0:
healthrsp[completion_code.cc_key] = completion_code.failure
healthrsp[completion_code.desc] = "health data is empty"
return healthrsp
else:
for value in healthdata:
object_data = value.split('\n')
object_value= filter(None, object_data)
# Skipping empty lists if any
if len(object_value) == 0:
break
else:
if object_value[0].lower().strip('-').strip() == "fru information":
fru_info = get_fru_info(object_value)
if completion_code.cc_key in fru_info.keys():
value = fru_info.pop(completion_code.cc_key,None)
if value == completion_code.failure:
completionstate &= False
healthrsp["FRU Information"] = fru_info
elif object_value[0].lower().strip('-').strip() == "cpu information":
cpu_info = get_cpu_info(object_value)
if completion_code.cc_key in cpu_info.keys():
value = cpu_info.pop(completion_code.cc_key,None)
if value == completion_code.failure:
completionstate &= False
healthrsp["CPU Information"] = cpu_info
elif object_value[0].lower().strip('-').strip() == "server information":
server_info = get_server_info(object_value)
if completion_code.cc_key in server_info.keys():
value = server_info.pop(completion_code.cc_key,None)
if value == completion_code.failure:
completionstate &= False
healthrsp[" Server Information"] = server_info
elif object_value[0].lower().strip('-').strip() == "memory information":
memory_data = value.split('*')
memory_value= filter(None, memory_data)
mem_info = get_memory_health(memory_value)
if completion_code.cc_key in mem_info.keys():
value = mem_info.pop(completion_code.cc_key,None)
if value == completion_code.failure:
completionstate &= False
healthrsp["Memory Information"] = mem_info
elif object_value[0].lower().strip('-').strip() == "pcie information":
pcie_data = value.split('*')
pcie_value= filter(None, pcie_data)
pcie_info = get_pcie_info(pcie_value)
if completion_code.cc_key in pcie_info.keys():
value = pcie_info.pop(completion_code.cc_key,None)
if value == completion_code.failure:
completionstate &= False
healthrsp["PCIE Information"] = pcie_info
elif object_value[0].lower().strip('-').strip() == "fan information":
del object_value[0] # deleting first record which is "-----Fan Information-------" string
fan_info = get_sensor_info(object_value, 'fan')
if completion_code.cc_key in fan_info.keys():
value = fan_info.pop(completion_code.cc_key,None)
if value == completion_code.failure:
completionstate &= False
healthrsp["Fan Information"] = fan_info
elif object_value[0].lower().strip('-').strip() == "temperature information":
del object_value[0] # deleting first record which is "-----Temperature Information-------" string
temp_info = get_sensor_info(object_value)
if completion_code.cc_key in temp_info.keys():
value = temp_info.pop(completion_code.cc_key,None)
if value == completion_code.failure:
completionstate &= False
healthrsp["Temperature Information"] = temp_info
elif object_value[0].lower().strip('-').strip() == "sensor information":
del object_value[0] # deleting first record which is "-----Sensor Information-------" string
sensor_info = get_sensor_info(object_value)
if completion_code.cc_key in sensor_info.keys():
value = sensor_info.pop(completion_code.cc_key,None)
if value == completion_code.failure:
completionstate &= False
healthrsp["Sensor Information"] = sensor_info
except Exception,e:
#log.exception("Exception error is: %s " %e)
healthrsp[completion_code.cc_key] = completion_code.failure
healthrsp[completion_code.desc] = "Get server health, Exception: ", e
return healthrsp
if completionstate:
healthrsp[completion_code.cc_key] = completion_code.success
else:
healthrsp[completion_code.cc_key] = completion_code.failure
return healthrsp
def get_memory_health(memory):
try:
completionstate = True
mem_rsp = {}
dimm_id = 1
for value in memory:
dimm_data = value.split('\n')
dimm_data = filter(None, dimm_data) # Removes empty strings
# Skipping empty lists if any
if len(dimm_data) == 0:
break
if len(dimm_data) == 2:
continue
mem_rsp[dimm_id] = {}
for value in dimm_data:
if "Completion Code:" in value:
completionstate &= False
elif "DimmId" in value:
mem_rsp[dimm_id]["Dimm Id"] = value.split(":")[-1].strip()
elif "Dimm Type" in value:
mem_rsp[dimm_id]["Dimm Type"] = value.split(":")[-1].strip()
elif "Dimm speed" in value:
mem_rsp[dimm_id]["Dimm Speed"] = value.split(":")[-1].strip()
elif "Dimm size" in value:
mem_rsp[dimm_id]["Dimm Size"] = value.split(":")[-1].strip()
elif "Dimm Status" in value:
mem_rsp[dimm_id]["Dimm Status"] = value.split(":")[-1].strip()
elif "Voltage" in value:
mem_rsp[dimm_id]["Dimm Voltage"] = value.split(":")[-1].strip()
elif "Running Speed" in value:
mem_rsp[dimm_id]["Running Speed"] = value.split(":")[-1].strip()
dimm_id = dimm_id + 1
except Exception,e:
mem_rsp[completion_code.cc_key] = completion_code.failure
mem_rsp[completion_code.desc] = "Get memory health, Exception: ", e
return mem_rsp
if completionstate:
mem_rsp[completion_code.cc_key] = completion_code.success
else:
mem_rsp[completion_code.cc_key] = completion_code.failure
return mem_rsp
def get_pcie_info(pcie):
try:
completionstate = True
pcie_rsp = {}
pcie_id = 1
for value in pcie:
pcie_data = value.split('\n')
pcie_data = filter(None, pcie_data) # Removes empty list
# Skipping empty lists if any
if len(pcie_data) == 0:
break
if len(pcie_data) == 2:
continue
pcie_rsp[pcie_id] = {}
for value in pcie_data:
if "Completion Code:" in value:
completionstate &= False
elif "PCIe Id" in value:
pcie_rsp[pcie_id]["PCIe Index"] = value.split(":")[-1].strip()
elif "PCIe Status" in value:
pcie_rsp[pcie_id]["PCIe Status"] = value.split(":")[-1].strip()
elif "PCIe Device" in value:
pcie_rsp[pcie_id]["State"] = value.split(":")[-1].strip()
elif "Device Id" in value:
pcie_rsp[pcie_id]["Device Id"] = value.split(":")[-1].strip()
elif "Vendor Id" in value:
pcie_rsp[pcie_id]["Vendor Id"] = value.split(":")[-1].strip()
elif "SubSystem Id" in value:
pcie_rsp[pcie_id]["SubSystem Id"] = value.split(":")[-1].strip()
elif "SubSystem vendor Id" in value:
pcie_rsp[pcie_id]["SubSystem vendor Id"] = value.split(":")[-1].strip()
pcie_id = pcie_id + 1
except Exception,e:
pcie_rsp[completion_code.cc_key] = completion_code.failure
pcie_rsp[completion_code.desc] = "Get PCIe health, Exception: ", e
return pcie_rsp
if completionstate:
pcie_rsp[completion_code.cc_key] = completion_code.success
else:
pcie_rsp[completion_code.cc_key] = completion_code.failure
return pcie_rsp
def get_server_info(server):
try:
completionstate = True
server_rsp = {}
for value in server:
if "Completion Code:" in value:
completionstate &= False
elif "Server communication type" in value:
server_type = value.split(":")[-1].strip()
if server_type == "IPMI":
server_rsp["Server Type"] = "C2010"
elif server_type == "REST":
server_rsp["Server Type"] = "J2010"
else:
server_rsp["Server Type"] = "Unknown"
completionstate &= False
elif "Slot Id" in value:
server_rsp["Server Slot ID"] = value.split(":")[-1].strip()
elif "System Power State" in value:
server_rsp["Server State"] = value.split(":")[-1].strip()
except Exception,e:
server_rsp[completion_code.cc_key] = completion_code.failure
server_rsp[completion_code.desc] = "Get Server Information, Exception: ", e
return server_rsp
if completionstate:
server_rsp[completion_code.cc_key] = completion_code.success
else:
server_rsp[completion_code.cc_key] = completion_code.failure
return server_rsp
def get_cpu_info(cpu):
try:
completionstate = True
cpursp = {}
cpursp["Processor-1"] = {}
cpursp["Processor-2"] = {}
for value in cpu:
if "Completion Code:" in value:
completionstate &= False
elif "Processor0 Type" in value:
cpursp["Processor-1"]["Processor Id"] = 0
cpursp["Processor-1"]["Processor Type"] = value.split(":")[-1].strip()
elif "Processor0 Frequency" in value:
cpursp["Processor-1"]["Processor Frequency"] = value.split(":")[-1].strip()
elif "Processor0 State" in value:
cpursp["Processor-1"]["ProcessorState"] = value.split(":")[-1].strip()
elif "Processor1 Type" in value:
cpursp["Processor-2"]["Processor Id"] = 1
cpursp["Processor-2"]["Processor Type"] = value.split(":")[-1].strip()
elif "Processor1 Frequency" in value:
cpursp["Processor-2"]["Processor Frequency"] = value.split(":")[-1].strip()
elif "Processor1 State" in value:
cpursp["Processor-2"]["ProcessorState"] = value.split(":")[-1].strip()
except Exception,e:
cpursp[completion_code.cc_key] = completion_code.failure
cpursp[completion_code.desc] = "Get CPU health, Exception: ", e
return cpursp
if completionstate:
cpursp[completion_code.cc_key] = completion_code.success
else:
cpursp[completion_code.cc_key] = completion_code.failure
return cpursp
def get_sensor_info(temp, sensortype = ''):
try:
completionstate = True
temp_rsp = {}
record_id = 1
for value in temp:
if "Completion Code:" in value:
completionstate &= False
# Skipping empty lists if any
if len(value) == 0:
break
val = value.split ("|")
sensor = {}
if sensortype == "fan":
if "pwm" in val[0].lower().strip():
continue
else:
sensor["Fan Name"] = val[0].strip ()
sensor["Fan Number"] = val[1].strip ()
sensor["Fan Status"] = val[2].strip ()
sensor["Fan MemberId"] = val[3].strip ()
sensor["Fan Reading"] = val[4].strip ()
else:
sensor["Sensor Description"] = val[0].strip ()
sensor["Sensor Number"] = val[1].strip ()
sensor["Sensor Status"] = val[2].strip ()
sensor["Sensor Entity ID"] = val[3].strip ()
sensor["Sensor Reading"] = val[4].strip ()
temp_rsp[record_id] = sensor
record_id = record_id + 1
except Exception,e:
temp_rsp[completion_code.cc_key] = completion_code.failure
temp_rsp[completion_code.desc] = "Get Sensor Information, Exception: ", e
return temp_rsp
if completionstate:
temp_rsp[completion_code.cc_key] = completion_code.success
else:
temp_rsp[completion_code.cc_key] = completion_code.failure
return temp_rsp
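
# get_sensor_info expects pipe-delimited rows in the style of
# 'ipmitool sdr elist' output, e.g. (illustrative):
#   CPU1 Temp        | 31h | ok  |  3.1 | 45 degrees C
# i.e. description | sensor number | status | entity id | reading.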
def show_memory_info(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
ipmi_cmd = 'ocsoem dimminfo'
cmdinterface = interface + ' ' + ipmi_cmd
get_memory = parse_memory(cmdinterface ,"memory")
if get_memory is None or not get_memory: # Check empty or none
return set_failure_dict("Empty memory info", completion_code.failure)
return get_memory
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("get memory info Exception: ", e), completion_code.failure)
def show_pcie_health(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
ipmi_cmd = 'ocsoem getpcie'
cmdinterface = interface + ' ' + ipmi_cmd
get_pcie = parse_pcie(cmdinterface ,"pcie")
if get_pcie is None or not get_pcie: # Check empty or none
return set_failure_dict("Empty PCIe information", completion_code.failure)
return get_pcie
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("get Pcie info Exception: ", e), completion_code.failure)
def show_cpu_health(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface, completion_code.failure)
ipmi_cmd = 'ocsoem redfish cpu'
cmdinterface = interface + ' ' + ipmi_cmd
output = call_ipmi(cmdinterface, "cpu")
if "ErrorCode" in output:
return set_failure_dict("IPMI call error {0}".format(output), completion_code.failure)
cpursp = {}
if(output['status_code'] == 0):
cpu_data = output['stdout'].split('\n')
cpursp = get_cpu_info(cpu_data)
else:
error_data = output['stderr'].split('\n')
cpursp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
cpursp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
cpursp[completion_code.ipmi_code] = data.split(":")[-1]
if cpursp is None or not cpursp: # Check empty or none
return set_failure_dict("Empty cpu information", completion_code.failure)
return cpursp
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("show cpu info Exception: ", e), completion_code.failure)
def show_temperature_health(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface, completion_code.failure)
ipmi_cmd = 'sdr type temperature'
cmdinterface = interface + ' ' + ipmi_cmd
output = call_ipmi(cmdinterface, "Temperature")
if "ErrorCode" in output:
return set_failure_dict("IPMI call error {0}".format(output), completion_code.failure)
temprsp = {}
if(output['status_code'] == 0):
temp_data = output['stdout'].split('\n')
temprsp = get_sensor_info(temp_data)
else:
error_data = output['stderr'].split('\n')
temprsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
temprsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
temprsp[completion_code.ipmi_code] = data.split(":")[-1]
if temprsp is None or not temprsp: # Check empty or none
return set_failure_dict("Empty temperature information", completion_code.failure)
return temprsp
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("show temperature info Exception: ", e), completion_code.failure)
def show_fan_health(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface, completion_code.failure)
ipmi_cmd = 'sdr type fan'
cmdinterface = interface + ' ' + ipmi_cmd
output = call_ipmi(cmdinterface, "Fan")
if "ErrorCode" in output:
return set_failure_dict("IPMI call error {0}".format(output), completion_code.failure)
fanrsp = {}
if(output['status_code'] == 0):
fan_data = output['stdout'].split('\n')
fanrsp = get_sensor_info(fan_data,'fan')
else:
error_data = output['stderr'].split('\n')
fanrsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
fanrsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
fanrsp[completion_code.ipmi_code] = data.split(":")[-1]
if fanrsp is None or not fanrsp: # Check empty or none
return set_failure_dict("Empty fan information", completion_code.failure)
return fanrsp
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("show fan info Exception: ", e), completion_code.failure)
# This method is using to get the show manager inventory or sh system health -s (server info)
def show_server_health(serverid, inventory = False):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface, completion_code.failure)
ipmi_cmd = 'ocsoem redfish server'
cmdinterface = interface + ' ' + ipmi_cmd
output = call_ipmi(cmdinterface, "server")
if "ErrorCode" in output:
return set_failure_dict("IPMI call error {0}".format(output), completion_code.failure)
serverrsp = {}
if(output['status_code'] == 0) or output['stdout']:
server_data = output['stdout'].split('\n')
serverrsp = parse_server_details(server_data, inventory)
else:
error_data = output['stderr'].split('\n')
serverrsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
serverrsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
serverrsp[completion_code.ipmi_code] = data.split(":")[-1]
if serverrsp is None or not serverrsp: # Check empty or none
return set_failure_dict("Empty server information", completion_code.failure)
return serverrsp
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("show server info Exception: ", e), completion_code.failure)
def parse_server_details(server, inventory):
try:
completionstate = True
server_rsp = {}
for value in server:
if "Completion Code" in value:
completionstate &= False
elif "Server communication type" in value:
server_type = value.split(":")[-1].strip()
if server_type == "IPMI":
server_rsp["Server Type"] = "C2010"
elif server_type == "REST":
server_rsp["Server Type"] = "J2010"
else:
server_rsp["Server Type"] = "Unknown"
completionstate &= False
elif "Slot Id" in value:
server_rsp["Server Slot ID"] = value.split(":")[-1].strip()
elif "System Power State" in value:
server_rsp["Server State"] = value.split(":")[-1].strip()
elif inventory == True and "GUID" in value:
guid = value.split(":")[-1].strip()
if guid.lower().strip() == "failure":
completionstate &= False
server_rsp["UUID"] = guid
elif inventory == True and "MAC1" in value:
mac1 = value.split(":")[-1].strip()
if mac1.lower().strip() == "failure":
completionstate &= False
server_rsp["MAC1"] = mac1
except Exception,e:
server_rsp[completion_code.cc_key] = completion_code.failure
server_rsp[completion_code.desc] = "Get Server Information, Exception: ", e
return server_rsp
if completionstate:
server_rsp[completion_code.cc_key] = completion_code.success
else:
server_rsp[completion_code.cc_key] = completion_code.failure
return server_rsp
def show_sensor_health(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface, completion_code.failure)
ipmi_cmd = 'sdr elist'
cmdinterface = interface + ' ' + ipmi_cmd
output = call_ipmi(cmdinterface, "sensor")
if "ErrorCode" in output:
return set_failure_dict("IPMI call error {0}".format(output), completion_code.failure)
sensorrsp = {}
if(output['status_code'] == 0):
sensor_data = output['stdout'].split('\n')
sensorrsp = get_sensor_info(sensor_data)
else:
error_data = output['stderr'].split('\n')
sensorrsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
sensorrsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
sensorrsp[completion_code.ipmi_code] = data.split(":")[-1]
if sensorrsp is None or not sensorrsp: # Check empty or none
return set_failure_dict("Empty sensor information", completion_code.failure)
return sensorrsp
except Exception, e:
#Log_Error("Failed Exception:",e)
return set_failure_dict(("show sensor info Exception: ", e), completion_code.failure)
def get_server_fru(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface, completion_code.failure)
# IPMI command to get FRU details
cmdinterface = interface + ' ' + "fru print"
fru_collection = parse_fru_data(cmdinterface, "fru")
if fru_collection is None or not fru_collection: # Check empty or none
return set_failure_dict("Empty Fru data", completion_code.failure)
except Exception, e:
return set_failure_dict(("Server fru Exception",e), completion_code.failure)
return fru_collection
def get_server_nicinfo(serverid):
try:
if serverid < 1 or serverid > 48:
return set_failure_dict("Expected server-id between 1 to 48", completion_code.failure)
else:
interface = get_ipmi_interface(serverid)
if "Failed:" in interface:
return set_failure_dict(interface, completion_code.failure)
nic_collection = {}
for i in range(1,3):
ipmi_cmd = 'ocsoem nicinfo' + ' ' + str(i) # IPMI command to get server pcie details
cmdinterface = interface + ' ' + ipmi_cmd
get_nic = parse_nic(cmdinterface , "nic", str(i))
if get_nic is None or not get_nic: # Check empty or none
nic_collection[completion_code.cc_key] = completion_code.failure
nic_collection.update({i: get_nic})
except Exception, e:
return set_failure_dict(("Server fru Exception",e), completion_code.failure)
nic_collection[completion_code.cc_key] = completion_code.success
return nic_collection
def parse_fru_data(interface,command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return output
fru_rsp = {}
if(output['status_code'] == 0):
sdata = output['stdout'].split('\n')
fru_rsp = get_fru_info(sdata)
else:
error_data = output['stderr'].split('\n')
fru_rsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
fru_rsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
fru_rsp[completion_code.ipmi_code] = data.split(":")[-1]
except Exception, e:
#log.exception("Exception error is: ",e)
return set_failure_dict(("parse_fru() Exception ",e), completion_code.failure)
return fru_rsp
def get_fru_info(output):
try:
completionstate = True
fru_rsp = {}
for value in output:
if "Completion Code:" in value:
completionstate &= False
elif "Board Mfg Date" in value:
date = value.split(":")
date.pop(0)
fru_rsp["Board Mfg Date"] = ":".join(date)
elif "Board Mfg" in value:
fru_rsp["Board Mfg"] = value.split(":")[-1].strip()
elif "Board Product" in value:
fru_rsp["Board Product"] = value.split(":")[-1].strip()
elif "Board Serial" in value:
fru_rsp["Board Serial Number"] = value.split(":")[-1].strip()
elif "Board Part Number" in value:
fru_rsp["Board Part Number"] = value.split(":")[-1].strip()
elif "Product Asset Tag" in value:
fru_rsp["AssetTag"] = value.split(":")[-1].strip()
elif "Product Manufacturer" in value:
fru_rsp["Manufacturer"] = value.split(":")[-1].strip()
elif "Product Name" in value:
fru_rsp["Model"] = value.split(":")[-1].strip()
elif "Product Part Number" in value:
fru_rsp["Product Part Number"] = value.split(":")[-1].strip()
elif "Product Version" in value:
fru_rsp["Product Version"] = value.split(":")[-1].strip()
elif "Product Serial" in value:
fru_rsp["Product Serial"] = value.split(":")[-1].strip()
if completionstate:
fru_rsp[completion_code.cc_key] = completion_code.success
else:
fru_rsp[completion_code.cc_key] = completion_code.failure
return fru_rsp
except Exception,e:
#log.exception("Exception error is: %s " %e)
fru_rsp[completion_code.cc_key] = completion_code.failure
fru_rsp[completion_code.desc] = "Get fru info, Exception: ", e
return fru_rsp
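
# get_fru_info consumes 'ipmitool fru print' style "key : value" lines,
# e.g. (illustrative):
#   Board Mfg Date        : Mon Jan  1 00:00:00 2018
#   Product Name          : C2010
# Values are extracted by splitting on ':' and taking the last field; the
# manufacture date is rejoined because the timestamp itself contains colons.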
def parse_memory(interface ,command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return set_failure_dict("IPMI call error {0}".format(output), completion_code.failure)
memoryrsp = {}
if(output['status_code'] == 0):
memory_data = output['stdout'].split('*')
memory_value= filter(None, memory_data)
memoryrsp = get_memory_health(memory_value)
return memoryrsp
else:
error_data = output['stderr'].split('\n')
memoryrsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
memoryrsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
memoryrsp[completion_code.ipmi_code] = data.split(":")[-1]
except Exception, e:
#log.exception("Exception error is: ",e)
return set_failure_dict(("parse_memopry() Exception ",e), completion_code.failure)
def parse_pcie(interface ,command):
try:
output = call_ipmi(interface, command)
if "ErrorCode" in output:
return output
pciersp = {}
if(output['status_code'] == 0):
pcie_data = output['stdout'].split('*')
pcie_value= filter(None, pcie_data)
pciersp = get_pcie_info(pcie_value)
return pciersp
else:
error_data = output['stderr'].split('\n')
pciersp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
pciersp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
pciersp[completion_code.ipmi_code] = data.split(":")[-1]
except Exception, e:
#log.exception("Exception error is: ",e)
return set_failure_dict(("parse_pcie Exception ",e), completion_code.failure)
def parse_nic(interface, command, nicid):
try:
output = call_ipmi(interface, command+nicid)
if "ErrorCode" in output:
return output
nicrsp = {}
if(output['status_code'] == 0):
sdata = output['stdout'].strip()
nicrsp["Device Id"] = nicid
nicrsp["Mac Address"] = sdata[:-1]
else:
error_data = output['stderr'].split('\n')
nicrsp["Device Id"] = nicid
nicrsp[completion_code.cc_key] = completion_code.failure
for data in error_data:
if "Error" in data:
nicrsp[completion_code.desc] = data.split(":")[-1]
elif "Completion Code" in data:
nicrsp[completion_code.ipmi_code] = data.split(":")[-1]
except Exception, e:
#log.exception("Exception error is: ",e)
return set_failure_dict(("parse_pcie Exception ",e), completion_code.failure)
return nicrsp
# ==== research/cv/STGAN/eval.py | repo: leelige/mindspore | license: Apache-2.0 ====
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" Model Test """
import tqdm
from mindspore.common import set_seed
from src.models import STGANModel
from src.utils import get_args
from src.dataset import CelebADataLoader
set_seed(1)
def test():
""" test function """
args = get_args("test")
print('\n\n=============== start testing ===============\n\n')
data_loader = CelebADataLoader(args.dataroot,
mode=args.phase,
selected_attrs=args.attrs,
batch_size=1,
image_size=args.image_size)
iter_per_epoch = len(data_loader)
args.dataset_size = iter_per_epoch
model = STGANModel(args)
for _ in tqdm.trange(iter_per_epoch, desc='Test Loop'):
data = next(data_loader.test_loader)
model.test(data, data_loader.test_set.get_current_filename())
print('\n\n=============== finish testing ===============\n\n')
if __name__ == '__main__':
test()
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 2020
@author: Shrey1608
"""
# Approach: 1) Recursive inorder traversal, i.e. first do an inorder traversal (a form of DFS) and then just follow the recursion. Time complexity: O(N); space: O(H), where H is the height of the tree (recursion stack).
# It's a very straightforward approach with O(N) time complexity. The idea is to build an inorder traversal of the BST, which is an array sorted in ascending order. The answer is then the (k-1)-th element of this array (0-indexed).
# Solution: 1) Recursive inorder traversal
class Solution:
def inorder(self,root,output):
        if root is None:
return
else:
self.inorder(root.left,output)
output.append(root.val)
self.inorder(root.right,output)
def kthSmallest(self, root: TreeNode, k: int) -> int:
output=[]
self.inorder(root,output)
return output[k-1]
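# Quick usage sketch (illustrative only; on LeetCode the TreeNode class and the
# calls are supplied by the judge, so a bare-bones TreeNode is assumed here):
#
#   class TreeNode:
#       def __init__(self, val=0, left=None, right=None):
#           self.val, self.left, self.right = val, left, right
#
#   root = TreeNode(3, TreeNode(1, None, TreeNode(2)), TreeNode(4))
#   print(Solution().kthSmallest(root, 1))  # -> 1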
# ==== src/main/python/qxy/rename.py | repo: gwdgithubnom/ox-patient | license: MIT ====
import os
import configparser
from context import resource_manager
from tools import file_manage
def rename_dir(url,reverse=True):
"""
用户给定文件夹的路径,如果给定路径存在,更改指定文件夹下的文件夹的名字
:param url: 用户给定的文件夹的路径
:param reverse: 如果reverse=True就进行反向命名,reverse=False就对所给文件夹所包含的文件夹进行重命名;
:return:
"""
if _exist_(url)and reverse==False:
if not os.path.exists(url):
url = os.path.abspath(url)
a = 1
list_sto=_random_name(url,'D')
while(True):
if str(a) in list_sto:
a=a+1
else:
break
conf_sto = configparser.ConfigParser()
conf_sto.read('conf'+resource_manager.getSeparator()+'directory.ini')
for ob in list_sto:
try:
files=conf_sto.get(url,ob)
old=os.path.join(url,files);
filetype=os.path.splitext(files)[1];
d=a
if os.path.isdir(old):
New=os.path.join(url,str(a)+filetype);
a=str(a)+filetype
os.rename(old, New);
_store_(url,ob,a,'D')
a=d
a=a+1
except:
pass;
elif reverse==True and _exist_(url):
_reverse_(url,'D')
def rename_file(url,reverse=True):
"""
用户给定文件夹的路径,如果给定路径存在,更改指定文件夹下的文件的名字
:param url: 用户给定的文件夹的路径
:param reverse: 如果reverse=True就进行反向命名,reverse=False就对所给文件夹所包含的文件进行重命名;
:return:
"""
if _exist_(url)and reverse==False:
if not os.path.exists(url):
url = os.path.abspath(url)
a = 1
list_sto=_random_name(url,'F')
conf_sto = configparser.ConfigParser()
conf_sto.read('conf'+resource_manager.getSeparator()+'factory.ini')
for ob in list_sto:
try:
files=conf_sto.get(url,ob)
old=os.path.join(url,files);
filetype=os.path.splitext(files)[1];
d=a
if os.path.isfile(old):
New=os.path.join(url,str(a)+filetype);
a=str(a)+filetype
os.rename(old, New);
_store_(url,ob, a,'F')
a=d
a=a+1
except:
pass;
elif reverse==True and _exist_(url):
_reverse_(url,'F')
"""根据reverse进行反向目录生成"""
def _reverse_(doc_name,type):
"""
根据reverse进行反向目录生成
:param doc_name: 用户给定文件夹的路径
:param type: 根据用户调用方法的不同对文件和文件夹分开进行重命名;type=‘F(file)’对文件操作,type=‘D(directory)’对文件夹操作
:return:
"""
try:
conf = configparser.ConfigParser()
if type=='D':
conf.read('conf'+resource_manager.getSeparator()+'directory.ini')
elif type=='F':
conf.read('conf'+resource_manager.getSeparator()+'factory.ini')
options = conf.options(doc_name)
if not os.path.exists(doc_name):
doc_name = os.path.abspath(doc_name)
for option in options:
try:
str_val = conf.get(doc_name,option )
New=os.path.join(doc_name,option);
old=os.path.join(doc_name,str_val);
os.rename(old,New);
except:
print(option+" don't exist")
except:
print("no document has been renamed")
def _exist_(url):
"""
判断所给的路径是否存在,如果所给的是相对路径(在判断文件夹不存在后)转换为绝对路径
:param url: 用户给定文件夹的路径
:return:
"""
s=url;
if not os.path.exists(url):
s = os.path.abspath(url)
if os.path.exists(s) and os.path.isdir(s):
return True
else:
print(url + " don't exist or isn't a dir")
def _store_(doc_name,files,a,type):
"""
将更改后的文件oldname和newname以section的方式存到directory.ini或factory.ini中
(具体哪个文件夹则根据所给的文件类型type决定,用户调用相应的方法后type自动赋值)
:param doc_name:用户传入的文件夹的路径
:param files:文件夹下面的文件或文件夹(具体类型根据type决定)的名字
:param a:文件重命名后新的编码(名字)
:return:
"""
try:
config_write = configparser.ConfigParser()
if type=='D':
config_write.read('conf'+resource_manager.getSeparator()+'directory.ini')
ftest = open('conf'+resource_manager.getSeparator()+'directory.ini','w+')
elif type=='F':
config_write.read('conf'+resource_manager.getSeparator()+'factory.ini')
ftest = open('conf'+resource_manager.getSeparator()+'factory.ini','w+')
check=config_write.sections()
n=False
if doc_name in check:
n=True
config_write.set(doc_name,files,str(a))
if n==False:
config_write.add_section(doc_name)
config_write.set(doc_name,files,str(a))
config_write.write(ftest)
ftest.close()
except:
pass;
def _random_name(url,type):
"""
对文件或文件夹进行随机重命名(防止产生因同名而无法重命名的问题)(具体类型则根据所给的文件类型type决定,用户调用相应的方法后type自动赋值)
:param url: 用户传入的文件夹的地址
:return: 返回文件夹中所有文件或文件夹重命名之前的名字的列表
"""
doc=os.listdir(url)
for files in doc:
try:
filetype=os.path.splitext(files)[1]
if not os.path.exists(files):
old=url+resource_manager.getSeparator()+files
else:
old=resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+url+resource_manager.getSeparator()+files
if os.path.isdir(old)and type=='D':
random=file_manage.random_string()
New=resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+url+resource_manager.getSeparator()+random
os.rename(old, New);
_store_(url,files,random+filetype,'D')
elif os.path.isfile(old)and type=='F':
random=file_manage.random_string()
New=resource_manager.Properties.getRootPath()+resource_manager.getSeparator()+url+resource_manager.getSeparator()+random
os.rename(old, New);
_store_(url,files,random+filetype,'F')
except:
pass
list=doc
return list;
if __name__ == "__main__":
rename_file(url=resource_manager.Properties.getRootPath()+'qxy/test',reverse=False)
#rename_dir(url='qxy/otest',reverse=True)
# ==== PINp/2014/Koleganov_N_S/task_3_10.py | repo: YukkaSarasti/pythonintask | license: Apache-2.0 ====
# Task 3. Variant 10.
# Write a program that prints the name "Igor Vasilyevich Lotaryov" and
# asks for his pseudonym. The program must concatenate these two strings
# and print the result, separating the name and the pseudonym with a dash.
# Koleganov Nikita Sergeevich
# 29.05.2012
name=input("Герой нашей программы - Игорь Васильевич Лотарев.\nПод каким же именем мы знаем этого человека?")
print("Ваш ответ:", name)
print("Все верно: Игорь Васильевич Лотарев -", name)
input("\n\nНажмите Enter для выхода.")
# ==== challenges/blackrock/audit_sale.py | repo: PlamenHristov/HackerRank | license: MIT ====
def sale(securities, M, K):
sorted_sec = sorted(securities, key=lambda x: x[0] * x[1], reverse=True)
res = 0
for i in range(M):
if K > 0:
x, _ = sorted_sec[i]
y = 1
else:
x, y = sorted_sec[i]
K -= 1
res += x * y
print(res)
# N, M, K = list(map(int, input().split()))
#
# securities = []
# for _ in range(N):
# securities.append(tuple(map(int, input().split())))
# print(securities)
def test():
N, M, K = 3, 2, 1
price = [(5, 10), (6, 60), (8, 40)]
sale(price, M, K)
# 1116/100
if __name__ == '__main__':
test()
"""cell_inverter.py
스프레드시트 셀 반전시키기
"""
import sys
import openpyxl
if len(sys.argv) != 2:
print(f"python {__file__} <*.xlsx>")
sys.exit(1)
file_name = sys.argv[1]
wb = openpyxl.load_workbook(file_name)
sheet = wb.active
new_wb = openpyxl.Workbook()
new_sheet = new_wb.active
for row_obj in sheet.rows:
for cell in row_obj:
new_sheet.cell(row=cell.column, column=cell.row).value = cell.value
new_wb.save(f"reversed_{file_name}")
# ==== udacity course code/01-06-plottwohistograms.py | repo: bluemurder/mlfl | license: MIT ====
"""Plot a couple of histogram."""
import pandas as pd
import matplotlib.pyplot as plt
from util import get_data, plot_data, compute_daily_returns
def test_run():
# Read data
dates = pd.date_range('2009-01-01', '2012-12-31') # date range as index
symbols = ['SPY','XOM']
df = get_data(symbols, dates) # get data for each symbol
#plot_data(df)
# Compute daily returns
daily_returns = compute_daily_returns(df)
#plot_data(daily_returns, title = "Daily returns", ylabel = "Daily returns")
# Compute and plot a couple of histograms on same chart
daily_returns['SPY'].hist(bins = 20, label = 'SPY')
daily_returns['XOM'].hist(bins = 20, label = 'XOM')
plt.legend(loc = 'upper right')
plt.show()
if __name__ == "__main__":
test_run()
# ==== src/deal_features.py | repo: Times125/Emotion-Analyse | license: Apache-2.0 ====
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author:lch02
@Time: 2017/12/26 9:51
@Description:
"""
import itertools
import os
import pickle
import config
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
from config import test_path
from nltk.probability import FreqDist, ConditionalFreqDist
__author__ = 'lch02'
"""
计算单个词和双词搭配的贡献(信息量
"""
def word_bigram_scores():
pos_data = pickle.load(open(os.path.join(test_path, 'pos_review.pkl'), 'rb'))
neg_data = pickle.load(open(os.path.join(test_path, 'neg_review.pkl'), 'rb'))
pos_words = list(itertools.chain(*pos_data))
neg_words = list(itertools.chain(*neg_data))
pos_bigram_finder = BigramCollocationFinder.from_words(pos_words)
neg_bigram_finder = BigramCollocationFinder.from_words(neg_words)
pos_bigrams = pos_bigram_finder.nbest(BigramAssocMeasures.chi_sq, config.bigram_scores_threshold)
neg_bigrams = neg_bigram_finder.nbest(BigramAssocMeasures.chi_sq, config.bigram_scores_threshold)
pos_words.extend(pos_bigrams)
neg_words.extend(neg_bigrams)
    word_tf = FreqDist()  # frequency of all words
    con_word_tf = ConditionalFreqDist()  # per-class frequency distribution of each word
for word in pos_words:
word_tf[word] += 1
con_word_tf['pos'][word] += 1
for word in neg_words:
word_tf[word] += 1
con_word_tf['neg'][word] += 1
    pos_word_count = con_word_tf['pos'].N()  # number of positive words
    neg_word_count = con_word_tf['neg'].N()  # number of negative words
    total_word_count = pos_word_count + neg_word_count  # total word count
bigram_scores_dict = {}
for word, freq in word_tf.iteritems():
        pos_score = BigramAssocMeasures.chi_sq(con_word_tf['pos'][word], (freq, pos_word_count), total_word_count)  # chi-square statistic for the positive class
        neg_score = BigramAssocMeasures.chi_sq(con_word_tf['neg'][word], (freq, neg_word_count), total_word_count)  # chi-square statistic for the negative class
bigram_scores_dict[word] = pos_score + neg_score
return bigram_scores_dict
"""
选择贡献最大的特征
"""
def get_best_words(scores_dict, threshold=10000):
    best = sorted(scores_dict.iteritems(), key=lambda (word, score): score, reverse=True)[:threshold]  # sort descending and keep the top 10000
best_words = set([w for w, s in best])
return best_words
"""
选择1:最有信息量的单个词作为特征
"""
def best_words_features(words):
if config.best_words is None:
config.best_words = pickle.load(open(os.path.join(config.test_path, 'best_feats.pkl'), 'rb'))
lst = []
for word in words:
if word in config.best_words:
lst.append((word, True))
else:
lst.append((word, False))
return dict(lst)
"""
选择2:把所有词和双词搭配一起作为特征
"""
def best_bigram_words_features(words, score_fn=BigramAssocMeasures.chi_sq, n=1500):
try:
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
except ZeroDivisionError:
words.append(' ')
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
d = dict([(bigram, True) for bigram in bigrams])
d.update(best_words_features(words))
return d
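# Usage sketch (a minimal, illustrative flow; assumes the pickled review data
# and the config module described above are in place):
#   scores = word_bigram_scores()
#   config.best_words = get_best_words(scores, 10000)
#   feats = best_bigram_words_features(['good', 'movie', 'not', 'bad'])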
import argparse, numpy as np, pandas as pd
from hrputils import calchrp
my_parser = argparse.ArgumentParser()
my_parser.add_argument('-i', help='input csv file with returns', required=True)
my_parser.add_argument('-o', help='output csv file with hrp weights, defaults to weights.csv in the same folder', default='weights.csv')
args = my_parser.parse_args()
x = np.loadtxt(args.i,delimiter=',', dtype=float)
w = calchrp(x)
print(w.sort_index().values)
w.to_csv(args.o, index=False, sep=',')
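# Example invocation (script and file names are illustrative; the input is a
# CSV matrix of asset returns, one column per asset):
#   python <this_script>.py -i returns.csv -o weights.csv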
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TBNet configurations."""
import json
class TBNetConfig:
"""
TBNet config file parser and holder.
Args:
config_path (str): json config file path.
"""
def __init__(self, config_path):
with open(config_path) as f:
json_dict = json.load(f)
self.num_item = int(json_dict['num_item'])
self.num_relation = int(json_dict['num_relation'])
self.num_entity = int(json_dict['num_entity'])
self.per_item_num_paths = int(json_dict['per_item_num_paths'])
self.embedding_dim = int(json_dict['embedding_dim'])
self.batch_size = int(json_dict['batch_size'])
self.lr = float(json_dict['lr'])
self.kge_weight = float(json_dict['kge_weight'])
self.node_weight = float(json_dict['node_weight'])
self.l2_weight = float(json_dict['l2_weight'])
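# A minimal config.json sketch matching the fields read above (all values are
# illustrative only):
#   {
#     "num_item": 100, "num_relation": 10, "num_entity": 500,
#     "per_item_num_paths": 8, "embedding_dim": 16, "batch_size": 32,
#     "lr": 0.001, "kge_weight": 0.01, "node_weight": 0.001, "l2_weight": 1e-5
#   }
# Usage: cfg = TBNetConfig('config.json'); print(cfg.embedding_dim)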
# ==== book/_build/jupyter_execute/docs/000_intro.py | repo: tom-tubeless/Biologie | license: MIT ====
#!/usr/bin/env python
# coding: utf-8
# # Bio am WWG
#
# Lesson planning, content, and materials for the subject Biology at the Wim-Wenders-Gymnasium Düsseldorf.
# Sebastian Raschka 03/2014
import gzip
import shutil
import os
#import pyprind
def conc_gzip_files(in_dir, out_file, append=False, print_progress=True):
""" Reads contents from gzipped ASCII or UTF-8 files, decodes them, and
appends the lines to one output file.
Keyword arguments:
in_dir (str): Path of the directory with the gzip-files
out_file (str): Path to the resulting file
        append (bool): If true, it appends contents to an existing file,
else creates a new output file.
print_progress (bool): prints progress bar if true.
"""
gzips = [os.path.join(in_dir, i) for i in os.listdir(in_dir) if i.endswith('.gz')]
#if print_progress:
# pbar = pyprind.ProgBar(len(gzips))
with open(out_file, 'ab' if append else 'wb') as ofile:
for f in gzips:
with gzip.open(f, 'rb') as gzipf:
shutil.copyfileobj(gzipf, ofile)
#if print_progress:
# pbar.update()
if __name__ == '__main__':
conc_gzip_files('/home/usr/my_dir', '/home/usr/test.txt')
# ==== RSS/model/rssfeed.py | repo: dereklm12880/rssticker | license: MIT ====
# method for rss links
import feedparser
class RssModel:
""" Class model.rssfeeds.RssModel.
This class parses the feeds that are given and returns them in an an empty list.
"""
_newsreel_index_pos = -1
_raw_feed = ''
given_url = ''
title = ''
subtitle = ''
link = ''
newsreel = []
def parse(self, feed_url):
""" Function model.rssfeeds.RssModel.parse.
This function checks to see if there are feeds are strings and if they
have URLs, otherwise throwing an unexpected error. If exceptions aren't thrown,
the title, subtitle, and link are stored.
Arguments:
feed_url -- the url that is taken from the feed.
"""
if not isinstance(feed_url, str): raise Exception('Expects string {} given'.format(type(feed_url)))
self._raw_feed = feedparser.parse(feed_url)
if len(self._raw_feed) == 0: raise Exception("No feed with the url {} found.".format(feed_url))
if 'bozo' in self._raw_feed and self._raw_feed['bozo'] == 1: raise Exception("An unexpected issue occurred: {}".format(self._raw_feed['bozo_exception']))
self.given_url = feed_url
self.title = self._raw_feed['feed']['title']
self.subtitle = self._raw_feed['feed']['subtitle']
self.link = self._raw_feed['feed']['link']
self.newsreel = self._raw_feed['entries']
return self
def get_current(self):
""" Function model.rssfeed.RssModel.get_current.
This function gets the current article from the feed. If nothing is loaded, an
exception is thrown.
"""
try:
_tmp = None
if self._newsreel_index_pos < 0:
_tmp = self._newsreel_index_pos
self._newsreel_index_pos = 0
_news_reel = self.newsreel[self._newsreel_index_pos]
self._newsreel_index_pos = _tmp if _tmp else self._newsreel_index_pos
return _news_reel
except IndexError: raise Exception("There is no news loaded! Try parsing a new RSS feed.")
def get_next(self):
""" Function model.rssfeed.RssModel.get_next.
This function gets the next article in the feed until it gets to the end. When it gets
to the end of the feeds, it throws an exception.
"""
try:
self._newsreel_index_pos = self._newsreel_index_pos +1
return self.get_current()
except IndexError: raise Exception("There is no more news! Try parsing a new RSS feed.")
#TODO: add functionality to move to the next URL feed. Once out of URLs, it will load the URL from the
# beginning of the list and continue. If we want to continue to the next feed, we need to add a
# load_next_feed function that cycles back to the first feed.
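# Usage sketch (the feed URL is a placeholder):
#   model = RssModel().parse('https://example.com/rss.xml')
#   print(model.title, '-', model.link)
#   first = model.get_current()
#   second = model.get_next()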
# ==== 3kCTF/2021/pwn/klibrary/upload.py | repo: ruhan-islam/ctf-archives | license: MIT ====
#!/usr/bin/python3.8
from pwn import *
import base64  # imported explicitly; not guaranteed to be re-exported by pwn
EXPLOIT_PATH = '/tmp/exploit'
SERVER = '178.62.107.48'
PORT = 9994
SHELL_PROMPT = '$ '
def get_splitted_encoded_exploit():
split_every = 256
# Change the name to your exploit path
with open('exploit', 'rb') as exploit_file:
exploit = base64.b64encode(exploit_file.read())
return [exploit[i:i+split_every] for i in range(0, len(exploit), split_every)]
def upload_exploit(sh):
chunks_sent = 0
splitted_exploit = get_splitted_encoded_exploit()
for exploit_chunk in splitted_exploit:
print(f'[*] Sending a chunk ({chunks_sent}/{len(splitted_exploit)})')
sh.sendlineafter(
SHELL_PROMPT, f'echo {exploit_chunk.decode()} | base64 -d >> {EXPLOIT_PATH}')
chunks_sent += 1
r = remote(SERVER, PORT)
upload_exploit(r)
# When finished, your exploit will be in /tmp directory. Good luck.
r.interactive()
# ==== INBa/2015/Serdechnaya_A_M/task_3_25.py | repo: YukkaSarasti/pythonintask | license: Apache-2.0 ====
# Write a program that prints the name "Alessandro Filipepi" and asks for his pseudonym.
# The program must concatenate these two strings and print the resulting string, separating the name and the pseudonym with a dash.
print("Введите псевдоним Алессандро ди Мариано ди Ванни Филипепи:")
nick=input()
print("Алессандро ди Мариано ди Ванни Филипепи - это "+nick)
input("Нажмите ENTER для продолжения")
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
# Recursive
def isUnivalTree(root: TreeNode) -> bool:
def compare_value(cur_root: TreeNode, previous_root: int) -> bool:
if not cur_root:
return True
if cur_root.val != previous_root:
return False
return compare_value(cur_root.left, cur_root.val) and compare_value(cur_root.right, cur_root.val)
return compare_value(root, root.val)
# Iterative
# def isUnivalTree(root: TreeNode) -> bool:
# stack = [(root, root.val)]
# while stack:
# cur_node, prev_val = stack.pop()
# if cur_node.val != prev_val:
# return False
# if cur_node.left:
# stack.append((cur_node.left, cur_node.val))
# if cur_node.right:
# stack.append((cur_node.right, cur_node.val))
#
# return True
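# Quick check (values chosen for illustration):
#   root = TreeNode(1, TreeNode(1), TreeNode(1, None, TreeNode(1)))
#   print(isUnivalTree(root))  # -> True
#   root.right.right.val = 2
#   print(isUnivalTree(root))  # -> False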
# ==== insert_tables.py | repo: arwhyte/gffa-db | license: BSD-3-Clause ====
import requests
import json
import psycopg2
def read_json(filepath, encoding='utf-8'):
"""Reads a JSON document, decodes the file content, and returns a
dictionary if provided with a valid filepath.
Parameters:
filepath (str): path to file
Returns:
dict: dict representations of the decoded JSON document
"""
with open(filepath, 'r', encoding=encoding) as file_obj:
return json.load(file_obj)
def write_json(filepath, data, encoding='utf-8', ensure_ascii=False, indent=2):
"""Serializes object as JSON. Writes content to the provided filepath.
Parameters:
filepath (str): the path to the file
data (dict)/(list): the data to be encoded as JSON and written to the file
encoding (str): name of encoding used to encode the file
ensure_ascii (str): if False non-ASCII characters are printed as is; otherwise
non-ASCII characters are escaped.
indent (int): number of "pretty printed" indention spaces applied to encoded JSON
Returns:
None
"""
with open(filepath, 'w', encoding=encoding) as file_obj:
json.dump(data, file_obj, ensure_ascii=ensure_ascii, indent=indent)
def get_cleaned_list(data):
"""Takes dictionary data and converts its values it into list. Then rearranges the order of elements as per the database column order.
Parameters:
data (dict): one dict data from the whole json read file
Returns:
result (list): list of rearranged values as per database columns
"""
result = []
data = list(data.values())
result.append(data[13])
for i in data[:-1]:
result.append(i)
result.append(json.dumps(data))
print(len(result))
print(result[-1])
return result
def main():
# Connect to an existing database
conn = psycopg2.connect("dbname=gffa_db user=postgres password=postgres")
# Open a cursor to perform database operations
cur = conn.cursor()
# --------------- Query to insert records into film table ---------------
# Read film data from swapi_films.json
film_data = read_json('./data/swapi_json/swapi_films.json')
#insertion process
for film in film_data:
film_list = get_cleaned_list(film)
sql = """INSERT INTO public.film(url, title, episode_id, opening_crawl, director, producer, release_date, characters, planets, starships, vehicles, species, created, edited, raw_json) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (url) DO NOTHING"""
# Execute to insert records into film table
cur.execute(sql, film_list)
# Make the changes to the database persistent
conn.commit()
# # --------------- Query to insert records into person table ---------------
# # Read person data from swapi_people.json
# person_data = read_json('./data/swapi_json/swapi_people.json')
# # insertion process
# for person in person_data:
# person_list = get_cleaned_list(person)
# sql = """INSERT INTO public.person(url, name, height, mass, hair_color, skin_color, eye_color, birth_year, gender, homeworld, films, species, vehicles, starships, created, edited, raw_json) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (url) DO NOTHING"""
# # Execute to insert records into person table
# cur.execute(sql, person_list)
# # Make the changes to the database persistent
# conn.commit()
# # --------------- Query to insert records into planet table ---------------
# # Read person data from swapi_planets.json
# planet_data = read_json('./data/swapi_json/swapi_planets.json')
# # insertion process
# for planet in planet_data:
# planet_list = get_cleaned_list(planet)
# sql = """INSERT INTO public.planet(url, name, rotation_period, orbital_period, diameter, climate, gravity, terrain, surface_water, population, residents, films, created, edited, raw_json) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ON CONFLICT (url) DO NOTHING"""
# # Execute to insert records into planet table
# cur.execute(sql, planet_list)
# # Make the changes to the database persistent
# conn.commit()
# Close cursor
cur.close()
# Close connection
conn.close()
if __name__ == '__main__':
main()
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
PANW_IOT_INSTANCE = demisto.args().get('panw_iot_3rd_party_instance')
CISCO_ISE_ACTIVE_INSTANCE = demisto.args().get("active_ise_instance")
GET_EP_ID_CMD = 'cisco-ise-get-endpoint-id-by-name'
CISCO_ISE_FIELD_MAP = {
"ip": ["ZingboxIpAddress", "PanwIoTIpAddress"],
"ip address": ["ZingboxIP", "PanwIoTIP"],
"ip_address": ["ZingboxIP", "PanwIoTIP"],
"profile": ["ZingboxProfile", "PanwIoTProfile"],
"category": ["ZingboxCategory", "PanwIoTCategory"],
"risk_score": ["ZingboxRiskScore", "PanwIoTRiskScore"],
"risk score": ["ZingboxRiskScore", "PanwIoTRiskScore"],
"confidence": ["ZingboxConfidence", "PanwIoTConfidence"],
"confidence score": ["ZingboxConfidence", "PanwIoTConfidence"],
"confidence_score": ["ZingboxConfidence", "PanwIoTConfidence"],
"tag": ["ZingboxTag", "PanwIoTTag"],
"asset_tag": ["ZingboxTag", "PanwIoTTag"],
"Tags": ["ZingboxTag", "PanwIoTTag"],
"hostname": ["ZingboxHostname", "PanwIoTHostname"],
"osCombined": ["ZingboxOS", "PanwIoTOS"],
"model": ["ZingboxModel", "PanwIoTModel"],
"vendor": ["ZingboxVendor", "PanwIoTVendor"],
"Serial Number": ["ZingboxSerial", "PanwIoTSerial"],
"Serial_Number": ["ZingboxSerial", "PanwIoTSerial"],
"endpoint protection": ["ZingboxEPP", "PanwIoTEPP"],
"endpoint_protection": ["ZingboxEPP", "PanwIoTEPP"],
"AET": ["ZingboxAET", "PanwIoTAET"],
"External Network": ["ZingboxInternetAccess", "PanwIoTInternetAccess"],
}
INT_FIELDS = ["risk_score", "risk score", "confidence", "confidence score", "confidence_score"]
def send_status_to_panw_iot_cloud(status, msg):
"""
Reports status details back to PANW IoT Cloud.
    param status: Status (error, disabled, success) to be sent to PANW IoT cloud.
    param msg: Debug message to be sent to PANW IoT cloud.
"""
resp = demisto.executeCommand("panw-iot-3rd-party-report-status-to-panw", {
"status": status,
"message": msg,
"integration_name": "ise",
"playbook_name": "PANW IoT 3rd Party Cisco ISE Integration - Bulk Export to Cisco ISE",
"asset_type": 'device',
"timestamp": int(round(time.time() * 1000)),
"using": PANW_IOT_INSTANCE
})
if isError(resp[0]):
err_msg = f'Error, failed to send status to PANW IoT Cloud - {resp[0].get("Contents")}'
raise Exception(err_msg)
def get_active_ise_instance_or_error_msg():
"""
Get the active configured Cisco ISE instance, if not found then return the error message.
"""
response = demisto.executeCommand("GetCiscoISEActiveInstance", {})
err_msg = None
active_instance = None
data = response[0].get('EntryContext', {})
if 'PaloAltoIoTIntegrationBase.ActiveNodeInstance' in data:
active_instance = data.get('PaloAltoIoTIntegrationBase.ActiveNodeInstance')
elif 'PaloAltoIoTIntegrationBase.NodeErrorStatus' in data:
err_msg = data.get('PaloAltoIoTIntegrationBase.NodeErrorStatus')
return active_instance, err_msg
def extract_ise_api_error(err_msg):
"""
Extract any connection error or error code if possible,
Otherwise just return the original error
"""
err_msg = err_msg.split('-')[0]
if err_msg.startswith("Error in API call to Cisco"):
start = err_msg.find('[') + 1
end = err_msg.find(']')
return err_msg[start:end]
elif err_msg.startswith("Connection Error. Verify"):
return "Connection Error"
else:
return err_msg
def get_devices_from_panw_iot_cloud(offset, page_size):
"""
Gets assets from PANW IoT cloud.
param offset: Offset number for the asset list.
param page_size: Page size of the response being requested.
"""
resp = demisto.executeCommand("panw-iot-3rd-party-get-asset-list", {
"asset_type": 'device',
"increment_type": None,
"offset": offset,
"pageLength": page_size,
"using": PANW_IOT_INSTANCE
})
if isError(resp[0]):
err_msg = f'Error, could not get assets from PANW IoT Cloud - {resp[0].get("Contents")}'
raise Exception(err_msg)
return resp[0]['Contents']
def convert_device_map_to_cisco_ise_attributes(device_map):
"""
Converts a PANW IoT device_map to Cisco ISE custom attributes map.
param device_map: Single PANW IoT device_map with device attributes .
"""
attribute_list = {}
if 'deviceid' in device_map:
if device_map['deviceid'] is None or device_map['deviceid'] == "":
return None
attribute_list['mac'] = device_map['deviceid']
if not is_mac_address(attribute_list['mac']):
return None
zb_attributes = {}
for field in device_map:
if device_map[field] is None or device_map[field] == "":
continue
if field in CISCO_ISE_FIELD_MAP:
if field in INT_FIELDS:
try:
int_val = int(device_map[field])
except Exception:
continue
zb_attributes[CISCO_ISE_FIELD_MAP[field][0]] = int_val
zb_attributes[CISCO_ISE_FIELD_MAP[field][1]] = int_val
else:
zb_attributes[CISCO_ISE_FIELD_MAP[field][0]] = device_map[field]
zb_attributes[CISCO_ISE_FIELD_MAP[field][1]] = device_map[field]
attribute_list['zb_attributes'] = zb_attributes
return attribute_list
def update_existing_endpoint(mac, attr_map, ep_id, active_instance):
"""
Update an existing endpoint with the given custom attributes.
Param mac: mac address of the endpoint that needs to be updated.
Param attr_map: a map containing various ise custom attributes.
Param ep_id: ID for endpoint that needs to be updated.
Param active_instance: The primary/active ISE instance.
"""
attribute_names = ""
attribute_values = ""
for key in attr_map:
attribute_names += key + ","
attribute_values += str(attr_map[key]) + ","
attribute_names = attribute_names[:-1]
attribute_values = attribute_values[:-1]
resp = demisto.executeCommand("cisco-ise-update-endpoint-custom-attribute", {
"id": ep_id,
"macAddress": mac,
"attributeName": attribute_names,
"attributeValue": attribute_values,
"using": active_instance
})
if isError(resp[0]):
        err_msg = f'Error, failed to update custom attributes for endpoint {ep_id} - {resp[0].get("Contents")}'
raise Exception(err_msg)
def create_new_ep(mac, attr_map, active_instance):
"""
Create a new endpoint with the given params
Param mac: mac address of the endpoint that needs to be created.
Param attr_map: a map containing various ise custom attributes.
Param active_instance: The primary/active ISE instance.
"""
resp = demisto.executeCommand("cisco-ise-create-endpoint", {
"mac_address": mac,
"attributes_map": attr_map,
"using": active_instance
})
if isError(resp[0]):
err_msg = f'Failed to create new Endpoint {mac} - {resp[0].get("Contents")}'
raise Exception(err_msg)
def create_or_update_ep(mac, attr_map):
"""
    Check if an endpoint exists in ISE; if not, create one with the custom attributes,
    otherwise update it. If at any point the connection goes down or we get a 401 -
    unauthorized access - we will attempt to get the new active instance.
    Param mac: MAC address of the endpoint.
    Param attr_map: Custom attributes for the endpoint.
"""
global CISCO_ISE_ACTIVE_INSTANCE
global GET_EP_ID_CMD
cmd_mac_syntax_map = {
"cisco-ise-get-endpoint-id-by-name": "mac_address",
"cisco-ise-get-endpoint-id": "macAddress"
}
# Check if this mac address (endpoint) is present in ISE by attempting to get its ID
resp = demisto.executeCommand(GET_EP_ID_CMD, {
cmd_mac_syntax_map[GET_EP_ID_CMD]: mac,
"using": CISCO_ISE_ACTIVE_INSTANCE
})
if isError(resp[0]):
err_msg = extract_ise_api_error(resp[0].get("Contents"))
# 404 Not Found or empty results, we need to create a new EP
if err_msg == "404" or err_msg == "list index out of range":
create_new_ep(mac, attr_map, CISCO_ISE_ACTIVE_INSTANCE)
# 405 - Method not allowed means we need to switch to an old filter based API
elif err_msg == '405':
GET_EP_ID_CMD = "cisco-ise-get-endpoint-id"
        # The primary went down (connection error), or 401 if a failover occurred (this primary/active
        # is now a secondary/standby device). We should attempt to get the new primary/active
        # instance if possible.
elif err_msg == "Connection Error" or err_msg == "401":
            # Failover can take up to 10 minutes; it's OK to just wait even if it's a standalone ISE node.
msg = "ISE instance is down. Trying again in 10 minutes. Error = %s" % err_msg
demisto.info("PANW_IOT_3RD_PARTY_BASE %s" % msg)
send_status_to_panw_iot_cloud("error", msg)
time.sleep(10 * 60)
# Try again to get a new active instance
new_active_instance, err_msg = get_active_ise_instance_or_error_msg()
if new_active_instance is None:
raise Exception(err_msg)
else:
CISCO_ISE_ACTIVE_INSTANCE = new_active_instance
msg = f"Found new active ISE instance {CISCO_ISE_ACTIVE_INSTANCE}"
send_status_to_panw_iot_cloud("success", msg)
else:
raise Exception(resp[0].get("Contents"))
else:
ep_id = resp[0]['EntryContext']['Endpoint(val.ID === obj.ID)']['ID']
update_existing_endpoint(mac, attr_map, ep_id, CISCO_ISE_ACTIVE_INSTANCE)
def get_all_panw_iot_devices_and_send_to_cisco_ise():
"""
Retrieves all devices from PANW IoT Cloud, 1000 devices at a time and sends it
to the primary/active cisco ise.
"""
count = 0
offset = 0
page_size = 1000
unique_macs = set()
while True:
device_list = get_devices_from_panw_iot_cloud(offset, page_size)
size = len(device_list)
count += size
for device in device_list:
attrs = convert_device_map_to_cisco_ise_attributes(device)
if attrs is not None:
mac = attrs['mac']
attr_map = attrs['zb_attributes']
if mac not in unique_macs:
create_or_update_ep(mac, attr_map)
unique_macs.add(mac)
time.sleep(0.5)
if size == page_size:
offset += page_size
msg = f'Successfully exported {count} devices to Cisco ISE'
send_status_to_panw_iot_cloud("success", msg,)
else:
break
return(f'Total {count} devices pulled from PANW IoT Cloud.\n'
f'Exported {len(unique_macs)} devices (with available mac addresses) to Cisco ISE')
def main():
try:
status_msg = get_all_panw_iot_devices_and_send_to_cisco_ise()
except Exception as ex:
send_status_to_panw_iot_cloud("error", str(ex))
return_error(str(ex))
send_status_to_panw_iot_cloud("success", status_msg)
return_results(status_msg)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
# ==== src/python/py-accepted/231A.py | repo: cbarnson/UVa | license: Unlicense/MIT ====
#! python
# Problem # : 231A
# Created on : 2019-01-14 21:19:31
def Main():
n = int(input())
cnt = 0
    for i in range(0, n):
        if sum(list(map(int, input().split(' ')))) > 1:
            cnt += 1
    print(cnt)  # the original used for/else; with no break, this is equivalent
if __name__ == '__main__':
Main()
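# Sample (Codeforces 231A "Team"): for the input
#   3
#   1 1 0
#   1 1 1
#   1 0 0
# the program prints 2 (rounds where at least two of the three are sure).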
from math import ceil
from aiohttp import web
from lbry.testcase import CommandTestCase
class MockedCommentServer:
ERRORS = {
'INVALID_PARAMS': {'code': -32602, 'message': 'Invalid parameters'},
'INTERNAL': {'code': -32603, 'message': 'An internal error'},
'UNKNOWN': {'code': -1, 'message': 'An unknown or very miscellaneous error'},
'INVALID_METHOD': {'code': -32604, 'message': 'The Requested method does not exist'}
}
def __init__(self, port=2903):
self.port = port
self.app = web.Application(debug=True)
self.app.add_routes([web.post('/api', self.api)])
self.runner = None
self.server = None
self.comments = []
self.comment_id = 0
def create_comment(self, **comment):
self.comment_id += 1
comment['comment_id'] = self.comment_id
if 'channel_id' in comment:
comment['channel_url'] = 'lbry://' + comment['channel_name'] + '#' + comment['channel_id']
self.comments.append(comment)
return comment
def get_claim_comments(self, page=1, page_size=50, **kwargs):
return {
'page': page,
'page_size': page_size,
'total_pages': ceil(len(self.comments)/page_size),
'total_items': len(self.comments),
'items': (self.comments[::-1])[(page - 1) * page_size: page * page_size]
}
methods = {
'get_claim_comments': get_claim_comments,
'create_comment': create_comment,
}
def process_json(self, body) -> dict:
response = {'jsonrpc': '2.0', 'id': body['id']}
if body['method'] in self.methods:
params = body.get('params', {})
result = self.methods[body['method']](self, **params)
response['result'] = result
else:
response['error'] = self.ERRORS['INVALID_METHOD']
return response
async def start(self):
self.runner = web.AppRunner(self.app)
await self.runner.setup()
self.server = web.TCPSite(self.runner, 'localhost', self.port)
await self.server.start()
async def stop(self):
await self.runner.shutdown()
await self.runner.cleanup()
async def api(self, request):
body = await request.json()
if type(body) is list or type(body) is dict:
if type(body) is list:
response = [self.process_json(part) for part in body]
else:
response = self.process_json(body)
return web.json_response(response)
else:
raise TypeError('invalid type passed')
class CommentCommands(CommandTestCase):
async def asyncSetUp(self):
await super().asyncSetUp()
self.daemon.conf.comment_server = 'http://localhost:2903/api'
self.comment_server = MockedCommentServer(2903)
await self.comment_server.start()
self.addCleanup(self.comment_server.stop)
async def test01_comment_create(self):
channel = (await self.channel_create('@JimmyBuffett'))['outputs'][0]
stream = (await self.stream_create())['outputs'][0]
self.assertEqual(0, len((await self.daemon.jsonrpc_comment_list(stream['claim_id']))['items']))
comment = await self.daemon.jsonrpc_comment_create(
claim_id=stream['claim_id'],
channel_id=channel['claim_id'],
comment="It's 5 O'Clock Somewhere"
)
comments = (await self.daemon.jsonrpc_comment_list(stream['claim_id']))['items']
self.assertEqual(1, len(comments))
self.assertEqual(comment['comment_id'], comments[0]['comment_id'])
self.assertEqual(stream['claim_id'], comments[0]['claim_id'])
channel2 = (await self.channel_create('@BuffettJimmy'))['outputs'][0]
await self.daemon.jsonrpc_comment_create(
claim_id=stream['claim_id'],
channel_name=channel2['name'],
comment='Let\'s all go to Margaritaville',
parent_id=comments[0]['comment_id']
)
comments = (await self.daemon.jsonrpc_comment_list(stream['claim_id']))['items']
self.assertEqual(2, len(comments))
self.assertEqual(comments[0]['channel_id'], channel2['claim_id'])
self.assertEqual(comments[0]['parent_id'], comments[1]['comment_id'])
comment = await self.daemon.jsonrpc_comment_create(
claim_id=stream['claim_id'],
comment='Anonymous comment'
)
comments = (await self.daemon.jsonrpc_comment_list(stream['claim_id']))['items']
self.assertEqual(comment['comment_id'], comments[0]['comment_id'])
async def test02_unsigned_comment_list(self):
stream = (await self.stream_create())['outputs'][0]
comments = []
for i in range(28):
comment = await self.daemon.jsonrpc_comment_create(
comment=f'{i}',
claim_id=stream['claim_id'],
)
self.assertIn('comment_id', comment)
comments.append(comment)
comment_list = await self.daemon.jsonrpc_comment_list(
claim_id=stream['claim_id']
)
self.assertIs(comment_list['page_size'], 50)
self.assertIs(comment_list['page'], 1)
self.assertIs(comment_list['total_items'], 28)
for comment in comment_list['items']:
self.assertEqual(comment['comment'], comments.pop()['comment'])
signed_comment_list = await self.daemon.jsonrpc_comment_list(
claim_id=stream['claim_id'],
is_channel_signature_valid=True
)
self.assertIs(len(signed_comment_list['items']), 0)
async def test03_signed_comments_list(self):
channel = (await self.channel_create('@JimmyBuffett'))['outputs'][0]
stream = (await self.stream_create())['outputs'][0]
comments = []
for i in range(28):
comment = await self.daemon.jsonrpc_comment_create(
comment=f'{i}',
claim_id=stream['claim_id'],
channel_id=channel['claim_id'],
)
self.assertIn('comment_id', comment)
comments.append(comment)
comment_list = await self.daemon.jsonrpc_comment_list(
claim_id=stream['claim_id']
)
self.assertIs(comment_list['page_size'], 50)
self.assertIs(comment_list['page'], 1)
self.assertIs(comment_list['total_items'], 28)
for comment in comment_list['items']:
self.assertEqual(comment['comment'], comments.pop()['comment'])
signed_comment_list = await self.daemon.jsonrpc_comment_list(
claim_id=stream['claim_id'],
is_channel_signature_valid=True
)
self.assertIs(len(signed_comment_list['items']), 28)
# ==== encoder.py | repo: FinlayDaG33k/HBC | license: MIT ====
#!/usr/bin/python
"""
HBC by FinlayDaG33k under the MIT License
"""
import Pyro4
import os
import platform
import re
import logging
from threading import Timer
import subprocess
from encoder_cfg import pyro_host, pyro_port, ftp_host, ftp_port, ftp_user, ftp_pass
from encoder_cfg import IDLE, RUNNING, Task, getLanIP
from ftplib import FTP
import socket
handbrake_unix = '/usr/bin/HandBrakeCLI'
handbrake_win32 = 'C:\\Program Files\\Handbrake\\HandBrakeCLI.exe'
handbrake_win64 = 'C:\\Program Files (x86)\\Handbrake\\HandBrakeCLI.exe'
class Encoder(object):
"""
Main encoder object
"""
def __init__(self):
        # The dir which the encoder uses to store video files it grabs from the central server
# and files which it generates via handbrake
# TODO -- make this configurable
self.homedir = os.path.expanduser("~")
# Look up the central server
self.central = Pyro4.Proxy('PYRONAME:central.encoding@{0}:{1}'.format(pyro_host,pyro_port))
# Determine the handbrake path
# TODO -- This should probably be configurable too
self.handbrake = ''
if os.path.exists(handbrake_unix):
self.handbrake = handbrake_unix
elif os.path.exists(handbrake_win32):
self.handbrake = handbrake_win32
elif os.path.exists(handbrake_win64):
self.handbrake = handbrake_win64
self.status = IDLE
# The name used to register with Pyro Naming
# TODO -- Might want to use a better naming scheme, lazy linux users may not set hostnames
        # on all their hosts, meaning we could have multiple encoder.localhost's stepping on each other
self.name = 'encoder.{0}'.format(platform.node())
# Reference the external handbrake process
self.encodeProc = None
# This timer will check on the encoder's status every ten seconds
self.timer = Timer(10,self.checkForTask)
self.timer.start()
def getName(self):
return self.name
def getLine(self):
"""
Read from the handbrake process's stdout one char at a time
        until we hit a \r -- this is needed because if you try
a readline it'll hang until it hits \n -- which won't happen
until handbrake exits -- it updates the vid progress in place
with multiple \r messages
"""
line = ''
while True:
out = self.encodeProc.stdout.read(1)
if out:
if out == '\r':
break
if not out:
break
line += out
return line
def checkForTask(self):
if self.status == RUNNING:
# We (think) we're doing something
if self.encodeProc:
# Handbrake process reference exists
if self.encodeProc.poll() is not None:
# Handbrake has exited
# TODO -- we should do some validation on the handbrake exit code, just checking that the
# output file exists is pretty weak
if os.path.exists(os.path.join(self.homedir,self.task.getOutputName())):
# Since this file exists we assume things succeeded, FTP the video to the central server
if not self.sendVideo():
self.task.setErrors('Unable to send video')
# Complete the task and inform the central server that we're done
self.task.taskFinished()
self.task.setCompleted(100)
self.central.finishTask(self.task)
self.cleanUp()
return
else:
# We're not done yet, but handbrake is running, update the central server on our progress
if not self.central.updateTask(self.task):
self.cancel(self.task.getName())
else:
# Don't know why we think we're running -- probably a corner case here, but let's go back to IDLE
self.status = IDLE
else:
failed = False
            # Try to get a task from the central server
self.task = self.central.getTask(self.getName())
if self.task:
# We got a task, set our status to running, grab the video via FTP from the server and begin the encode
# process
self.status = RUNNING
if self.getVideo(self.task.getName()):
self.encodeVid()
else:
failed = True
if failed:
# Something bad happened with FTP, fail the task and tell the server
self.central.finishTask(self.task)
self.status = IDLE
# Reschedule the task so we'll check our state again in two seconds
self.timer = Timer(2,self.checkForTask)
self.timer.start()
def cleanUp(self):
"""
Various clean up operations that need to be performed
- Delete the video files, we shouldn't need them by now
- Cancel the update timer if it's still active since HB has exited
- Go back to IDLE
- Reschedule the main method timer
"""
if os.path.exists(os.path.join(self.homedir,self.task.getOutputName())):
os.unlink(os.path.join(self.homedir,self.task.getOutputName()))
if os.path.exists(os.path.join(self.homedir,self.task.getName())):
os.unlink(os.path.join(self.homedir,self.task.getName()))
self.updateTimer.cancel()
self.timer.cancel()
self.task = None
self.status = IDLE
self.timer = Timer(2,self.checkForTask)
self.timer.start()
def cancel(self,name):
"""
External call point to cancel the active task, used by the central server upon user request,
kills the handbrake process and cleans up
"""
if self.task:
if self.task.getName() == name:
if self.encodeProc:
self.encodeProc.kill()
self.cleanUp()
return True
return False
def updateCompleted(self):
"""
Timed method which gets the percentage completed from the handbrake stdout and updates the task
"""
out = self.getLine()
if out:
match = re.search('(\d+\.\d+)\s\%',out)
if match:
completed = match.group(1)
self.task.setCompleted(completed)
if self.encodeProc:
if self.encodeProc.poll() is None:
self.updateTimer = Timer(.1,self.updateCompleted)
self.updateTimer.start()
def getStatus(self):
return self.status
def sendVideo(self):
"""
Sends the encoded video back to the central server
"""
ftp = FTP()
ftp.connect(ftp_host,ftp_port)
ftp.login(ftp_user, ftp_pass)
ftp.storbinary('STOR {0}'.format(self.task.getOutputName()),open(os.path.join(self.homedir,self.task.getOutputName()),'rb'))
return True
def getVideo(self,video):
"""
Grabs the passed video from the central server
"""
ftp = FTP()
ftp.connect(ftp_host,ftp_port)
ftp.login(ftp_user, ftp_pass)
ftp.retrbinary('RETR {0}'.format(video), open(os.path.join(self.homedir,video),'wb').write)
return True
def encodeVid(self):
"""
Kick off the handbrake process with the various settings found in the task as arguements
Also starts the timer which will parse the handbrake output for completion percentages
"""
self.task.setOutputName(re.sub('\.\w*$','.{0}'.format(self.task.getFormat()),self.task.getName()))
self.task.taskStarted()
args = [self.handbrake]
if self.task.getEncoder():
args.extend(['-e',self.task.getEncoder()])
if self.task.getFormat():
args.extend(['-f',self.task.getFormat()])
if self.task.getQuality():
args.extend(['-q',self.task.getQuality()])
if self.task.getLarge():
args.append('-4')
args.extend(['-i',os.path.join(self.homedir,self.task.getName()),'-o',os.path.join(self.homedir,self.task.getOutputName())])
self.encodeProc = subprocess.Popen(args,stdout=subprocess.PIPE)
self.updateTimer = Timer(.1,self.updateCompleted)
self.updateTimer.start()
def main():
encoder = Encoder()
# Register encoder with Pyro naming
daemon = Pyro4.Daemon(host=getLanIP())
uri = daemon.register(encoder)
ns = Pyro4.locateNS(host=pyro_host,port=pyro_port)
try:
# Remove any stale bindings in naming
# TODO -- do a little more validation, a 'stale' binding may be a host with a duplicate name
ns.remove(encoder.getName())
except:
pass
ns.register(encoder.getName(),uri)
daemon.requestLoop()
if __name__ == "__main__":
main()
from __future__ import division
from matplotlib import pyplot as plt
import numpy as np
import pickle
"""
Shows a bar plot of the decision histogram for one robot position on the field
Example:
run without any parameters
$ python decision_histogram_plot.py
"""
# Set file for importing the decisions
decisions = pickle.load(open("../data/humanoids/simulate_every_pos-30-100.pickle", "rb"))
# Set robot position
fixed_rotation = 0
fixed_x = 1000
fixed_y = 100
for pos in decisions:
x, y, rotation, new_decision_histogram = pos
# only plot if desired position is found
if rotation == fixed_rotation and x == fixed_x and y == fixed_y:
plt.bar(range(0, len(new_decision_histogram)), new_decision_histogram)
plt.show()
# ==== python/primary/模块/print_tuple.py | repo: EstherLacan/jiangfw | license: Apache-2.0 ====
#!/usr/bin/python
# Filename: print_tuple.py
age = 22
myempty = ()
print(len(myempty))
a1 = ('aa',)
print(len(a1))
a2 = ('abcd')
print(len(a2),a2[3])
name = ('Swaroop')
print ('%s is %d years old' % (name, age))
print ('Why is %s playing with that python?' % name)
import boto
import logging
import os
from mongodb_consistent_backup.Common.Util import file_md5hash
from mongodb_consistent_backup.Errors import OperationError
class GsUploadThread:
def __init__(self, backup_dir, file_path, gs_path, bucket, project_id, access_key, secret_key, remove_uploaded=False, retries=5):
self.backup_dir = backup_dir
self.file_path = file_path
self.gs_path = gs_path
self.bucket = bucket
self.project_id = project_id
self.access_key = access_key
self.secret_key = secret_key
self.remove_uploaded = remove_uploaded
self.retries = retries
self.path = "%s/%s" % (self.bucket, self.gs_path)
self.meta_data_dir = "mongodb_consistent_backup-META"
self._metadata = None
def configure(self):
if not boto.config.has_section("Credentials"):
boto.config.add_section("Credentials")
boto.config.set("Credentials", "gs_access_key_id", self.access_key)
boto.config.set("Credentials", "gs_secret_access_key", self.secret_key)
if not boto.config.has_section("Boto"):
boto.config.add_section("Boto")
boto.config.setbool('Boto', 'https_validate_certificates', True)
def get_uri(self):
return boto.storage_uri(self.path, 'gs')
def gs_exists(self):
try:
self.metadata()
return True
except boto.exception.InvalidUriError:
return False
def metadata(self):
logging.debug("Getting metadata for path: %s" % self.path)
if not self._metadata:
self._metadata = self.get_uri().get_key()
return self._metadata
def gs_md5hash(self):
key = self.metadata()
if hasattr(key, 'etag'):
return key.etag.strip('"\'')
def success(self):
if self.remove_uploaded and not self.file_path.startswith(os.path.join(self.backup_dir, self.meta_data_dir)):
logging.debug("Removing successfully uploaded file: %s" % self.file_path)
os.remove(self.file_path)
def run(self):
f = None
try:
self.configure()
if self.gs_exists():
gs_md5hash = self.gs_md5hash()
if gs_md5hash and file_md5hash(self.file_path) == gs_md5hash:
logging.debug("Path %s already exists with the same checksum (%s), skipping" % (self.path, self.gs_md5hash()))
return
logging.debug("Path %s checksum and local checksum differ, re-uploading" % self.path)
else:
logging.debug("Path %s does not exist, uploading" % self.path)
            try:
                f = open(self.file_path, 'rb')
                uri = self.get_uri()
                retry = 0
                error = None
                while retry < self.retries:
                    try:
                        logging.info("Uploading %s to Google Cloud Storage (attempt %i/%i)" % (self.path, retry + 1, self.retries))
                        uri.new_key().set_contents_from_file(f)
                        # stop retrying once the upload succeeded
                        break
                    except Exception as e:
                        logging.error("Received error for Google Cloud Storage upload of %s: %s" % (self.path, e))
                        error = e
                        retry += 1
                if retry >= self.retries and error:
                    raise error
            finally:
                if f:
                    f.close()
self.success()
        except Exception as e:
logging.error("Uploading to Google Cloud Storage failed! Error: %s" % e)
raise OperationError(e)
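
# --- Added example (editor's sketch, not from the original project) ---
# A minimal synchronous driver for GsUploadThread; every value below is a
# placeholder assumption, not configuration from this repository. configure()
# is invoked inside run(), so constructing the object and calling run() is enough.
uploader = GsUploadThread(
    backup_dir="/backups",                  # assumed local backup root
    file_path="/backups/dump/file.tar.gz",  # assumed file to upload
    gs_path="dump/file.tar.gz",             # key inside the bucket
    bucket="my-backup-bucket",              # placeholder bucket name
    project_id="my-project",                # placeholder GCP project id
    access_key="GOOG...",                   # placeholder HMAC access key
    secret_key="...",                       # placeholder HMAC secret
)
uploader.run()  # uploads with up to `retries` attempts; raises OperationError on persistent failure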
# ---- file: Hello coding/selection_sort.py (repo: wooooooogi/SAlgorithm, license: MIT) ----
# Coded by Sungwook Kim
# Date: 2019-07-30
# Python version: 3.6.5
# IDE: Spyder 3
# Sort, information = [Name, Date, E-mail]
# Sample code is the 2001 stock information of Meritz Fire (메리츠화재), downloaded from KRX.
# Sort by price (to find when the price was highest in 2001).
import pandas as pd
import os

# Use the pandas library to read the csv information.
Data = pd.read_csv(os.getcwd() + "/selection_sort_sample_code.csv", encoding="ms949", index_col=False)
print(Data)
Data = Data[:-1]  # drop the trailing summary row

sorting_variable = "종가"      # closing price (column to sort by)
sorted_variable = "년/월/일"   # date (column reported alongside)

# Plain selection sort (descending) on the closing prices, keeping dates aligned.
prices = list(Data[sorting_variable].astype(float))
dates = list(Data[sorted_variable])
for i in range(len(prices)):
    # find the index of the largest remaining price
    max_idx = i
    for j in range(i + 1, len(prices)):
        if prices[j] > prices[max_idx]:
            max_idx = j
    prices[i], prices[max_idx] = prices[max_idx], prices[i]
    dates[i], dates[max_idx] = dates[max_idx], dates[i]

# The first entry is now the highest price of 2001 and the date it occurred.
print(dates[0], prices[0])
"""
Created by Christos Baziotis.
"""
import pickle

import numpy
from kutilities.helpers.data_preparation import print_dataset_statistics, \
    labels_to_categories, categories_to_onehot
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline

from embeddings.WordVectorsManager import WordVectorsManager
from modules.EmbeddingsExtractor import EmbeddingsExtractor
def prepare_dataset(X, y, pipeline, y_one_hot=True, y_as_is=False):
X = pipeline.fit_transform(X)
if y_as_is:
try:
return X, numpy.asarray(y, dtype=float)
        except (ValueError, TypeError):
return X, y
# 1 - Labels (positive) to categories (integer)
y_cat = labels_to_categories(y)
if y_one_hot:
# 2 - Labels to one-hot vectors
return X, categories_to_onehot(y_cat)
return X, y_cat
def get_embeddings(corpus, dim):
vectors = WordVectorsManager(corpus, dim).read()
vocab_size = len(vectors)
print('Loaded %s word vectors.' % vocab_size)
wv_map = {}
pos = 0
# +1 for zero padding token and +1 for <unk>
emb_matrix = numpy.ndarray((vocab_size + 2, dim), dtype='float32')
for i, (word, vector) in enumerate(vectors.items()):
pos = i + 1
wv_map[word] = pos
emb_matrix[pos] = vector
pos += 1
wv_map["<unk>"] = pos
emb_matrix[pos] = numpy.random.uniform(low=-0.05, high=0.05, size=dim)
return emb_matrix, wv_map
def prepare_text_only_dataset(X, pipeline):
X = pipeline.fit_transform(X)
return X
class Task4Loader:
def __init__(self, word_indices, text_lengths, loading_data=True, datafolder="", preprocess_typ="ekphrasis", **kwargs):
self.word_indices = word_indices
self.y_one_hot = kwargs.get("y_one_hot", True)
        self.pipeline = Pipeline([
            ('ext', EmbeddingsExtractor(word_indices=word_indices,
                                        max_lengths=text_lengths,
                                        add_tokens=True,
                                        unk_policy="random"))])
if(loading_data):
print("Loading data...")
self.X_train = pickle.load(open(
"{}X_train_{}.pickle".format(datafolder, preprocess_typ), "rb"))
self.X_test = pickle.load(open(
"{}X_test_{}.pickle".format(datafolder, preprocess_typ), "rb"))
self.y_train = pickle.load(open(
"{}y_train_{}.pickle".format(datafolder, preprocess_typ), "rb"))
self.y_test = pickle.load(open(
"{}y_test_{}.pickle".format(datafolder, preprocess_typ), "rb"))
print("-------------------\ntraining set stats\n-------------------")
print_dataset_statistics(self.y_train)
print("-------------------")
def load_train_val_test(self):
X_val, X_test, y_val, y_test = train_test_split(self.X_test, self.y_test,
test_size=0.5,
stratify=self.y_test,
random_state=42)
print("\nPreparing training set...")
training = prepare_dataset(self.X_train, self.y_train, self.pipeline,
self.y_one_hot)
print("\nPreparing validation set...")
validation = prepare_dataset(X_val, y_val, self.pipeline,
self.y_one_hot)
print("\nPreparing test set...")
testing = prepare_dataset(X_test, y_test, self.pipeline,
self.y_one_hot)
return training, validation, testing
def load_final(self):
print("\nPreparing training set...")
training = prepare_dataset(self.X_train, self.y_train, self.pipeline,
self.y_one_hot)
print("\nPreparing test set...")
testing = prepare_dataset(self.X_test, self.y_test, self.pipeline,
self.y_one_hot)
return training, testing
def decode_data_to_embeddings(self, X_data, y_data):
embedding_data = prepare_dataset(X_data, y_data, self.pipeline,
self.y_one_hot)
return embedding_data
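
# --- Added example (editor's sketch, not from the original project) ---
# Illustration of the embedding-matrix layout that get_embeddings() produces:
# row 0 is reserved for zero padding, rows 1..n hold the loaded word vectors,
# and the last row is the randomly initialised <unk> vector. The tiny
# `_vectors` dict below is invented for the illustration and stands in for
# WordVectorsManager(corpus, dim).read().
import numpy as _np

_dim = 3
_vectors = {"cat": _np.ones(_dim), "dog": _np.zeros(_dim)}
_emb = _np.zeros((len(_vectors) + 2, _dim), dtype="float32")
_map = {}
for _i, (_word, _vec) in enumerate(_vectors.items()):
    _map[_word] = _i + 1        # word indices start at 1; row 0 stays the padding row
    _emb[_i + 1] = _vec
_map["<unk>"] = len(_vectors) + 1
_emb[_map["<unk>"]] = _np.random.uniform(-0.05, 0.05, size=_dim)
assert _emb.shape == (len(_vectors) + 2, _dim)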
# ---- file: api/models/library.py (repo: aidun/seite50, license: Apache-2.0) ----
from django.db import models
from api.models.user import User
# Create your models here.
class Library(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255, blank=False, unique=True)
owner = models.ManyToManyField(User, related_name="organized_by", blank=False)
# Metadata
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
# ---- file: Codeforces_problems/Merge It A/solution.py (repo: KAHund/CompetitiveCode, license: MIT) ----
# We can check divisibility by 3 by taking the digit sum of each number.
# Numbers whose digit sum has remainder 0 count as-is; one number with remainder 1
# and one with remainder 2 can be merged greedily into a multiple of 3.
# Any three leftover numbers sharing the same remainder (all 1s or all 2s)
# can likewise be merged into one multiple of 3.
def sum_of(n):
s = 0
num = n
while(num>0):
s+=num%10
num = num//10
return s
for _ in range(int(input())):
n = int(input())
l = list(map(int, input().split()))
x_0 = 0
x_1 = 0
x_2 = 0
    for j in range(n):
        temp = sum_of(l[j])
if(temp%3 == 0):
x_0 += 1
elif((temp-1)%3 == 0):
x_1 += 1
else:
x_2+=1
temp = min(x_1, x_2)
count = x_0 + temp
x_1-=temp
x_2-=temp
count += x_1//3
count += x_2//3
print(count)
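
# --- Added worked example (editor's note, not part of the original solution) ---
# For l = [1, 2, 3] the digit-sum remainders are 1, 2 and 0, so x_0 = 1,
# x_1 = 1, x_2 = 1. One (remainder-1, remainder-2) pair merges into a multiple
# of 3, giving count = 1 + 1 = 2, and no group of three same-remainder numbers
# is left over, so the program prints 2.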
# ---- file: backend/apps/ineedstudent/views.py (repo: n-hackert/match4healthcare, license: MIT) ----
from functools import lru_cache
from datetime import datetime
import time

from django.db import models
from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.translation import gettext_lazy as _
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.gzip import gzip_page
from django.core.mail import EmailMessage
from django.conf import settings

import django_tables2 as tables
from django_tables2 import TemplateColumn

from apps.mapview.utils import plzs, get_plzs_close_to, haversine
from apps.mapview.views import get_ttl_hash
from apps.iamstudent.models import Student, EmailToHospital
from apps.ineedstudent.models import Hospital
from apps.ineedstudent.forms import HospitalForm, EmailToHospitalForm, HospitalFormZustimmung, PostingForm
from apps.ineedstudent.tables import ContactedTable
from apps.accounts.decorator import student_required, hospital_required
from apps.accounts.utils import send_password_set_email
class StudentTable(tables.Table):
info = TemplateColumn(template_name='info_button.html')
checkbox = TemplateColumn(template_name='checkbox_studenttable.html')
class Meta:
model = Student
template_name = "django_tables2/bootstrap4.html"
exclude = ['uuid','registration_date','id']
fields = ['user']
# Should be safe against the BREACH attack because we don't have user input in the response body
@gzip_page
def hospital_overview(request):
locations_and_number = prepare_hospitals(ttl_hash=get_ttl_hash(60))
template = loader.get_template('map_hospitals.html')
context = {
'locations': list(locations_and_number.values()),
}
return HttpResponse(template.render(context, request))
@lru_cache(maxsize=1)
def prepare_hospitals(ttl_hash=None):
hospitals = Hospital.objects.filter(user__validated_email=True, is_approved=True, appears_in_map=True)
locations_and_number = {}
for hospital in hospitals:
if len(hospital.sonstige_infos) != 0:
cc = hospital.countrycode
plz = hospital.plz
key = cc + "_" + plz
if key in locations_and_number:
locations_and_number[key]["count"] += 1
locations_and_number[key]["uuid"] = None
else:
lat, lon, ort = plzs[cc][plz]
locations_and_number[key] = {
"uuid": hospital.uuid,
"countrycode": cc,
"plz": plz,
"count": 1,
"lat": lat,
"lon": lon,
"ort": ort
}
return locations_and_number
@login_required
def hospital_list(request, countrycode, plz):
if countrycode not in plzs or plz not in plzs[countrycode]:
        # TODO: raise a nicer error
        return HttpResponse("Postal code: " + plz + " is not a valid postal code in " + countrycode)
lat, lon, ort = plzs[countrycode][plz]
table = HospitalTable(Hospital.objects.filter(user__validated_email=True, is_approved=True, plz=plz, appears_in_map=True))
table.paginate(page=request.GET.get("page", 1), per_page=25)
context = {
'countrycode': countrycode,
'plz': plz,
'ort': ort,
'table': table}
return render(request, "list_hospitals_by_plz.html", context)
@login_required
@hospital_required
def zustimmung(request):
user = request.user
h = Hospital.objects.get(user=user)
if request.method == 'POST':
form_info = HospitalFormZustimmung(request.POST, instance=h)
if form_info.is_valid():
h.save()
return HttpResponseRedirect("/accounts/login_redirect")
else:
form_info = HospitalFormZustimmung()
return render(request, 'zustimmung.html', {'form_info': form_info })
class HospitalTable(tables.Table):
info = TemplateColumn(template_name='info_button.html')
class Meta:
model = Hospital
template_name = "django_tables2/bootstrap4.html"
fields = ['firmenname','ansprechpartner']
exclude = ['uuid','registration_date','id']
class ApprovalHospitalTable(HospitalTable):
info = TemplateColumn(template_name='info_button.html')
status = TemplateColumn(template_name='approval_button.html')
delete = TemplateColumn(template_name='delete_button.html')
class Meta:
model = Hospital
template_name = "django_tables2/bootstrap4.html"
fields = ['firmenname','ansprechpartner','user','telefon','plz','user__validated_email', 'approval_date', 'approved_by']
exclude = ['uuid','id', 'registration_date']
@login_required
def hospital_view(request,uuid):
h = Hospital.objects.filter(uuid=uuid)[0]
initial = {
        'subject': _('New offer to help'),
        'message': _('Hello, I saw your request on the match4healthcare platform and I am qualified for the position.\nI am...\nI would like to help by...')
}
email_form = EmailToHospitalForm(initial=initial)
if request.POST and request.user.is_student and request.user.validated_email:
s = request.user.student
email_form = EmailToHospitalForm(request.POST, initial=initial)
if email_form.is_valid():
start_text = _("Hallo %s,\n\nSie haben über unsere Plattform match4healthcare von %s (%s) eine Antwort auf Ihre Anzeige bekommen.\n"
"Falls Sie keine Anfragen mehr bekommen möchten, deaktivieren Sie Ihre "
"Anzeige im Profil online.\n\n" % (h.ansprechpartner, s.name_first, request.user.email))
message = start_text + \
"===============================================\n\n" + \
email_form.cleaned_data['message'] + \
"\n\n===============================================\n\n" + \
"Mit freundlichen Grüßen,\nIhr match4healthcare Team"
emailtohospital = EmailToHospital.objects.create(student=s,hospital=h,message=email_form.cleaned_data['message'],subject=email_form.cleaned_data['subject'])
email = EmailMessage(
subject='[match4healthcare] ' + email_form.cleaned_data['subject'],
body=message,
from_email=settings.NOREPLY_MAIL,
to=[h.user.email]
)
email.send()
emailtohospital.send_date = datetime.now()
emailtohospital.save()
return render(request,'hospital_contacted.html')
lat1, lon1, ort1 = plzs[h.countrycode][h.plz]
    context = {
        'hospital': h,
        'uuid': h.uuid,
        'ort': ort1,
        'mail': h.user.username,
    }
if request.user.is_student:
s = Student.objects.get(user=request.user)
lat2, lon2, context["student_ort"] = plzs[s.countrycode][s.plz]
context["distance"] = int(haversine(lon1, lat1, lon2, lat2))
context["plz_student"] = s.plz
context['email_form'] = email_form
return render(request, 'hospital_view.html', context)
@login_required
@hospital_required
def change_posting(request):
if request.method == 'POST':
anzeige_form = PostingForm(request.POST,instance=request.user.hospital)
if anzeige_form.is_valid():
anzeige_form.save()
            messages.add_message(request, messages.INFO, _('Your posting was updated successfully.'))
else:
anzeige_form = PostingForm(instance=request.user.hospital)
context = {
'anzeige_form': anzeige_form
}
return render(request, 'change_posting.html', context)
@login_required
@hospital_required
def hospital_dashboard(request):
    # table of contacted students
values = ['student','registration_date','message','subject']
qs = request.user.hospital.emailtosend_set.all().values(*values,is_activated=models.F('student__is_activated' ))
kontaktiert_table = ContactedTable(qs)
context = {
'already_contacted': len(qs) > 0,
'has_posting': request.user.hospital.appears_in_map,
'posting_text': request.user.hospital.sonstige_infos,
'kontaktiert_table' : kontaktiert_table
}
return render(request, 'hospital_dashboard.html', context)
import demistomock as demisto
def test_main(mocker):
from PrintRaw import main
    # a value with tabs and newlines should be echoed back as its raw repr
mocker.patch.object(demisto, 'args', return_value={
'value': '\tthat was a tab \n\n\nthree newlines\tafter another tab\n'
})
mocker.patch.object(demisto, 'results')
main()
assert demisto.results.call_count == 1
results = demisto.results.call_args[0][0]
assert results == r"'\tthat was a tab \n\n\nthree newlines\tafter another tab\n'"
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@date: 2021-Today
@author: [email protected]
"""
import numpy as np
from TreeModelLib.GrowthAndDeathDynamics.Mortality.Random import Random
class RandomGrowth(Random):
def __init__(self, args, case):
super(Random, self).__init__(args, case)
# Read input parameters from xml file
self.getInputParameters(args)
# Default values if no inputs are given
        try:
            self.k_die
        except AttributeError:
            # Calibration factor default: 1e-12
            self.k_die = 1e-12
            print("NOTE: Use default `k_die`: " + str(self.k_die) + ".")
def getSurvival(self, args):
self.survive = 1
# Calculate the probability to die
args.delta_volume = args.volume - args.volume_before
# = dV/dt/V
relative_volume_increment = args.delta_volume / (args.time *
args.volume)
self.p_die = self.k_die / relative_volume_increment
# Get a random number
r = np.random.uniform(0, 1, 1)
if r < self.p_die:
self.survive = 0
print("\t Tree died randomly. Random number: " + str(r[0]) +
", p: " + str(self.p_die))
return self.survive
def getMortalityVariables(self, args, growth_concept_information):
# Variable to store volume of previous time step (m³)
try:
args.volume_before = growth_concept_information[
"volume_previous_ts"]
if args.volume_before == "NaN":
args.volume_before = 0
except KeyError:
args.volume_before = 0
def setMortalityVariables(self, args, growth_concept_information):
# The current tree volume is the volume of t-1 in the next time step
growth_concept_information["volume_previous_ts"] = \
args.volume
return growth_concept_information
def getInputParameters(self, args):
# All tags are optional
missing_tags = ["type", "mortality", "k_die"]
for arg in args.iterdescendants():
tag = arg.tag
if tag == "k_die":
self.k_die = float(args.find("k_die").text)
elif tag == "type":
case = args.find("type").text
try:
missing_tags.remove(tag)
except ValueError:
print("WARNING: Tag " + tag +
" not specified for " + super().getConceptName() +
" (" + case + ") " +
"mortality initialisation!")
# ---- file: PMIa/2015/Donkor_A_H/task_6_10.py (repo: YukkaSarasti/pythonintask, license: Apache-2.0) ----
# Task 6. Variant 10.
# Create a game in which the computer picks the name of one of the three countries
# of the "Triple Alliance" military-political bloc, and the player has to guess it.
# Donkor A.H.
# 14.04.2016
import random

x = random.choice(['Germany', 'Austria-Hungary', 'Italy'])
y = input('Let us play a game. I have picked one of the three countries of the "Triple Alliance" military-political bloc; guess which country it is. ')
z = 1
while y != x:
    print("Wrong guess!")
    z += 1
    y = input('Try again ')
else:
    print("Correct! You guessed it!!!")
    print("Number of attempts: " + str(z))
input("\nPress Enter to exit")
# ---- file: master.py (repo: ameliecordier/IIK, license: MIT) ----
from datahandler import expertPatterns
from datahandler import miningPatterns
from datahandler import analyser as analyser
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
import time
def plot_two_results(norev, rev, fignum, legend, pp):
"""
    Display helper for one revision vs. no-revision comparison curve
"""
x = list(range(len(norev.results)))
xrev = list(range(len(rev.results)))
norevList = []
revList = []
for elt in norev.results:
norevList.append(elt["idxMining"])
for elt in rev.results:
revList.append(elt["idxMining"])
plt.figure(fignum)
    plt.plot(x, norevList, 'r', linestyle="-", label="Without revision")
    plt.plot(xrev, revList, 'g', linestyle="--", label="With revision")
    plt.xlabel('Iteration')
    plt.ylabel('Pattern rank')
plt.title(legend)
plt.legend()
plt.savefig(pp, format="pdf")
def plot_three_results(rand, freq, cove, figNum, legend, pp):
"""
    Display helper for one curve comparing the three methods
    """
    # Generate the graphs
x = list(range(len(rand.results)))
y = list(range(len(freq.results)))
z = list(range(len(cove.results)))
randomList = []
freqList = []
covList = []
for elt in rand.results:
randomList.append(elt["idxMining"])
for elt in freq.results:
freqList.append(elt["idxMining"])
for elt in cove.results:
covList.append(elt["idxMining"])
plt.figure(figNum)
plt.plot(x, randomList, 'r', linestyle="-", label="Random")
    plt.plot(y, freqList, 'g', linestyle="--", label="Freq")
    plt.plot(z, covList, 'b', linestyle="-.", label="Cov")
    plt.xlabel('Iteration')
    plt.ylabel('Pattern rank')
plt.title(legend)
plt.legend()
plt.savefig(pp, format="pdf")
def threefold_comp(mining, expert, nameExpe, sortingFreq, sortingCov):
"""
    Compares random, frequency, and event-coverage orderings.
    For frequency and coverage, the sort criterion is the one passed as parameter.
    The results are stored in the directory nameExpe.
"""
    # Read the expert patterns
ep = expertPatterns.ExpertPatterns()
ep.getPatterns(expert)
    # Create the directory to store the results
    try:
        os.mkdir("DATA/" + nameExpe)
    except OSError:
        print("Directory already there")
# Random
randnorevbegin = time.time()
mpRandNoRev = miningPatterns.Patterns(mining, ";", 13, 11)
fname = "DATA/" + nameExpe + "/no-rev_beforeSortRandom.csv"
mpRandNoRev.toFile(fname)
anaRandNoRev = mpRandNoRev.findPatterns(ep)
fname = "DATA/" + nameExpe + "/no-rev_analyseRandom.csv"
anaRandNoRev.toFile(fname)
fname = "DATA/" + nameExpe + "/no-rev_afterSortRandom.csv"
mpRandNoRev.toFile(fname)
del mpRandNoRev
randnorevend = time.time()
randnorevtime = randnorevend-randnorevbegin
print(randnorevtime)
# Freq
freqnorevbegin = time.time()
mpFreqNoRev = miningPatterns.Patterns(mining, ";", 13, 11)
fname = "DATA/" + nameExpe + "/no-rev_beforeSortFreq.csv"
mpFreqNoRev.toFile(fname)
mpFreqNoRev.sortBy(sortingFreq)
anaFreqNoRev = mpFreqNoRev.findPatterns(ep)
fname = "DATA/" + nameExpe + "/no-rev_analyseFreq.csv"
anaFreqNoRev.toFile(fname)
fname = "DATA/" + nameExpe + "/no-rev_afterSortFreq.csv"
mpFreqNoRev.toFile(fname)
del mpFreqNoRev
freqnorevend = time.time()
freqnorevtime = freqnorevend-freqnorevbegin
print(freqnorevtime)
# Cov evt
covnorevbegin = time.time()
mpCoveNoRev = miningPatterns.Patterns(mining, ";", 13, 11)
fname = "DATA/" + nameExpe + "/no-rev_beforeSortCovEvt.csv"
mpCoveNoRev.toFile(fname)
mpCoveNoRev.sortBy(sortingCov)
anaCoveNoRev = mpCoveNoRev.findPatterns(ep)
fname = "DATA/" + nameExpe + "/no-rev_analyseCovEvt.csv"
anaCoveNoRev.toFile(fname)
fname = "DATA/" + nameExpe + "/no-rev_afterSortCovEvt.csv"
mpCoveNoRev.toFile(fname)
del mpCoveNoRev
covnorevend = time.time()
covnorevtime = covnorevend-covnorevbegin
print(covnorevtime)
# Random
randbegin = time.time()
mpRand = miningPatterns.Patterns(mining, ";", 13, 11)
fname = "DATA/" + nameExpe + "/rev_beforeSortRandom.csv"
mpRand.toFile(fname)
anaRand = mpRand.findPatternsWithRevision(ep)
fname = "DATA/" + nameExpe + "/rev_analyseRandom.csv"
anaRand.toFile(fname)
fname = "DATA/" + nameExpe + "/rev_afterSortRandom.csv"
mpRand.toFile(fname)
del mpRand
randend = time.time()
randtime = randend - randbegin
print(randtime)
# Freq
freqbegin = time.time()
mpFreq = miningPatterns.Patterns(mining, ";", 13, 11)
fname = "DATA/" + nameExpe + "/rev_beforeSortFreq.csv"
mpFreq.toFile(fname)
mpFreq.sortBy(sortingFreq)
anaFreq = mpFreq.findPatternsWithRevision(ep)
fname = "DATA/" + nameExpe + "/rev_analyseFreq.csv"
anaFreq.toFile(fname)
fname = "DATA/" + nameExpe + "/rev_afterSortFreq.csv"
mpFreq.toFile(fname)
del mpFreq
freqend = time.time()
freqtime = freqend - freqbegin
print(freqtime)
# Cov evt
covbegin = time.time()
mpCove = miningPatterns.Patterns(mining, ";", 13, 11)
fname = "DATA/" + nameExpe + "/rev_beforeSortCovEvt.csv"
mpCove.toFile(fname)
mpCove.sortBy(sortingCov)
anaCove = mpCove.findPatternsWithRevision(ep)
fname = "DATA/" + nameExpe + "/rev_analyseCovEvt.csv"
anaCove.toFile(fname)
fname = "DATA/" + nameExpe + "/rev_afterSortCovEvt.csv"
mpCove.toFile(fname)
del mpCove
covend = time.time()
covtime = covend - covbegin
print(covtime)
    # Generate the result graphs
    pdfname = "DATA/" + nameExpe + "/results.pdf"
    pp = PdfPages(pdfname)
    legende = "Comparison of results without revision"
    plot_three_results(anaRandNoRev, anaFreqNoRev, anaCoveNoRev, 1, legende, pp)
    legende = "Comparison of results with revision"
    plot_three_results(anaRand, anaFreq, anaCove, 2, legende, pp)
    legende = "Performance of random"
    plot_two_results(anaRandNoRev, anaRand, 3, legende, pp)
    legende = "Performance of freq"
    plot_two_results(anaFreqNoRev, anaFreq, 4, legende, pp)
    legende = "Performance of event cov"
    plot_two_results(anaCoveNoRev, anaCove, 5, legende, pp)
    pp.close()
    print(randtime, covtime, freqtime, randnorevtime, freqnorevtime, covnorevtime)
def threefold_compNoFiles(mining, expert, nameExpe, sortingFreq, sortingCov):
"""
    Compares random, frequency, and event-coverage orderings.
    For frequency and coverage, the sort criterion is the one passed as parameter.
    The results are stored in the directory nameExpe.
    Specificity: apart from the result graphs, no files are generated.
"""
    # Read the expert patterns
ep = expertPatterns.ExpertPatterns()
ep.getPatterns(expert)
    # Create the directory to store the results
    try:
        os.mkdir("DATA/" + nameExpe)
    except OSError:
        print("Directory already there")
# Random
a = time.time()
mpRandNoRev = miningPatterns.Patterns(mining, ";", 13, 11)
anaRandNoRev = mpRandNoRev.findPatterns(ep)
b = time.time()
del mpRandNoRev
print(b-a)
# Freq
mpFreqNoRev = miningPatterns.Patterns(mining, ";", 13, 11)
mpFreqNoRev.sortBy(sortingFreq)
anaFreqNoRev = mpFreqNoRev.findPatterns(ep)
del mpFreqNoRev
# Cov evt
mpCoveNoRev = miningPatterns.Patterns(mining, ";", 13, 11)
mpCoveNoRev.sortBy(sortingCov)
anaCoveNoRev = mpCoveNoRev.findPatterns(ep)
del mpCoveNoRev
# Random
mpRand = miningPatterns.Patterns(mining, ";", 13, 11)
anaRand = mpRand.findPatternsWithRevision(ep)
del mpRand
# Freq
mpFreq = miningPatterns.Patterns(mining, ";", 13, 11)
mpFreq.sortBy(sortingFreq)
anaFreq = mpFreq.findPatternsWithRevision(ep)
del mpFreq
# Cov evt
mpCove = miningPatterns.Patterns(mining, ";", 13, 11)
mpCove.sortBy(sortingCov)
anaCove = mpCove.findPatternsWithRevision(ep)
del mpCove
    # Generate the result graphs
    pdfname = "DATA/" + nameExpe + "/results.pdf"
    pp = PdfPages(pdfname)
    legende = "Comparison of results without revision"
    plot_three_results(anaRandNoRev, anaFreqNoRev, anaCoveNoRev, 1, legende, pp)
    legende = "Comparison of results with revision"
    plot_three_results(anaRand, anaFreq, anaCove, 2, legende, pp)
    legende = "Performance of random"
    plot_two_results(anaRandNoRev, anaRand, 3, legende, pp)
    legende = "Performance of freq"
    plot_two_results(anaFreqNoRev, anaFreq, 4, legende, pp)
    legende = "Performance of event cov"
    plot_two_results(anaCoveNoRev, anaCove, 5, legende, pp)
    pp.close()
# Experiment 1: original ibert, simple sort
# (kept active so the main code below has a defined configuration;
#  comment it out and enable another block to run a different experiment)
mining = "DATA/v1_ibert_fouille.csv"
expert = "DATA/v1_ibert_expert.csv"
xpname = "v1_ibert_standard"
sortingFreq = [("freq", "desc")]
sortingCov = [("cov evt", "desc")]

# Experiment 2: original ibert, sort by length first
'''
mining = "DATA/v1_ibert_fouille.csv"
expert = "DATA/v1_ibert_expert.csv"
xpname = "v1_ibert_tri_longueur"
sortingFreq = [("long", "desc"), ("freq", "desc")]
sortingCov = [("long", "desc"), ("cov evt", "desc")]
'''
# Experiment 3: original debussy, simple sort
'''
mining = "DATA/v1_debussy_fouille.csv"
expert = "DATA/v1_debussy_expert.csv"
xpname = "v1_debussy_standard"
sortingFreq = [("freq", "desc")]
sortingCov = [("cov evt", "desc")]
'''
# Experiment 4: original debussy, sort by length first
'''
mining = "DATA/v1_debussy_fouille.csv"
expert = "DATA/v1_debussy_expert.csv"
xpname = "v1_debussy_tri_longueur"
sortingFreq = [("long", "desc"), ("freq", "desc")]
sortingCov = [("long", "desc"), ("cov evt", "desc")]
'''
# Experiment 5: original reichert, simple sort
'''
mining = "DATA/v1_reichert_fouille.csv"
expert = "DATA/v1_reichert_expert.csv"
xpname = "v1_reichert_standard"
sortingFreq = [("freq", "desc")]
sortingCov = [("cov evt", "desc")]
'''
# Experiment 6: original reichert, sort by length first
'''
mining = "DATA/v1_reichert_fouille.csv"
expert = "DATA/v1_reichert_expert.csv"
xpname = "v1_reichert_tri_longueur"
sortingFreq = [("long", "desc"), ("freq", "desc")]
sortingCov = [("long", "desc"), ("cov evt", "desc")]
'''
# Experiment 7: ibert v2, simple sort
# Caution: for this one the files have not been generated
# TODO
'''
mining = "DATA/v2_ibert_fouille.csv"
expert = "DATA/v2_ibert_expert.csv"
xpname = "v2_ibert_standard"
sortingFreq = [("freq", "desc")]
sortingCov = [("cov evt", "desc")]
'''
# Experiment 8: ibert v2, sort by length first
# Very long (2h10)
'''
mining = "DATA/v2_ibert_fouille.csv"
expert = "DATA/v2_ibert_expert.csv"
xpname = "v2_ibert_tri_longueur"
sortingFreq = [("long", "desc"), ("freq", "desc")]
sortingCov = [("long", "desc"), ("cov evt", "desc")]
'''
# Experiment 9: debussy v2, simple sort
'''
mining = "DATA/v2_debussy_fouille.csv"
expert = "DATA/v2_debussy_expert.csv"
xpname = "v2_debussy_standard"
sortingFreq = [("freq", "desc")]
sortingCov = [("cov evt", "desc")]
'''
# Experiment 10: debussy v2, sort by length first
'''
mining = "DATA/v2_debussy_fouille.csv"
expert = "DATA/v2_debussy_expert.csv"
xpname = "v2_debussy_tri_longueur"
sortingFreq = [("long", "desc"), ("freq", "desc")]
sortingCov = [("long", "desc"), ("cov evt", "desc")]
'''
# Main code
a = time.time()
threefold_comp(mining, expert, xpname, sortingFreq, sortingCov)
b = time.time()
print(b-a)
""" Implements the matching algorithm used to match attendees to occasions.
The algorithm used is based on Deferred Acceptance. The algorithm has a
quadratic runtime.
"""
from onegov.activity import Attendee, Booking, Occasion, Period
from onegov.activity.matching.score import Scoring
from onegov.activity.matching.utils import overlaps, LoopBudget, hashable
from onegov.activity.matching.utils import booking_order, unblockable
from onegov.core.utils import Bunch
from itertools import groupby, product
from sortedcontainers import SortedSet
from sqlalchemy.orm import joinedload, defer
class AttendeeAgent(hashable('id')):
""" Acts on behalf of the attendee with the goal to get a stable booking
with an occasion.
A booking/occasion pair is considered stable if there exists no other
such pair which is preferred by both the attendee and the occasion.
In other words, if there's no other occasion that would accept the
attendee over another attendee.
"""
    __slots__ = ('id', 'limit', 'wishlist', 'accepted', 'blocked',
                 'minutes_between', 'alignment')
def __init__(self, id, bookings, limit=None, minutes_between=0,
alignment=None):
self.id = id
self.limit = limit
self.wishlist = SortedSet(bookings, key=booking_order)
self.accepted = set()
self.blocked = set()
self.minutes_between = minutes_between
self.alignment = alignment
def blocks(self, subject, other):
return overlaps(
subject, other, self.minutes_between, self.alignment,
with_anti_affinity_check=True)
def accept(self, booking):
""" Accepts the given booking. """
self.wishlist.remove(booking)
self.accepted.add(booking)
if self.limit and len(self.accepted) >= self.limit:
self.blocked |= self.wishlist
else:
self.blocked |= {
b for b in self.wishlist if self.blocks(booking, b)
}
self.wishlist -= self.blocked
def deny(self, booking):
""" Removes the given booking from the accepted bookings. """
self.wishlist.add(booking)
self.accepted.remove(booking)
# remove bookings from the blocked list which are not blocked anymore
for booking in unblockable(
self.accepted, self.blocked, with_anti_affinity_check=True):
self.blocked.remove(booking)
self.wishlist.add(booking)
@property
def is_valid(self):
""" Returns True if the results of this agent are valid.
The algorithm should never get to this stage, so this is an extra
security measure to make sure there's no bug.
"""
for a, b in product(self.accepted, self.accepted):
if a != b and self.blocks(a, b):
return False
return True
class OccasionAgent(hashable('id')):
""" Represents the other side of the Attendee/Occasion pair.
    While the attendee agent will try to get the best possible occasion
    according to the wishes of the attendee, the occasion agent will
    try to get the best attendee according to the wishes of the occasion.
    These wishes may include hard-coded rules or preferences defined by the
organiser/admin, who may manually prefer certain attendees over others.
"""
    __slots__ = ('id', 'occasion', 'bookings', 'attendees', 'score_function')
def __init__(self, occasion, score_function=None):
self.id = occasion.id
self.occasion = occasion
self.bookings = set()
self.attendees = {}
self.score_function = score_function or (lambda b: b.score)
@property
def full(self):
return len(self.bookings) >= (self.occasion.max_spots)
def preferred(self, booking):
""" Returns the first booking with a lower score than the given
booking (which indicates that the given booking is preferred over
the returned item).
If there's no preferred booking, None is returned.
"""
return next(
(
b for b in self.bookings
if self.score_function(b) < self.score_function(booking)
),
None
)
def accept(self, attendee, booking):
self.attendees[booking] = attendee
self.bookings.add(booking)
attendee.accept(booking)
def deny(self, booking):
self.attendees[booking].deny(booking)
self.bookings.remove(booking)
del self.attendees[booking]
def match(self, attendee, booking):
# as long as there are spots, automatically accept new requests
if not self.full:
self.accept(attendee, booking)
return True
# if the occasion is already full, accept the booking by throwing
# another one out, if there exists a better fit
over = self.preferred(booking)
if over:
self.deny(over)
self.accept(attendee, booking)
return True
return False
def deferred_acceptance(bookings, occasions,
score_function=None,
validity_check=True,
stability_check=False,
hard_budget=True,
default_limit=None,
attendee_limits=None,
minutes_between=0,
alignment=None,
sort_bookings=True):
""" Matches bookings with occasions.
:score_function:
A function accepting a booking and returning a score. Occasions prefer
bookings with a higher score over bookings with a lower score, if and
only if the occasion is not yet full.
The score function is meant to return a constant value for each
booking during the run of the algorithm. If this is not the case,
the algorithm might not halt.
:validity_check:
Ensures that the algorithm doesn't lead to any overlapping bookings.
Runs in O(b) time, where b is the number of bookings per period.
:stability_check:
Ensures that the result does not contain any blocking pairs, that is
it checks that the result is stable. This runs in O(b^3) time, so
do not run this in production (it's more of a testing tool).
:hard_budget:
Makes sure that the algorithm halts eventually by raising an exception
if the runtime budget of O(a*b) is reached (number of attendees
times the number of bookings).
        Feel free to prove that this can't happen and then remove the check ;)
:default_limit:
The maximum number of bookings which should be accepted for each
attendee.
:attendee_limits:
The maximum number of bookings which should be accepted for each
attendee. Keyed by the attendee id, this dictionary contains
per-attendee limits. Those fall back to the default_limit.
:minutes_between:
The minutes between each booking that should be considered
transfer-time. That is the time it takes to get from one booking
to another. Basically acts as a suffix to each booking, extending
        its end time by n minutes.
:alignment:
Align the date range to the given value. Currently only 'day' is
supported. When an alignment is active, all bookings are internally
stretched to at least cover the alignment.
For example, if 'day' is given, a booking that lasts 4 hours is
considered to last the whole day and it will block out bookings
on the same day.
Note that the ``minutes_between`` parameter is independent of this.
        That is, if there are 90 minutes between bookings and the bookings are
aligned to the day, there can only be a booking every other day::
10:00 - 19:00 becomes 00:00 - 24:00 + 90mins.
Usually you probably do not want minutes_between combined with
an alignment.
"""
assert alignment in (None, 'day')
if sort_bookings:
bookings = sorted(bookings, key=lambda b: b.attendee_id)
attendee_limits = attendee_limits or {}
# pre-calculate the booking scores
score_function = score_function or Scoring()
for booking in bookings:
booking.score = score_function(booking)
# after the booking score has been calculated, the scoring function
# should no longer be used for performance reasons
score_function = None
occasions = {o.id: OccasionAgent(o) for o in occasions}
attendees = {
aid: AttendeeAgent(
aid,
limit=attendee_limits.get(aid, default_limit),
bookings=bookings,
minutes_between=minutes_between,
alignment=alignment
)
for aid, bookings in groupby(bookings, key=lambda b: b.attendee_id)
}
# I haven't proven yet that the following loop will always end. Until I
# do there's a fallback check to make sure that we'll stop at some point
budget = LoopBudget(max_ticks=len(bookings) * len(attendees))
# while there are attendees with entries in a wishlist
while next((a for a in attendees.values() if a.wishlist), None):
if budget.limit_reached(as_exception=hard_budget):
break
candidates = [a for a in attendees.values() if a.wishlist]
matched = 0
# match attendees to courses
while candidates:
candidate = candidates.pop()
for booking in candidate.wishlist:
if occasions[booking.occasion_id].match(candidate, booking):
matched += 1
break # required because the wishlist has been changed
# if no matches were possible the situation can't be improved
if not matched:
break
# make sure the algorithm didn't make any mistakes
if validity_check:
for a in attendees.values():
assert a.is_valid
# make sure the result is stable
if stability_check:
assert is_stable(attendees.values(), occasions.values())
return Bunch(
open=set(b for a in attendees.values() for b in a.wishlist),
accepted=set(b for a in attendees.values() for b in a.accepted),
blocked=set(b for a in attendees.values() for b in a.blocked)
)
def deferred_acceptance_from_database(session, period_id, **kwargs):
period = session.query(Period).filter(Period.id == period_id).one()
b = session.query(Booking)
b = b.options(joinedload(Booking.occasion))
b = b.filter(Booking.period_id == period_id)
b = b.filter(Booking.state != 'cancelled')
b = b.filter(Booking.created >= period.created)
b = b.order_by(Booking.attendee_id)
o = session.query(Occasion)
o = o.filter(Occasion.period_id == period_id)
o = o.options(
defer('meeting_point'),
defer('note'),
defer('cost')
)
if period.max_bookings_per_attendee:
default_limit = period.max_bookings_per_attendee
attendee_limits = None
else:
default_limit = None
attendee_limits = {
a.id: a.limit for a in
session.query(Attendee.id, Attendee.limit)
}
# fetch it here as it'll be reused multiple times
bookings = list(b)
results = deferred_acceptance(
bookings=bookings, occasions=o,
default_limit=default_limit, attendee_limits=attendee_limits,
minutes_between=period.minutes_between, alignment=period.alignment,
sort_bookings=False, **kwargs)
# write the changes to the database
def update_bookings(targets, state):
q = session.query(Booking)
q = q.filter(Booking.state != state)
q = q.filter(Booking.state != 'cancelled')
q = q.filter(Booking.period_id == period_id)
q = q.filter(Booking.id.in_(t.id for t in targets))
for booking in q:
booking.state = state
with session.no_autoflush:
update_bookings(results.open, 'open')
update_bookings(results.accepted, 'accepted')
update_bookings(results.blocked, 'blocked')
def is_stable(attendees, occasions):
""" Returns true if the matching between attendees and occasions is
stable.
This runs in O(n^4) time, where n is the combination of
bookings and occasions. So this is a testing tool, not something to
run in production.
"""
for attendee in attendees:
for booking in attendee.accepted:
for occasion in occasions:
# the booking was actually accepted, skip
if booking in occasion.bookings:
continue
# if the current occasion prefers the given booking..
over = occasion.preferred(booking)
if over:
for o in occasions:
if o == occasion:
continue
# ..and another occasion prefers the loser..
switch = o.preferred(over)
# .. we have an unstable matching
if switch and occasion.preferred(switch):
return False
return True
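
# --- Added example (editor's sketch, not from the onegov codebase) ---
# A toy, dependency-free illustration of the deferred-acceptance idea used
# above, stripped down to plain dicts: proposers work down their wishlists and
# each "occasion" with `capacity` spots keeps the highest-scoring proposals it
# has seen, bumping weaker ones back onto their owners' wishlists.
def toy_deferred_acceptance(wishlists, scores, capacity=1):
    held = {}                # occasion -> accepted proposers, best first
    free = list(wishlists)   # proposers that may still propose
    while free:
        a = free.pop()
        while wishlists[a]:
            o = wishlists[a].pop(0)
            held.setdefault(o, []).append(a)
            held[o].sort(key=lambda x: scores[x], reverse=True)
            if len(held[o]) <= capacity:
                break                      # accepted (for now)
            bumped = held[o].pop()         # weakest proposal is rejected
            if bumped == a:
                continue                   # try this proposer's next wish
            free.append(bumped)            # bumped proposer proposes again later
            break
    return held

# Both want "swimming"; the higher score keeps it, the other falls back:
# prints {'swimming': ['anna'], 'chess': ['ben']}
print(toy_deferred_acceptance(
    {"anna": ["swimming", "chess"], "ben": ["swimming", "chess"]},
    scores={"anna": 2, "ben": 1}))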
# ---- file: AustinBot/all_cogs/boardgame.py (repo: austinmh12/DiscordBots, license: MIT) ----
from . import log, BASE_PATH, Page, MyCog, chunk
from discord import File
from discord.ext import commands, tasks
import asyncio
from PIL import Image, ImageDraw, ImageFont
from random import randint
import typing
from . import boardgameFunctions as BGF
from .boardgameFunctions import yahtzee
# Version
version = '1.0.0'
# Constants
# Functions
# Classes
class BoardGameCog(MyCog):
available_games = ['yahtzee']
def __init__(self, bot):
super().__init__(bot)
self.yahtzee_game = None
self.iniatited_games = {
'yahtzee': {'owner': None, 'players': []}
}
# Functions
def initiate_game(self, game, user_id):
self.iniatited_games[game]['owner'] = user_id
self.iniatited_games[game]['players'].append(user_id)
def add_player(self, game, user_id):
self.iniatited_games[game]['players'].append(user_id)
def remove_player(self, game, user_id):
self.iniatited_games[game]['players'].pop(self.iniatited_games[game]['players'].index(user_id))
if user_id == self.iniatited_games[game]['owner']:
if self.iniatited_games[game]['players']:
self.iniatited_games[game]['owner'] = self.iniatited_games[game]['players'][0]
else:
self.iniatited_games[game]['owner'] = None
# Commands
@commands.command(name='games',
pass_context=True,
description='View all the available board games',
brief='View board games')
async def games(self, ctx):
desc = 'Welcome to the Board Game Plaza! Here you can view all the available\n'
desc += 'board games. To initiate, or join, a board game, use **.<game name>**\n'
desc += 'Once all the players who want to play have joined, the **owner** of\n'
desc += 'the game instance can start the game with **.<game name> start**\n\n'
for game in __class__.available_games:
desc += f'***{game}***\n'
return await self.paginated_embeds(ctx, Page('Board Games Plaza', desc))
###########
# Yahtzee #
###########
@commands.group(name='yahtzee',
pass_context=True,
invoke_without_command=True,
description='Initiate a game of Yahtzee',
brief='Yahtzee',
aliases=['y'])
async def yahtzee_main(self, ctx):
if self.yahtzee_game:
return await ctx.send('A game of Yahtzee is already ongoing.')
yahtzee_info = self.iniatited_games['yahtzee']
if not yahtzee_info['owner']:
self.initiate_game('yahtzee', ctx.author.id)
return await ctx.send(f'A game of Yahtzee has been initiated by <@{ctx.author.id}>')
        if ctx.author.id in yahtzee_info['players']:
            self.remove_player('yahtzee', ctx.author.id)
            await ctx.send('You have left the game.')
            if not yahtzee_info['owner']:
                await ctx.send('The game of Yahtzee has been canceled.')
            # return here so a leaving player is not immediately re-added below
            return
        self.add_player('yahtzee', ctx.author.id)
        return await ctx.send('You have joined the game of Yahtzee')
@yahtzee_main.command(name='start',
pass_context=True,
description='Start a game that you initiated',
brief='Starts the game')
async def yahtzee_start(self, ctx):
if self.yahtzee_game:
return await ctx.send('A game of Yahtzee is already ongoing.')
yahtzee_info = self.iniatited_games['yahtzee']
if yahtzee_info['owner'] == ctx.author.id:
self.yahtzee_game = yahtzee.YahtzeeGame(yahtzee_info['players'])
return await ctx.send('The game of Yahtzee has started')
return await ctx.send('You didn\'t initiate a game of Yahtzee.')
@yahtzee_main.command(name='end',
pass_context=True,
description='End a game that you initiated',
brief='Ends the game')
async def yahtzee_end(self, ctx):
yahtzee_info = self.iniatited_games['yahtzee']
if yahtzee_info['owner'] == ctx.author.id:
self.yahtzee_game = None
self.iniatited_games['yahtzee'] = {'owner': None, 'players': []}
return await ctx.send('The game of Yahtzee has been ended')
return await ctx.send('You didn\'t initiate this game.')
@yahtzee_main.command(name='roll',
pass_context=True,
description='Rolls the yahtzee dice',
brief='Rolls yahtzee dice')
async def yahtzee_roll(self, ctx):
if not self.yahtzee_game:
return await ctx.send('There is no Yahtzee game ongoing.')
if ctx.author.id != self.yahtzee_game.current_player.id:
return await ctx.send('It is not your turn.')
if self.yahtzee_game.current_player.remaining_rolls == 0:
return await ctx.send('You have no rolls left, use **.yahtzee score <category>**')
dice_str = f'{5 - len(self.yahtzee_game.current_player.held_dice)}d6'
roll_results = BGF.roll_dice(dice_str)
self.yahtzee_game.current_player.last_roll = roll_results
self.yahtzee_game.current_player.remaining_rolls -= 1
self.yahtzee_game.current_player.held_this_turn = False
return await ctx.send(f'You rolled:\n{" ".join([str(r) for r in roll_results])}')
@yahtzee_main.command(name='hold',
pass_context=True,
description='Holds yahtzee dice',
brief='Holds yahtzee dice')
async def yahtzee_hold(self, ctx, *positions):
if not self.yahtzee_game:
return await ctx.send('There is no Yahtzee game ongoing.')
if ctx.author.id != self.yahtzee_game.current_player.id:
return await ctx.send('It is not your turn.')
if self.yahtzee_game.current_player.held_this_turn:
return await ctx.send('You held dice this turn already, roll again with **.yahtzee roll**')
if not positions:
msg = f'Your last roll was {" ".join([str(r) for r in self.yahtzee_game.current_player.last_roll])}\n'
msg += f'Your current held dice are {" ".join([str(r) for r in self.yahtzee_game.current_player.held_dice])}'
return await ctx.send(msg)
positions = list(positions)
positions.sort(reverse=True)
for position in positions:
if int(position) == 0:
break
self.yahtzee_game.current_player.held_dice.append(self.yahtzee_game.current_player.last_roll[int(position) - 1])
self.yahtzee_game.current_player.last_roll.pop(int(position) - 1)
self.yahtzee_game.current_player.held_this_turn = True
return await ctx.send(f'You hold {" ".join([str(r) for r in self.yahtzee_game.current_player.held_dice])}')
@yahtzee_main.command(name='score',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score(self, ctx, category: typing.Optional[str] = ''):
if not self.yahtzee_game:
return await ctx.send('There is no Yahtzee game ongoing.')
if ctx.author.id != self.yahtzee_game.current_player.id:
return await ctx.send('It is not your turn.')
if category not in yahtzee.top_categories and category not in yahtzee.bottom_categories:
f = self.yahtzee_game.current_player.get_board()
cats = ' '.join([f'***{c}***' for c in self.yahtzee_game.current_player.unscored_categories])
await ctx.send(f'These are the categories that you haven\'t used\n{cats}', file=f)
return f.close()
self.yahtzee_game.current_player.calculate_score(category)
f = self.yahtzee_game.current_player.get_board()
await ctx.send('Here is your score card', file=f)
f.close()
self.yahtzee_game.next_player()
if self.yahtzee_game.game_done:
return await ctx.send(f'Game over! <@{self.yahtzee_game.winner.id}> wins!')
return await ctx.send(f'It\'s now <@{self.yahtzee_game.current_player.id}>\'s turn!')
@yahtzee_main.command(name='1s',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_1s(self, ctx):
return await self.yahtzee_score(ctx, '1s')
@yahtzee_main.command(name='2s',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_2s(self, ctx):
return await self.yahtzee_score(ctx, '2s')
@yahtzee_main.command(name='3s',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_3s(self, ctx):
return await self.yahtzee_score(ctx, '3s')
@yahtzee_main.command(name='4s',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_4s(self, ctx):
return await self.yahtzee_score(ctx, '4s')
@yahtzee_main.command(name='5s',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_5s(self, ctx):
return await self.yahtzee_score(ctx, '5s')
@yahtzee_main.command(name='6s',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_6s(self, ctx):
return await self.yahtzee_score(ctx, '6s')
@yahtzee_main.command(name='3kind',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_3kind(self, ctx):
return await self.yahtzee_score(ctx, '3kind')
@yahtzee_main.command(name='4kind',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_4kind(self, ctx):
return await self.yahtzee_score(ctx, '4kind')
@yahtzee_main.command(name='fullhouse',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_fullhouse(self, ctx):
return await self.yahtzee_score(ctx, 'fullhouse')
@yahtzee_main.command(name='small',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_small(self, ctx):
return await self.yahtzee_score(ctx, 'small')
@yahtzee_main.command(name='large',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_large(self, ctx):
return await self.yahtzee_score(ctx, 'large')
@yahtzee_main.command(name='yahtzee',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_yahtzee(self, ctx):
return await self.yahtzee_score(ctx, 'yahtzee')
@yahtzee_main.command(name='chance',
pass_context=True,
description='Calculates the score for the category chosen using your held dice.',
brief='Scores your held dice')
async def yahtzee_score_chance(self, ctx):
return await self.yahtzee_score(ctx, 'chance')
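
# --- Added example (editor's sketch, not from the original repo) ---
# How a cog like this is typically wired into a discord.py (1.x) bot; the
# prefix and token below are placeholder assumptions, not project values.
from discord.ext import commands

bot = commands.Bot(command_prefix='.')
bot.add_cog(BoardGameCog(bot))  # registers .games and the .yahtzee command group
# bot.run("YOUR_TOKEN_HERE")    # placeholder token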
from typing import List, Optional
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models.base_task import BaseTask
from app.schemas.base_task import BaseTaskUpdate, BaseTaskCreate
class CRUDBaseTask(CRUDBase[BaseTask, BaseTaskCreate, BaseTaskUpdate]):
def get_multi_by_task_group(
self, db: Session, *, task_group_id: int
) -> List[BaseTask]:
"""
        Returns all BaseTasks for the given TaskGroup.
        :param db: DB-Session
        :param task_group_id: id of the TaskGroup
        :return: All found BaseTasks
"""
return (
db.query(self.model).filter(BaseTask.task_group_id == task_group_id).all()
)
def get_multi_with_no_task_group(
self, db: Session, *, course_id: int
) -> List[BaseTask]:
"""
        Returns all BaseTasks without a TaskGroup for the given Course.
        :param db: DB-Session
        :param course_id: id of the Course
        :return: All found BaseTasks
"""
return (
db.query(self.model)
.filter(BaseTask.course_id == course_id)
.filter(BaseTask.task_group_id.is_(None))
.all()
)
def get_by_short_name(self, db: Session, *, short_name: str) -> BaseTask:
"""
        Returns the BaseTask matching the given short name.
        :param db: DB-Session
        :param short_name: short name of the BaseTask
        :return: The found BaseTask
"""
return db.query(self.model).filter(BaseTask.short_name == short_name).first()
def create_with_slide_id(self, db: Session, *, task_in: BaseTaskCreate) -> BaseTask:
"""
        Creates a new BaseTask.
:param db: DB-Session
:param task_in: contains all information to create a new BaseTask
:return: the created BaseTask
"""
db_obj = BaseTask()
db_obj.task_group_id = task_in.task_group_id
db_obj.slide_id = task_in.slide_id
db_obj.name = task_in.name
db.add(db_obj)
db.commit()
        db.refresh(db_obj)  # refresh() updates the instance in place and returns None
return db_obj
def get_by_name(
self, db: Session, name: str, task_group_id: int
) -> Optional[BaseTask]:
"""
Returns the BaseTask with the given name to a TaskGroup
:param db: DB-Session
:param name: Name of the BaseTask
:param task_group_id: Id of the TaskGroup
:return: the found BaseTask
"""
return (
db.query(self.model)
.filter(BaseTask.name == name)
.filter(BaseTask.task_group_id == task_group_id)
.first()
)
def base_task_uses_slide(self, db: Session, slide_id: str) -> bool:
"""
Checks if a base task uses the given slide
:param db: DB-Session
:param slide_id: ID of the Slide
:return: If the slide is used by any base task
"""
return (
db.query(self.model).filter(BaseTask.slide_id == slide_id).first()
is not None
)
crud_base_task = CRUDBaseTask(BaseTask)
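
# --- Added example (editor's sketch, not from the original project) ---
# Typical call pattern for the singleton above inside a request handler. The
# session factory name `SessionLocal` is an assumption of this sketch.
# db = SessionLocal()
# tasks = crud_base_task.get_multi_by_task_group(db, task_group_id=1)
# orphans = crud_base_task.get_multi_with_no_task_group(db, course_id=1)
# in_use = crud_base_task.base_task_uses_slide(db, slide_id="slide-1")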
# ---- file: ANN/past_works/trapezoidal.py (repo: joao-frohlich/BCC, license: Apache-2.0) ----
from math import tan
"""
How it works
The implemented algorithm works as follows:
you must define the function, the lower and upper bounds, and finally
the number of iterations.
a = lower bound
b = upper bound
n = iterations
f(x) = function
"""
def calcula_integral(f, a, b, n):
h = (b - a) / n
s = (f(a) + f(b)) / 2
for i in range(1, n):
s += f(a + i * h)
return h * s
def f(x):
    res = x + tan(tan(abs(1 / 4 * x - 7 / 4)))  # change the function here
return res
a = 3
b = 11
n = 19
resultado = calcula_integral(f, a, b, n)
print("Integral de f(x) = FUNCAO_AQUI, de %d até %d" % (a, b))
print("I ~ %.2f" % resultado)
print("%d Iterações" % n)
# ---- file: generate_plots.py (repo: silberzwiebel/klimawatch, license: Apache-2.0) ----
# plots
import plotly.graph_objects as go
# make it easier with numeric values
import pandas
import numpy as np
# for computing the trend
from scipy.stats import linregress
# reading command line arguments
import sys
# writing json
import json
# wrapping long lines
import textwrap
# possibility to delete files
import os
# read data
if(len(sys.argv) == 1):
print("No city given, plotting data for Münster ('data/muenster.csv')")
city = "muenster"
df = pandas.read_csv("data/muenster.csv")
else:
print("Plotting data for", sys.argv[1])
city = sys.argv[1]
    try:
        df = pandas.read_csv("data/" + city + ".csv")
    except FileNotFoundError:
        print("File not found. Does the file data/" + city + ".csv exist?")
        exit()
# create plot
fig = go.Figure()
emission_1990 = {}
# compute category-wise percentage (compared to 1990)
for cat in set(df.category):
if(cat != "Einwohner"):
emission_1990[str(cat)] = float(df[(df.year == 1990) & (df.category == cat) & (df.type == "real")].value)
df.loc[df.category == cat, 'percentage'] = df[df.category == cat].value.astype(float) / emission_1990[str(cat)]
# set() only lists unique values
# this loop plots all categories present in the csv, if type is either "real" or "geplant"
for cat in set(df.category):
subdf = df[(df.category == cat) & (df.type != "Einwohner")]
subdf_real = subdf[subdf.type == "real"]
fig.add_trace(go.Scatter(x = subdf_real.year, y = subdf_real.value,
name = cat + ", real", mode = "lines+markers",
legendgroup = cat,
text = subdf_real.percentage,
hovertemplate =
"<b>tatsächliche</b> Emissionen, Kategorie: " + cat +
"<br>Jahr: %{x}<br>" +
"CO<sub>2</sub>-Emissionen (tausend Tonnen): %{y:.1f}<br>" +
"Prozent von Emissionen 1990: " + "%{text:.0%}" +
"<extra></extra>") # no additional legend text in tooltip
)
subdf_planned = subdf[subdf.type == "geplant"]
fig.add_trace(go.Scatter(x = subdf_planned.year, y = subdf_planned.value, name = cat + ", geplant",
mode = "lines+markers", line = dict(dash = "dash"),
legendgroup = cat,
text = subdf_planned.percentage,
hovertemplate =
"<b>geplante</b> Emissionen, Kategorie: " + cat +
"<br>Jahr: %{x}<br>" +
"CO<sub>2</sub>-Emissionen (tausend Tonnen): %{y:.1f}<br>" +
"Prozent von Emissionen 1990: " + "%{text:.0%}" +
"<extra></extra>") # no additional legend text in tooltip
)
# compute trend based on current data
subdf = df[df.category == "Gesamt"]
subdf_real = subdf[subdf.type == "real"]
# variables to write to JSON later on
years_past_total_real = list(subdf_real.year)
values_past_total_real = list(subdf_real.value)
slope, intercept, r, p, stderr = linregress(subdf_real.year, subdf_real.value)
# print info about trend
print("linearer Trend: Steigung: ", slope, "Y-Achsenabschnitt: ", intercept, "R^2: ", r)
# plot trend
fig.add_trace(go.Scatter(x = subdf.year, y = slope * subdf.year + intercept, name = "Trend",
mode = "lines", line = dict(dash = "dot"),
legendgroup = "future",
text = (slope * subdf.year + intercept) / emission_1990["Gesamt"],
hovertemplate =
"<b>bisheriger Trend</b>" +
"<br>Jahr: %{x}<br>" +
"CO<sub>2</sub>-Emissionen (tausend Tonnen): %{y:.1f}<br>" +
"Prozent von Emissionen 1990: " + "%{text:.0%}" +
"<extra></extra>") # no additional legend text in tooltip
)
# compute remaining paris budget
last_emissions = np.array(df[df.note == "last_emissions"].value)
# see https://scilogs.spektrum.de/klimalounge/wie-viel-co2-kann-deutschland-noch-ausstossen/
paris_budget_germany_2019 = 7300000
inhabitants_germany = 83019213
paris_budget_per_capita_2019 = paris_budget_germany_2019 / inhabitants_germany
paris_budget_full_city_2019 = paris_budget_per_capita_2019 * np.array(df[df.type == "Einwohner"].value)
# substract individual CO2 use; roughly 40%, see https://uba.co2-rechner.de/
paris_budget_wo_individual_city_2019 = paris_budget_full_city_2019 * 0.6
# substract already emitted CO2 from 2019 onwards; assume last measured budget is 2019 emission
paris_budget_wo_individual_city_2020 = paris_budget_wo_individual_city_2019 - last_emissions
# compute slope for linear reduction of paris budget
paris_slope = (-pow(last_emissions, 2)) / (2 * paris_budget_wo_individual_city_2020)
years_to_climate_neutral = - last_emissions / paris_slope
full_years_to_climate_neutral = int(np.round(years_to_climate_neutral))
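# Derivation of the two lines above: if emissions fall linearly from
# last_emissions to zero over T years, the area under that line is
# last_emissions * T / 2. Setting it equal to the remaining budget B gives
# T = 2 * B / last_emissions, so the yearly change is
# slope = -last_emissions / T = -last_emissions**2 / (2 * B).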
# plot paris line
future = list(range(0, full_years_to_climate_neutral, 1))  # year offsets from 2020 until the budget is exhausted
future.append(float(years_to_climate_neutral))
# TODO: make df instead of (double) calculation inline?
fig.add_trace(go.Scatter(x = np.array(future) + 2020, y = paris_slope * np.array(future) + last_emissions,
name = "Paris berechnet",
mode = "lines+markers", line = dict(dash = "dash"),
legendgroup = "future",
text = (paris_slope * np.array(future) + last_emissions) / emission_1990["Gesamt"],
hovertemplate =
"<b>Paris-Budget</b>" +
"<br>Jahr: %{x:.0f}<br>" +
"CO<sub>2</sub>-Emissionen (tausend Tonnen): %{y:.1f}<br>" +
"Prozent von Gesamt-Emissionen 1990: " + "%{text:.0%}" +
"<extra></extra>") # no additional legend text in tooltip
)
fig.add_trace(go.Scatter(
x = [2020],
y = [emission_1990["Gesamt"] + (emission_1990["Gesamt"] / 30)],
mode = "text",
text = "heute",
hoverinfo="none",
showlegend = False)
)
# horizontal legend; vertical line at 2020
fig.update_layout(
title = "Realität und Ziele",
yaxis_title = "CO<sub>2</sub> in tausend Tonnen",
xaxis_title = "Jahr",
# horizontal legend
legend_orientation = "h",
# put legend above plot to avoid overlapping-bug
legend_xanchor = "center",
legend_y = -0.25,
legend_x = 0.5,
legend_font_size = 10,
# disable dragmode for better mobile experience
dragmode = False,
# German number separators
separators = ",.",
# vertical "today" line
shapes = [
go.layout.Shape(
type = "line",
x0 = 2020,
y0 = 0,
x1 = 2020,
y1 = emission_1990["Gesamt"],
)]
)
# write plot to file
fig.write_html("hugo/layouts/shortcodes/paris_" + city + ".html", include_plotlyjs = False,
config = {'displayModeBar': False}, full_html = False, auto_open = True)
# write computed Paris budget to JSON file for you-draw-it
paris_data = { }
paris_data['chart_id'] = 'you-draw-it'
paris_data['chart'] = {
'heading': 'Wie sollte sich der CO2-Ausstoß entwickeln?',
'lastPointShownAt': 2020,
'y_unit': 't. T.',
'data': [] }
# past data
past = range(1990, 2020, 5)
for y in past:
try:
yidx = years_past_total_real.index(y)
paris_data["chart"]["data"].append({
y: values_past_total_real[yidx]
})
except ValueError:
print("You-draw-it-chart: Emissions for", y, "unknown. Estimating from the trend.")
paris_data["chart"]["data"].append({
y: slope * y + intercept
})
# years with remaining budget
paris_years = list(np.array(future[:-1]) + 2020)
budget_per_year = list(paris_slope * np.array(future[:-1]) + last_emissions)
for y in range(len(paris_years)):
if y % 5 == 0: # print only every 5th year
paris_data["chart"]["data"].append({
int(paris_years[y]): budget_per_year[y]
})
climate_neutral_by = int(np.round(max(paris_years)))
# range every climate-neutral year, because
# we don't know the climate-neutral year and can't do 5-year steps
years_after_budget = range(climate_neutral_by + 1, 2051, 1)
for y in years_after_budget:
if y % 5 == 0: # print only every 5th year
paris_data["chart"]["data"].append({
y: 0
})
with open("hugo/data/you_draw_it_" + city + ".json", "w", encoding='utf8') as outfile:
json.dump(paris_data, outfile, indent = 2, ensure_ascii=False)
## visualisation of status of modules of Klimaschutzkonzepte
try:
    modules_df = pandas.read_csv("data/" + city + "_sachstand.csv")
except FileNotFoundError:
    print("Sachstand file for " + city + " (data/" + city + "_sachstand.csv) not found. Not creating module plot.")
    sys.exit()
# find unique overarching categories (here: first character of ID)
categories = set()
for c in modules_df["id"]:
categories.add(c[0:1])
## create a single treemap plot for every overarching category
# delete old plot file if it exists
if os.path.exists("hugo/layouts/shortcodes/modules_" + city + ".html"):
    os.remove("hugo/layouts/shortcodes/modules_" + city + ".html")
modules_plot_file = open("hugo/layouts/shortcodes/modules_" + city + ".html", "a")
for cat in categories:
modules_onecat = modules_df[modules_df.id.str.startswith(cat)]
fig_modules = go.Figure(go.Treemap(
branchvalues = "remainder",
ids = modules_onecat["id"],
labels = "<b>" + modules_onecat["title"] + "</b> (" + modules_onecat["id"] + ")",
parents = modules_onecat["parent"],
values = modules_onecat["priority"],
marker_colors = modules_onecat["assessment"],
text = (modules_onecat["text"]).apply(lambda txt: '<br>'.join(textwrap.wrap(txt, width = 100))),
textinfo = "label+text",
hovertext = (modules_onecat["text"] + " (" + modules_onecat["id"] + ")"
"<br>Priorität: " + (modules_onecat["priority"]).astype(str) +
"<br>Potential: " + (modules_onecat["potential"]).astype(str)).apply(lambda txt: '<br>'.join(textwrap.wrap(txt, width = 100))),
hoverinfo = "text",
pathbar = {"visible": True},
insidetextfont = {"size": 75}
)
)
fig_modules.update_layout(
margin = dict(r=10, l=10)
# ~ height = 750
)
modules_plot_file.write(fig_modules.to_html(include_plotlyjs = False,
config={'displayModeBar': False}, full_html = False))
modules_plot_file.close()
# ===== scuervo91/dcapy | test/test_cashflow.py | MIT =====
import unittest
import numpy as np
from datetime import date
from pandas.testing import assert_frame_equal
import pandas as pd
from dcapy.cashflow import CashFlow, CashFlowModel
class TestCashFlow(unittest.TestCase):
def test_npv(self):
oil_sell = CashFlow(
name = 'oil_sell',
const_value= [10000,5000,8000,12000,30000],
start = date(2021,1,1),
end = date(2021,5,1),
freq_input = 'M'
)
oil_capex = CashFlow(
name = 'oil_capex',
const_value= [-50000],
start = date(2021,1,1),
end = date(2021,1,1),
freq_input = 'M'
)
cm = CashFlowModel(
name = 'Example Cashflow Model',
income=[oil_sell],
capex=[oil_capex]
)
assert_frame_equal(cm.npv(0.08), pd.DataFrame({'npv':3065.22267}, index=[0.08]))
def test_irr(self):
oil_sell = CashFlow(
name = 'oil_sell',
const_value= [40,39,59,55,20],
start = date(2021,1,1),
end = date(2021,5,1),
freq_input = 'M'
)
oil_capex = CashFlow(
name = 'oil_capex',
const_value= [-140],
start = date(2021,1,1),
end = date(2021,1,1),
freq_input = 'M'
)
cm = CashFlowModel(
name = 'Example Cashflow Model',
income=[oil_sell],
capex=[oil_capex]
)
assert 0.28095 == round(cm.irr(),5)
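    # Hand check of test_npv (assuming dcapy discounts period t by 1/(1+r)**t,
    # with the first cashflow at t = 0):
    #     10000 + 5000/1.08 + 8000/1.08**2 + 12000/1.08**3 + 30000/1.08**4
    #     = 53065.22, minus the 50000 capex = 3065.22, matching the assertion.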
# ===== SayanGhoshBDA/code-backup | python/python_backup/PRAC_PYTHON/5_for.py | MIT =====
words=["Jimut","Python","c"]
for w in words: #for just simple for loop
print(w,len(w))
# ===== hyeseonko/LeetCode | longest-common-prefix/longest-common-prefix.py | MIT =====
from typing import List
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
if len(strs)==1:
return strs[0]
minlen = min([len(each) for each in strs])
result=""
for i in range(minlen):
basis = strs[-1][i]
for j in range(len(strs)-1):
if basis!=strs[j][i]:
return result
result+=basis
return result
# case: long common string (not prefix)
# result=""
# for s1 in strs[0]:
# nolook=False
# for s2 in strs[1:]:
# if result+s1 not in s2:
# nolook=True
# break
# print(s1, s2, result, nolook)
# if nolook==False:
# result+=s1
# return result
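    # Examples:
    #     Solution().longestCommonPrefix(["flower", "flow", "flight"])  # -> "fl"
    #     Solution().longestCommonPrefix(["dog", "racecar", "car"])     # -> ""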
# ===== leelige/mindspore | official/cv/srcnn/src/dataset.py | Apache-2.0 =====
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import glob
import numpy as np
import PIL.Image as pil_image
import mindspore.dataset as ds
from src.config import srcnn_cfg as config
from src.utils import convert_rgb_to_y
class EvalDataset:
def __init__(self, images_dir):
self.images_dir = images_dir
scale = config.scale
self.lr_group = []
self.hr_group = []
for image_path in sorted(glob.glob('{}/*'.format(images_dir))):
hr = pil_image.open(image_path).convert('RGB')
hr_width = (hr.width // scale) * scale
hr_height = (hr.height // scale) * scale
hr = hr.resize((hr_width, hr_height), resample=pil_image.BICUBIC)
lr = hr.resize((hr_width // scale, hr_height // scale), resample=pil_image.BICUBIC)
lr = lr.resize((lr.width * scale, lr.height * scale), resample=pil_image.BICUBIC)
hr = np.array(hr).astype(np.float32)
lr = np.array(lr).astype(np.float32)
hr = convert_rgb_to_y(hr)
lr = convert_rgb_to_y(lr)
self.lr_group.append(lr)
self.hr_group.append(hr)
def __len__(self):
return len(self.lr_group)
def __getitem__(self, idx):
return np.expand_dims(self.lr_group[idx] / 255., 0), np.expand_dims(self.hr_group[idx] / 255., 0)
def create_train_dataset(mindrecord_file, batch_size=1, shard_id=0, num_shard=1, num_parallel_workers=4):
data_set = ds.MindDataset(mindrecord_file, columns_list=["lr", "hr"], num_shards=num_shard,
shard_id=shard_id, num_parallel_workers=num_parallel_workers, shuffle=True)
data_set = data_set.batch(batch_size, drop_remainder=True)
return data_set
def create_eval_dataset(images_dir, batch_size=1):
dataset = EvalDataset(images_dir)
data_set = ds.GeneratorDataset(dataset, ["lr", "hr"], shuffle=False)
data_set = data_set.batch(batch_size, drop_remainder=True)
return data_set
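# Minimal usage sketch (the image directory below is a placeholder, not a path
# from the repo):
#
#     eval_ds = create_eval_dataset("/path/to/Set5", batch_size=1)
#     for lr, hr in eval_ds.create_tuple_iterator():
#         ...  # batched single-channel images scaled to [0, 1]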
# ===== Shirazbello/Pyscriptining | pyScript/custom_src/GlobalAccess.py | MIT =====
class GlobalStorage:
storage = {'design style': 'dark std',
'debugging': False}
def debug(*args):
    s = ' '.join(str(arg) for arg in args)
    if GlobalStorage.storage['debugging']:
        print(' --> DEBUG:', s)
# yyep, that's it....
# you must be kidding...
# you MUST be
# it's actually true....
# that's ridiculous.
# indeed.
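# Example: switch logging on, then call debug()
#     GlobalStorage.storage['debugging'] = True
#     debug('loaded', 3, 'nodes')  # prints:  --> DEBUG: loaded 3 nodes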
# ===== visit-dav/vis | src/visitpy/visit_utils/tests/test_encoding.py | BSD-3-Clause =====
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: test_encoding.py
author: Cyrus Harrison ([email protected])
created: 4/09/2010
description:
Unit tests for movie encoding helpers.
"""
import unittest
import os
import sys
import glob
from visit_utils import encoding
from visit_utils.common import VisItException
from os.path import join as pjoin
iframes_dir = pjoin(os.path.split(__file__)[0],"_data")
iframes = pjoin(iframes_dir,"wave.movie.%04d.png")
iframes_short_a = pjoin(iframes_dir,"wave.movie.%03d.png")
iframes_short_b = pjoin(iframes_dir,"wave.movie.%d.png")
iframes_stereo = pjoin(iframes_dir,"wave.movie.stereo.%04d.png")
output_dir = pjoin(os.path.split(__file__)[0],"_output")
def lst_slnks():
return glob.glob(pjoin(iframes_dir,"_encode.lnk.*"))
def clean_slnks():
slnks = lst_slnks()
for slnk in slnks:
os.remove(slnk)
def check_encoded_file(path):
if os.path.isfile(path):
# make sure the file isn't empty
st = os.stat(path)
return st.st_size > 0
return False
class TestEncoding(unittest.TestCase):
def setUp(self):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
def test_encoders(self):
encoders = encoding.encoders()
if len(encoders) > 0:
self.assertTrue("mpg" in encoders)
self.assertTrue("wmv" in encoders)
def test_ffmpeg_encoders(self):
for enc in ["wmv","mpg","divx","mov","swf","mp4","avi"]:
if enc in encoding.encoders():
ofile = pjoin(output_dir,"wave.movie.%s" % enc)
encoding.encode(iframes,ofile)
self.assertTrue(check_encoded_file(ofile))
ofile = pjoin(output_dir,"wave.movie.slow.%s" % enc)
encoding.encode(iframes,ofile,2)
self.assertTrue(check_encoded_file(ofile))
def test_sm(self):
if "sm" in encoding.encoders():
ofile = pjoin(output_dir,"wave.movie.sm")
encoding.encode(iframes,ofile)
self.assertTrue(check_encoded_file(ofile))
clean_slnks()
ofile = pjoin(output_dir,"wave.movie.slow.sm")
encoding.encode(iframes,ofile,2)
self.assertEqual(0,len(lst_slnks()))
self.assertTrue(check_encoded_file(ofile))
def test_unsupported(self):
self.assertRaises(VisItException, encoding.encode, iframes,"wave.movie.bad_ext")
def test_sm_stereo(self):
if "sm" in encoding.encoders():
clean_slnks()
ofile = pjoin(output_dir,"wave.movie.stereo.sm")
encoding.encode(iframes_stereo,ofile,stereo=True)
self.assertEqual(0,len(lst_slnks()))
self.assertTrue(check_encoded_file(ofile))
clean_slnks()
ofile = pjoin(output_dir,"wave.movie.stereo.slow.sm")
encoding.encode(iframes_stereo,ofile,2,stereo=True)
self.assertEqual(0,len(lst_slnks()))
self.assertTrue(check_encoded_file(ofile))
def test_stereo_uneven_frames_error(self):
self.assertRaises(VisItException, encoding.encode, iframes,
pjoin(output_dir,"wave.movie.stereo.bad.sm"),
stereo=True)
def test_extract(self):
if "mpg" in encoding.encoders():
eframes = pjoin(output_dir,"extract_out_%04d.png")
encoding.encode(iframes,pjoin(output_dir,"wave.movie.mpg"))
encoding.extract(pjoin(output_dir,"wave.movie.mpg"),eframes)
ofile = pjoin(output_dir,"wave.movie.extract.and.reencode.mpg")
encoding.encode(eframes,ofile)
self.assertTrue(check_encoded_file(ofile))
def test_pre_lr_stereo(self):
if "divx" in encoding.encoders():
iframes = pjoin(iframes_dir,"noise.stereo.left.right.1080p.%04d.png")
ofile = pjoin(output_dir,"noise.movie.stereo.pre.left.right.avi")
encoding.encode(iframes,ofile,etype="divx")
self.assertTrue(check_encoded_file(ofile))
def test_short_symlinks(self):
if "mpg" in encoding.encoders():
clean_slnks()
ofile = pjoin(output_dir,"wave.movie.test.seq.pattern.03d.mpg")
encoding.encode(iframes_short_a,ofile,3)
self.assertEqual(0,len(lst_slnks()))
self.assertTrue(check_encoded_file(ofile))
clean_slnks()
ofile = pjoin(output_dir,"wave.movie.test.seq.pattern.d.mpg")
encoding.encode(iframes_short_b,ofile,5)
self.assertEqual(0,len(lst_slnks()))
self.assertTrue(check_encoded_file(ofile))
def test_ffmpeg_input_frame_rate(self):
for enc in ["wmv","mpg","divx","mov","swf","mp4"]:
if enc in encoding.encoders():
clean_slnks()
ofile = pjoin(output_dir,"wave.movie.input_frame_rate.%s" % enc)
encoding.encode(iframes,ofile,input_frame_rate=5)
self.assertEqual(0,len(lst_slnks()))
self.assertTrue(check_encoded_file(ofile))
def test_ffmpeg_input_and_output_frame_rate(self):
for enc in ["wmv","mov"]:
if enc in encoding.encoders():
clean_slnks()
ofile = pjoin(output_dir,"wave.movie.input_and_output_frame_rate.%s" % enc)
encoding.encode(iframes,ofile,input_frame_rate=5,output_frame_rate=30)
self.assertEqual(0,len(lst_slnks()))
self.assertTrue(check_encoded_file(ofile))
def test_ffmpeg_reencode_new_format(self):
encoders = encoding.encoders()
if "mpg" in encoders and "wmv" in encoders:
clean_slnks()
ofile_src = pjoin(output_dir,"wave.movie.reencode.src.mpg")
ofile_des = pjoin(output_dir,"wave.movie.reencode.src.wmv")
encoding.encode(iframes,ofile_src)
encoding.encode(ofile_src,ofile_des)
self.assertEqual(0,len(lst_slnks()))
self.assertTrue(check_encoded_file(ofile_src))
self.assertTrue(check_encoded_file(ofile_des))
if __name__ == '__main__':
unittest.main()
"""ExpanseAggregateAttributionUser
"""
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
from typing import Dict, List, Any, Tuple, Optional
import traceback
''' STANDALONE FUNCTION '''
def deconstruct_entry(entry: Dict[str, str],
username_fields: List[str],
sightings_fields: List[str]) -> Tuple[Optional[str],
Optional[str],
Optional[int]]:
"""
deconstruct_entry
Extracts device relevant fields from a log entry.
    :type entry: ``Dict[str, str]``
    :param entry: Log entry as dictionary of fields.
    :type username_fields: ``List[str]``
    :param username_fields: List of possible field names in log entry to be considered as username.
    :type sightings_fields: ``List[str]``
    :param sightings_fields: List of possible field names in log entry to be considered as number of occurrences.
    :return: Tuple where the first element is the username or None, the second is the domain extracted from the
        username field and the third element is the number of occurrences of the event.
    :rtype: ``Tuple[Optional[str], Optional[str], Optional[int]]``
"""
username = next((entry[field] for field in username_fields if field in entry), None)
sightings = next((int(entry[field]) for field in sightings_fields if field in entry), 1)
domain = None
if username is not None and "\\" in username:
domain, username = username.split("\\", 1)
return username, domain, sightings
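# Example: with username_fields=["user"] and sightings_fields=["count"],
#     deconstruct_entry({"user": "CORP\\alice", "count": "3"}, ["user"], ["count"])
# returns ("alice", "CORP", 3); when no sightings field is present, the count
# defaults to 1.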
''' COMMAND FUNCTION '''
def aggregate_command(args: Dict[str, Any]) -> CommandResults:
input_list = argToList(args.get('input', []))
current_list = argToList(args.get('current', []))
username_fields = argToList(args.get('username_fields', "source_user,srcuser,user"))
sightings_fields = argToList(args.get('sightings_fields', "count"))
current_users = {
f"{d['username']}::{d['domain']}": d
for d in current_list if d is not None
}
for entry in input_list:
if not isinstance(entry, dict):
continue
username, domain, sightings = deconstruct_entry(
entry,
username_fields=username_fields,
sightings_fields=sightings_fields
)
if username is None:
continue
if domain is None:
domain = ""
user_key = f"{username}::{domain}"
current_state = current_users.get(user_key, None)
if current_state is None:
current_state = {
'username': username,
'domain': domain,
'sightings': 0,
'groups': [],
'description': None,
}
current_users[user_key] = current_state
if sightings is not None:
current_state['sightings'] += sightings
markdown = '## ExpanseAggregateAttributionUser'
outputs = list(current_users.values())
return CommandResults(
readable_output=markdown,
outputs=outputs or None,
outputs_prefix="Expanse.AttributionUser",
outputs_key_field=["username", "domain"]
)
''' MAIN FUNCTION '''
def main():
try:
return_results(aggregate_command(demisto.args()))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute ExpanseAggregateAttributionUser. Error: {str(ex)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
# ===== marsXyr/GESRL | V1/utils/models.py | Apache-2.0 =====
import numpy as np
from copy import deepcopy
import torch
import torch.nn as nn
from torch.optim import Adam
import torch.nn.functional as F
#USE_CUDA = torch.cuda.is_available()
USE_CUDA = False
if USE_CUDA:
FloatTensor = torch.cuda.FloatTensor
else:
FloatTensor = torch.FloatTensor
def to_numpy(var):
return var.cpu().data.numpy()
def to_tensor(x):
return torch.FloatTensor(x)
def soft_update(target, source, tau):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
class RLNN(nn.Module):
def __init__(self, state_dim, action_dim):
super(RLNN, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
def set_params(self, w):
for i, param in enumerate(self.parameters()):
param.data.copy_(torch.from_numpy(w).view(param.size()))
def get_params(self):
params = [to_numpy(v) for v in self.parameters()]
return deepcopy(params[0])
def get_grads(self):
grads = [to_numpy(v.grad) for v in self.parameters()]
return deepcopy(grads[0])
def get_size(self):
return self.get_params().shape[0]
def load_model(self, filename, net_name):
if filename is None:
return
self.load_state_dict(
torch.load('{}/{}.pkl'.format(filename, net_name), map_location=lambda storage, loc: storage))
def save_model(self, output, net_name):
torch.save(self.state_dict(), '{}/{}.pkl'.format(output, net_name))
class LinearPolicy(RLNN):
"""
Linear policy class that computes action as <w, ob>.
"""
def __init__(self, state_dim, action_dim, max_action, args):
super(LinearPolicy, self).__init__(state_dim, action_dim)
self.l1 = nn.Linear(self.state_dim, self.action_dim, bias=False)
self.optimizer = Adam(self.parameters(), lr=args.actor_lr)
self.tau = args.tau
# self.theta = args['theta']
self.max_action = max_action
if USE_CUDA:
self.cuda()
def forward(self, x):
out = self.l1(x)
# abs_out = torch.abs(out)
# abs_out_sum = torch.sum(abs_out).view(-1, 1)
# abs_out_mean = abs_out_sum / self.action_dim / self.theta
# ones = torch.ones(abs_out_mean.size())
# ones = ones.cuda()
# mod = torch.where(abs_out_mean >= 1, abs_out_mean, ones)
# out = out / mod
#
out = self.max_action * torch.tanh(out)
return out
def update(self, memory, batch_size, critic, policy_t):
# Sample replay buffer
states, _, _, _, _ = memory.sample(batch_size)
# Compute actor loss
policy_loss = -critic(states, self(states)).mean()
# Optimize the policy
self.optimizer.zero_grad()
policy_loss.backward()
grads = self.get_grads() # Get policy gradients
self.optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.parameters(), policy_t.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
return grads
class Critic(RLNN):
def __init__(self, state_dim, action_dim, args):
super(Critic, self).__init__(state_dim, action_dim)
self.l1 = nn.Linear(state_dim + action_dim, 64)
self.l2 = nn.Linear(64, 64)
self.l3 = nn.Linear(64, 1)
if args.layer_norm:
self.n1 = nn.LayerNorm(64)
self.n2 = nn.LayerNorm(64)
self.layer_norm = args.layer_norm
self.optimizer = Adam(self.parameters(), lr=args.critic_lr)
self.tau = args.tau
self.discount = args.discount
if USE_CUDA:
self.cuda()
def forward(self, x, u):
if not self.layer_norm:
x = F.leaky_relu(self.l1(torch.cat([x, u], 1)))
x = F.leaky_relu(self.l2(x))
x = self.l3(x)
else:
x = F.leaky_relu(self.n1(self.l1(torch.cat([x, u], 1))))
x = F.leaky_relu(self.n2(self.l2(x)))
x = self.l3(x)
return x
def update(self, memory, batch_size, policy, critic_t):
# Sample replay buffer
states, n_states, actions, rewards, dones = memory.sample(batch_size)
# Q target = reward + discount * Q(next_state, pi(next_state))
with torch.no_grad():
target_Q = critic_t(n_states, policy(n_states))
target_Q = rewards + (1 - dones) * self.discount * target_Q
# Get current Q estimate
current_Q = self.forward(states, actions)
# Compute critic loss
critic_loss = nn.MSELoss()(current_Q, target_Q)
# Optimize the critic
self.optimizer.zero_grad()
critic_loss.backward()
self.optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.parameters(), critic_t.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
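# Note on the soft target update used by both networks above: with a small tau
# (e.g. args.tau = 0.005), every target parameter becomes
# tau * param + (1 - tau) * target_param after each update, so the target
# network trails the online network slowly and stabilizes training.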
# ===========================================================================
from typing import List
from watchmen_model.dqc import MonitorRuleCode
disabled_rules: List[MonitorRuleCode] = [
MonitorRuleCode.RAW_MISMATCH_STRUCTURE, # ignored now
MonitorRuleCode.FACTOR_MISMATCH_DATE_TYPE, # should be detected on pipeline run
MonitorRuleCode.FACTOR_USE_CAST, # should be detected on pipeline run
MonitorRuleCode.FACTOR_BREAKS_MONOTONE_INCREASING, # ignored now
MonitorRuleCode.FACTOR_BREAKS_MONOTONE_DECREASING # ignored now
]
# ===== kopp/pyventskalender | pyventskalender/tag15_loesung.py | MIT =====
from typing import Set, List
try:
from pyventskalender.tag14_loesung import VERLOREN_BEI_SO_VIELEN_FEHLERN
except ImportError:
from tag14_loesung import VERLOREN_BEI_SO_VIELEN_FEHLERN
def ist_buchstabe(eingabe_von_nutzer: str) -> bool:
if len(eingabe_von_nutzer) != 1:
return False
return True
def ist_aufgeben(eingabe_von_nutzer: str) -> bool:
return eingabe_von_nutzer.lower() == "ich gebe auf"
def bewerte_geratenen_buchstaben(
buchstabe: str,
noch_gesuchte_buchstaben: Set[str],
falsch_geratene_buchstaben: List[str]
) -> str:
if buchstabe in noch_gesuchte_buchstaben:
noch_gesuchte_buchstaben.remove(buchstabe)
if len(noch_gesuchte_buchstaben) == 0:
return "gewonnen"
else:
return "richtig-geraten"
else:
falsch_geratene_buchstaben.append(buchstabe)
if len(falsch_geratene_buchstaben) >= VERLOREN_BEI_SO_VIELEN_FEHLERN:
return "verloren"
else:
return "falsch-geraten"
# ===========================================================================
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
import math
def solve_quadratic_simple():
for a in range(1, 100):
for b in range(1, 100):
for c in range(1, 100):
# if a ** 2 + b ** 2 == c ** 2:
if a * a + b * b == c * c:
print("a =", a, "/ b =", b, "/ c =", c)
def solve_quadratic():
for a in range(1, 100):
for b in range(1, 100):
c = int(math.sqrt(a * a + b * b))
if c < 100 and a * a + b * b == c * c:
print("a =", a, "/ b =", b, "/ c =", c)
def solve_quadratic_shorter():
return [(a, b, c) for a in range(1, 100) for b in range(1, 100)
for c in range(1, 100) if a * a + b * b == c * c]
def main():
solve_quadratic_simple()
solve_quadratic()
print(solve_quadratic_shorter())
if __name__ == "__main__":
main()
# ===========================================================================
# Logic
# The answer can be obtained by simply sorting the row sums and the column sums.
# After sorting, check whether the two sorted lists are exactly the same.
# If they are, the desired configuration is reachable; otherwise it is impossible.
def organizingContainers(container):
rows = [sum(x) for x in container]
cols = [sum(y) for y in zip(*container)]
rows, cols = sorted(rows), sorted(cols)
if(all(x == y for x, y in zip(rows, cols))):
return "Possible"
else:
return "Impossible"
if __name__ == '__main__':
q = int(input())
for q_itr in range(q):
n = int(input())
container = []
for _ in range(n):
container.append(list(map(int, input().rstrip().split())))
result = organizingContainers(container)
print(result)
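# Example: [[1, 1], [1, 1]] gives rows = [2, 2] and cols = [2, 2] -> "Possible";
# [[0, 2], [1, 1]] gives rows = [2, 2] but cols = [1, 3] -> "Impossible".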
# ===== MiaranaDIY/Salamigal | devices/device.py | MIT =====
import time
class Device:
#Global variable
instant_count = 0
def __init__(self):
#Increment instant counter
Device.instant_count += 1
#load watt for power usage calculation and device property
self.load_watt = 0
self.name = 'Device'
self.location = 'Location'
self.group = 'Group'
self.streaming = 0
self.state = 1
self.started_time = time.time()
#Set device load watt
    def set_watt(self, lw=0):
        try:
            self.load_watt = int(lw)
            return lw
        except Exception:
            return None
#Get device ON time to calculate power usage (Hours)
def get_ontime(self):
if(self.state):
return (int(time.time()) - int(self.started_time)) / 60 / 60
else:
return 0
#Calculate power usage in Wh
    def get_usage(self):
        try:
            return self.get_ontime() * int(self.load_watt)
        except Exception:
            return None
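# Example:
#     lamp = Device()
#     lamp.set_watt(60)  # 60 W load
#     lamp.get_usage()   # watt-hours consumed since the device was created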
# ===== jecki/DHParser | experimental/BibTeX/tst_BibTeX_grammar.py | Apache-2.0 =====
#!/usr/bin/env python3
"""tst_BibTeX_grammar.py - runs the unit tests for the BibTeX grammar
Author: Eckhart Arnold <[email protected]>
Copyright 2017 Bavarian Academy of Sciences and Humanities
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
scriptpath = os.path.dirname(__file__) or '.'
for path in (os.path.join('../../examples', '..'), '.'):
fullpath = os.path.abspath(os.path.join(scriptpath, path))
if fullpath not in sys.path:
sys.path.append(fullpath)
try:
from DHParser import configuration
import DHParser.dsl
from DHParser import testing
except ModuleNotFoundError:
print('Could not import DHParser. Please adjust sys.path in file '
'"%s" manually' % __file__)
sys.exit(1)
if __name__ == "__main__":
configuration.access_presets()
configuration.set_preset_value('test_parallelization', True)
configuration.finalize_presets()
if not DHParser.dsl.recompile_grammar('BibTeX.ebnf', force=False): # recompiles Grammar only if it has changed
print('\nErrors while recompiling "BibTeX.ebnf":\n--------------------------------------\n\n')
with open('BibTeX_ebnf_ERRORS.txt') as f:
print(f.read())
sys.exit(1)
sys.path.append('')
# must be appended after module creation, because otherwise an ImportError is raised under Windows
from BibTeXParser import get_grammar, get_transformer
error_report = testing.grammar_suite('test_grammar', get_grammar,
get_transformer, report='REPORT', verbose=True)
if error_report:
print('\n')
print(error_report)
sys.exit(1)
else:
print('\nSUCCESS! All tests passed :-)')
# ===========================================================================
import demistomock as demisto
from CommonServerPython import *
from urllib.parse import quote, unquote
''' MAIN FUNCTION '''
def main(args):
value = args.get('value')
decoded_value = unquote(value)
return quote(decoded_value)
if __name__ in ('__main__', '__builtin__', 'builtins'):
try:
return_results(main(demisto.args()))
except Exception as exc:
return_error(str(exc), error=exc)
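# Example: 'hello world' -> 'hello%20world'; already-encoded 'hello%20world' is
# decoded first and re-encoded, so the output is again 'hello%20world'.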