'.format(greeting))\n\n\nclass AdminPage(webapp2.RequestHandler):\n def get(self):\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin():\n self.response.write('You are an administrator.')\n else:\n self.response.write('You are not an administrator.')\n else:\n self.response.write('You are not logged in.')\n\n\napp = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/admin', AdminPage)\n], debug=True)\n\n# [END all]\n"},"path":{"kind":"string","value":"appengine/standard/users/main.py"},"size":{"kind":"number","value":1847,"string":"1,847"},"nl_text":{"kind":"string","value":"Sample Google App Engine application that demonstrates using the Users API\n\nFor more information about App Engine, see README.md under /appengine.\n\n Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. [START all] [END all]"},"nl_size":{"kind":"number","value":719,"string":"719"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.840371310710907,"string":"0.840371"}}},{"rowIdx":564,"cells":{"content":{"kind":"string","value":"\"\"\"\nASGI config for avocadobites project.\n\nIt exposes the ASGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/howto/deployment/asgi/\n\"\"\"\n\nimport os\n\nfrom django.core.asgi import get_asgi_application\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'avocadobites.settings')\n\napplication = get_asgi_application()\n"},"path":{"kind":"string","value":"avocadobites/avocadobites/asgi.py"},"size":{"kind":"number","value":401,"string":"401"},"nl_text":{"kind":"string","value":"ASGI config for avocadobites project.\n\nIt exposes the ASGI callable as a module-level variable named ``application``.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/howto/deployment/asgi/"},"nl_size":{"kind":"number","value":218,"string":"218"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7236773371696472,"string":"0.723677"}}},{"rowIdx":565,"cells":{"content":{"kind":"string","value":"\"\"\"\n eZmax API Definition\n\n This API expose all the functionnalities for the eZmax and eZsign applications. 
# noqa: E501\n\n The version of the OpenAPI document: 1.1.3\n Contact: support-api@ezmax.ca\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport sys\nimport unittest\n\nimport eZmaxApi\nfrom eZmaxApi.model.ezsignformfield_response import EzsignformfieldResponse\nglobals()['EzsignformfieldResponse'] = EzsignformfieldResponse\nfrom eZmaxApi.model.ezsignformfield_response_compound import EzsignformfieldResponseCompound\n\n\nclass TestEzsignformfieldResponseCompound(unittest.TestCase):\n \"\"\"EzsignformfieldResponseCompound unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testEzsignformfieldResponseCompound(self):\n \"\"\"Test EzsignformfieldResponseCompound\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = EzsignformfieldResponseCompound() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n"},"path":{"kind":"string","value":"test/test_ezsignformfield_response_compound.py"},"size":{"kind":"number","value":1047,"string":"1,047"},"nl_text":{"kind":"string","value":"EzsignformfieldResponseCompound unit test stubs\nTest EzsignformfieldResponseCompound\neZmax API Definition\n\nThis API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501\n\nThe version of the OpenAPI document: 1.1.3\nContact: support-api@ezmax.ca\nGenerated by: https://openapi-generator.tech\n\n FIXME: construct object with mandatory attributes with example values model = EzsignformfieldResponseCompound() noqa: E501"},"nl_size":{"kind":"number","value":446,"string":"446"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6850348711013794,"string":"0.685035"}}},{"rowIdx":566,"cells":{"content":{"kind":"string","value":"import os\nimport shutil\nimport tempfile\nfrom unittest import TestCase\n\nfrom mock import patch\n\nfrom regulations.apps import RegulationsConfig\n\n\nclass RegulationsConfigTests(TestCase):\n def setUp(self):\n self.tmpdir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.tmpdir)\n\n @patch('regulations.apps.get_app_template_dirs')\n def test_precompute_custom_templates(self, get_app_template_dirs):\n \"\"\"Verify that custom templates are found\"\"\"\n get_app_template_dirs.return_value = [self.tmpdir]\n open(os.path.join(self.tmpdir, '123-45-a.html'), 'w').close()\n open(os.path.join(self.tmpdir, 'other.html'), 'w').close()\n\n RegulationsConfig.precompute_custom_templates()\n self.assertEqual(RegulationsConfig.custom_tpls['123-45-a'],\n 'regulations/custom_nodes/123-45-a.html')\n self.assertEqual(RegulationsConfig.custom_tpls['other'],\n 'regulations/custom_nodes/other.html')\n self.assertFalse('another' in RegulationsConfig.custom_tpls)\n"},"path":{"kind":"string","value":"regulations/tests/apps_tests.py"},"size":{"kind":"number","value":1070,"string":"1,070"},"nl_text":{"kind":"string","value":"Verify that custom templates are found"},"nl_size":{"kind":"number","value":38,"string":"38"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8672906756401062,"string":"0.867291"}}},{"rowIdx":567,"cells":{"content":{"kind":"string","value":"\"\"\"@package vc_updated\nFunctions to implement the updated Voce-Chaboche material model and measure its error.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom numdifftools import nd_algopy as nda\n\n\ndef uvc_return_mapping(x_sol, data, tol=1.0e-8, maximum_iterations=1000):\n \"\"\" Implements the time integration of the 
updated Voce-Chaboche material model.\n\n :param np.array x_sol: Updated Voce-Chaboche model parameters.\n :param pd.DataFrame data: stress-strain data.\n :param float tol: Local Newton tolerance.\n :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.\n :return dict: History of: stress ('stress'), strain ('strain'), the total error ('error') calculated by the\n updated Voce-Chaboche model, number of iterations for convergence at each increment ('num_its').\n \"\"\"\n\n if len(x_sol) < 8:\n raise RuntimeError(\"No backstresses or using original V-C params.\")\n n_param_per_back = 2\n n_basic_param = 6\n\n # Get material properties\n E = x_sol[0] * 1.0\n sy_0 = x_sol[1] * 1.0\n Q = x_sol[2] * 1.0\n b = x_sol[3] * 1.0\n D = x_sol[4] * 1.0\n a = x_sol[5] * 1.0\n\n # Set up backstresses\n n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back)\n c_k = []\n gamma_k = []\n for i in range(0, n_backstresses):\n c_k.append(x_sol[n_basic_param + n_param_per_back * i])\n gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i])\n\n # Initialize parameters\n alpha_components = np.zeros(n_backstresses, dtype=object) # backstress components\n strain = 0.\n stress = 0.\n ep_eq = 0. # equivalent plastic strain\n\n error = 0. # error measure\n sum_abs_de = 0. # total strain\n stress_sim = 0.0\n stress_test = 0.0\n area_test = 0.0\n\n stress_track = []\n strain_track = []\n strain_inc_track = []\n iteration_track = []\n\n loading = np.diff(data['e_true'])\n for increment_number, strain_inc in enumerate(loading):\n strain += strain_inc\n alpha = np.sum(alpha_components)\n yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))\n\n trial_stress = stress + E * strain_inc\n relative_stress = trial_stress - alpha\n flow_dir = np.sign(relative_stress)\n\n yield_condition = np.abs(relative_stress) - yield_stress\n if yield_condition > tol:\n is_converged = False\n else:\n is_converged = True\n\n # For error\n stress_sim_1 = stress_sim * 1.0\n stress_test_1 = stress_test * 1.0\n\n # Return mapping if plastic loading\n ep_eq_init = ep_eq\n alpha_init = alpha\n consist_param = 0.\n number_of_iterations = 0\n while is_converged is False and number_of_iterations < maximum_iterations:\n number_of_iterations += 1\n # Isotropic hardening and isotropic modulus\n yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. 
- np.exp(-a * ep_eq))\n iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)\n\n # Kinematic hardening and kinematic modulus\n alpha = 0.\n kin_modulus = 0.\n for i in range(0, n_backstresses):\n e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))\n alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k\n kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i]\n delta_alpha = alpha - alpha_init\n\n # Local Newton step\n numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha)\n denominator = -(E + iso_modulus + kin_modulus)\n consist_param = consist_param - numerator / denominator\n ep_eq = ep_eq_init + consist_param\n\n if np.abs(numerator) < tol:\n is_converged = True\n\n # Update the variables\n stress = trial_stress - E * flow_dir * consist_param\n for i in range(0, n_backstresses):\n e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))\n alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \\\n + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k\n\n stress_track.append(stress)\n strain_track.append(strain)\n strain_inc_track.append(strain_inc)\n iteration_track.append(number_of_iterations)\n\n # Calculate the error\n stress_sim = stress * 1.0\n stress_test = data['Sigma_true'].iloc[increment_number + 1]\n\n sum_abs_de += np.abs(strain_inc)\n area_test += np.abs(strain_inc) * ((stress_test) ** 2 + (stress_test_1) ** 2) / 2.\n error += np.abs(strain_inc) * ((stress_sim - stress_test) ** 2 + (stress_sim_1 - stress_test_1) ** 2) / 2.\n\n if number_of_iterations >= maximum_iterations:\n print (\"Increment number = \", increment_number)\n print (\"Parameters = \", x_sol)\n print (\"Numerator = \", numerator)\n raise RuntimeError('Return mapping did not converge in ' + str(maximum_iterations) + ' iterations.')\n\n area = area_test / sum_abs_de\n error = error / sum_abs_de\n return {'stress': stress_track, 'strain': strain_track, 'error': error, 'num_its': iteration_track,\n 'area': area}\n\n\ndef sim_curve_uvc(x_sol, test_clean):\n \"\"\" Returns the stress-strain approximation of the updated Voce-Chaboche material model to a given strain input.\n\n :param np.array x_sol: Voce-Chaboche model parameters\n :param DataFrame test_clean: stress-strain data\n :return DataFrame: Voce-Chaboche approximation\n\n The strain column in the DataFrame is labeled \"e_true\" and the stress column is labeled \"Sigma_true\".\n \"\"\"\n\n model_output = uvc_return_mapping(x_sol, test_clean)\n strain = np.append([0.], model_output['strain'])\n stress = np.append([0.], model_output['stress'])\n\n sim_curve = pd.DataFrame(np.array([strain, stress]).transpose(), columns=['e_true', 'Sigma_true'])\n return sim_curve\n\n\ndef error_single_test_uvc(x_sol, test_clean):\n \"\"\" Returns the relative error between a test and its approximation using the updated Voce-Chaboche material model.\n\n :param np.array x_sol: Voce-Chaboche model parameters\n :param DataFrame test_clean: stress-strain data\n :return float: relative error\n\n The strain column in the DataFrame is labeled \"e_true\" and the stress column is labeled \"Sigma_true\".\n \"\"\"\n\n model_output = uvc_return_mapping(x_sol, test_clean)\n return model_output['error']\n\n\ndef normalized_error_single_test_uvc(x_sol, test_clean):\n \"\"\" Returns the error and the total area of a test and its approximation using the updated Voce-Chaboche\n material model.\n\n :param np.array x_sol: Voce-Chaboche model parameters\n :param 
DataFrame test_clean: stress-strain data\n :return list: (float) total error, (float) total area\n\n The strain column in the DataFrame is labeled \"e_true\" and the stress column is labeled \"Sigma_true\".\n \"\"\"\n\n model_output = uvc_return_mapping(x_sol, test_clean)\n return [model_output['error'], model_output['area']]\n\n\ndef calc_phi_total(x, data):\n \"\"\" Returns the sum of the normalized relative error of the updated Voce-Chaboche material model given x.\n\n :param np.array x: Updated Voce-Chaboche material model parameters.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return float: Normalized error value expressed as a percent (raw value * 100).\n\n The normalized error is defined in de Sousa and Lignos (2017).\n \"\"\"\n error_total = 0.\n area_total = 0.\n for d in data:\n error, area = normalized_error_single_test_uvc(x, d)\n error_total += error\n area_total += area\n\n return np.sqrt(error_total / area_total) * 100.\n\n\ndef test_total_area(x, data):\n \"\"\" Returns the total squared area underneath all the tests.\n\n :param np.array x: Updated Voce-Chaboche material model parameters.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return float: Total squared area.\n \"\"\"\n area_total = 0.\n for d in data:\n _, area = normalized_error_single_test_uvc(x, d)\n area_total += area\n return area_total\n\n\ndef uvc_get_hessian(x, data):\n \"\"\" Returns the Hessian of the material model error function for a given set of test data evaluated at x.\n\n :param np.array x: Updated Voce-Chaboche material model parameters.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return np.array: Hessian matrix of the error function.\n \"\"\"\n\n def f(xi):\n val = 0.\n for d in data:\n val += error_single_test_uvc(xi, d)\n return val\n\n hess_fun = nda.Hessian(f)\n return hess_fun(x)\n\n\ndef uvc_consistency_metric(x_base, x_sample, data):\n \"\"\" Returns the xi_2 consistency metric from de Sousa and Lignos 2019 using the updated Voce-Chaboche model.\n\n :param np.array x_base: Updated Voce-Chaboche material model parameters from the base case.\n :param np.array x_sample: Updated Voce-Chaboche material model parameters from the sample case.\n :param list data: (pd.DataFrame) Stress-strain history for each test considered.\n :return float: Increase in quadratic approximation from the base to the sample case.\n \"\"\"\n x_diff = x_sample - x_base\n hess_base = uvc_get_hessian(x_base, data)\n numerator = np.dot(x_diff, hess_base.dot(x_diff))\n denominator = test_total_area(x_base, data)\n return np.sqrt(numerator / denominator)\n\n\ndef uvc_tangent_modulus(x_sol, data, tol=1.0e-8, maximum_iterations=1000):\n \"\"\" Returns the tangent modulus at each strain step.\n\n :param np.array x_sol: Updated Voce-Chaboche model parameters.\n :param pd.DataFrame data: stress-strain data.\n :param float tol: Local Newton tolerance.\n :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.\n :return np.ndarray: Tangent modulus array.\n \"\"\"\n\n if len(x_sol) < 8:\n raise RuntimeError(\"No backstresses or using original V-C params.\")\n n_param_per_back = 2\n n_basic_param = 6\n\n # Get material properties\n E = x_sol[0] * 1.0\n sy_0 = x_sol[1] * 1.0\n Q = x_sol[2] * 1.0\n b = x_sol[3] * 1.0\n D = x_sol[4] * 1.0\n a = x_sol[5] * 1.0\n\n # Set up backstresses\n n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back)\n c_k 
= []\n gamma_k = []\n for i in range(0, n_backstresses):\n c_k.append(x_sol[n_basic_param + n_param_per_back * i])\n gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i])\n\n # Initialize parameters\n alpha_components = np.zeros(n_backstresses, dtype=object) # backstress components\n strain = 0.\n stress = 0.\n ep_eq = 0. # equivalent plastic strain\n\n stress_track = []\n strain_track = []\n strain_inc_track = []\n iteration_track = []\n tangent_track = []\n\n loading = np.diff(data['e_true'])\n for increment_number, strain_inc in enumerate(loading):\n strain += strain_inc\n alpha = np.sum(alpha_components)\n yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))\n\n trial_stress = stress + E * strain_inc\n relative_stress = trial_stress - alpha\n flow_dir = np.sign(relative_stress)\n\n yield_condition = np.abs(relative_stress) - yield_stress\n if yield_condition > tol:\n is_converged = False\n else:\n is_converged = True\n\n # Return mapping if plastic loading\n ep_eq_init = ep_eq\n alpha_init = alpha\n consist_param = 0.\n number_of_iterations = 0\n while is_converged is False and number_of_iterations < maximum_iterations:\n number_of_iterations += 1\n # Isotropic hardening and isotropic modulus\n yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))\n iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)\n\n # Kinematic hardening and kinematic modulus\n alpha = 0.\n kin_modulus = 0.\n for i in range(0, n_backstresses):\n e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))\n alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k\n kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i]\n delta_alpha = alpha - alpha_init\n\n # Local Newton step\n numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha)\n denominator = -(E + iso_modulus + kin_modulus)\n consist_param = consist_param - numerator / denominator\n ep_eq = ep_eq_init + consist_param\n\n if np.abs(numerator) < tol:\n is_converged = True\n\n # Update the variables\n stress = trial_stress - E * flow_dir * consist_param\n for i in range(0, n_backstresses):\n e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))\n alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \\\n + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k\n\n stress_track.append(stress)\n strain_track.append(strain)\n strain_inc_track.append(strain_inc)\n iteration_track.append(number_of_iterations)\n\n # Calculate the tangent modulus\n if number_of_iterations > 0:\n h_prime = 0.\n for i in range(0, n_backstresses):\n h_prime += c_k[i] - flow_dir * gamma_k[i] * alpha_components[i]\n k_prime = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)\n tangent_track.append(E * (k_prime + h_prime) / (E + k_prime + h_prime))\n else:\n # Elastic loading\n tangent_track.append(E)\n\n return np.append([0.], np.array(tangent_track))\n"},"path":{"kind":"string","value":"RESSPyLab/uvc_model.py"},"size":{"kind":"number","value":14107,"string":"14,107"},"nl_text":{"kind":"string","value":"Returns the sum of the normalized relative error of the updated Voce-Chaboche material model given x.\n\n:param np.array x: Updated Voce-Chaboche material model parameters.\n:param list data: (pd.DataFrame) Stress-strain history for each test considered.\n:return float: Normalized error value expressed as a percent (raw value * 100).\n\nThe normalized error is defined in de Sousa and Lignos 
(2017).\nReturns the relative error between a test and its approximation using the updated Voce-Chaboche material model.\n\n:param np.array x_sol: Voce-Chaboche model parameters\n:param DataFrame test_clean: stress-strain data\n:return float: relative error\n\nThe strain column in the DataFrame is labeled \"e_true\" and the stress column is labeled \"Sigma_true\".\nReturns the error and the total area of a test and its approximation using the updated Voce-Chaboche\nmaterial model.\n\n:param np.array x_sol: Voce-Chaboche model parameters\n:param DataFrame test_clean: stress-strain data\n:return list: (float) total error, (float) total area\n\nThe strain column in the DataFrame is labeled \"e_true\" and the stress column is labeled \"Sigma_true\".\nReturns the stress-strain approximation of the updated Voce-Chaboche material model to a given strain input.\n\n:param np.array x_sol: Voce-Chaboche model parameters\n:param DataFrame test_clean: stress-strain data\n:return DataFrame: Voce-Chaboche approximation\n\nThe strain column in the DataFrame is labeled \"e_true\" and the stress column is labeled \"Sigma_true\".\nReturns the total squared area underneath all the tests.\n\n:param np.array x: Updated Voce-Chaboche material model parameters.\n:param list data: (pd.DataFrame) Stress-strain history for each test considered.\n:return float: Total squared area.\nReturns the xi_2 consistency metric from de Sousa and Lignos 2019 using the updated Voce-Chaboche model.\n\n:param np.array x_base: Updated Voce-Chaboche material model parameters from the base case.\n:param np.array x_sample: Updated Voce-Chaboche material model parameters from the sample case.\n:param list data: (pd.DataFrame) Stress-strain history for each test considered.\n:return float: Increase in quadratic approximation from the base to the sample case.\nReturns the Hessian of the material model error function for a given set of test data evaluated at x.\n\n:param np.array x: Updated Voce-Chaboche material model parameters.\n:param list data: (pd.DataFrame) Stress-strain history for each test considered.\n:return np.array: Hessian matrix of the error function.\nImplements the time integration of the updated Voce-Chaboche material model.\n\n:param np.array x_sol: Updated Voce-Chaboche model parameters.\n:param pd.DataFrame data: stress-strain data.\n:param float tol: Local Newton tolerance.\n:param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.\n:return dict: History of: stress ('stress'), strain ('strain'), the total error ('error') calculated by the\n updated Voce-Chaboche model, number of iterations for convergence at each increment ('num_its').\nReturns the tangent modulus at each strain step.\n\n:param np.array x_sol: Updated Voce-Chaboche model parameters.\n:param pd.DataFrame data: stress-strain data.\n:param float tol: Local Newton tolerance.\n:param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.\n:return np.ndarray: Tangent modulus array.\n@package vc_updated\nFunctions to implement the updated Voce-Chaboche material model and measure its error.\n\n Get material properties Set up backstresses Initialize parameters backstress components equivalent plastic strain error measure total strain For error Return mapping if plastic loading Isotropic hardening and isotropic modulus Kinematic hardening and kinematic modulus Local Newton step Update the variables Calculate the error Get material properties Set up backstresses 
Initialize parameters backstress components equivalent plastic strain Return mapping if plastic loading Isotropic hardening and isotropic modulus Kinematic hardening and kinematic modulus Local Newton step Update the variables Calculate the tangent modulus Elastic loading"},"nl_size":{"kind":"number","value":4148,"string":"4,148"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.4840848445892334,"string":"0.484085"}}},{"rowIdx":568,"cells":{"content":{"kind":"string","value":"import wikipedia as wiki\nfrom ..parsing import get_wiki_page_id, get_wiki_lines, get_wiki_sections\n\ndef get_wiki_references(url, outfile=None):\n \"\"\"get_wiki_references.\n Extracts references from predefined sections of wiki page\n Uses `urlscan`, `refextract`, `doi`, `wikipedia`, and `re` (for ArXiv URLs)\n\n :param url: URL of wiki article to scrape\n :param outfile: File to write extracted references to\n \"\"\"\n def _check(l):\n return (not l['doi'] or l['doi'] == l['refs'][-1]['doi']) \\\n and (not l['arxiv'] or l['arxiv'] == l['refs'][-1]['arxiv'])\n page = wiki.page(get_wiki_page_id(url))\n sections = get_wiki_sections(page.content)\n lines = sum([get_wiki_lines(s, predicate=any) for s in sections.values()], [])\n links = sum([wikiparse.parse(s).external_links for s in sections.values()], [])\n summary = sum([\n [\n {\n 'raw': l,\n 'links': urlscan.parse_text_urls(l),\n 'refs': refextract.extract_references_from_string(l),\n 'doi': doi.find_doi_in_text(l),\n 'arxiv': m.group(1) if (m := arxiv_url_regex.matches(l)) is not None else None\n } for l in get_wiki_lines(s, predicate=any)\n ] for s in sections.values()\n ])\n failed = [ld for ld in summary if not _check(ld)]\n if any(failed):\n logger.warning('Consistency check failed for the following lines: {}'.format(failed))\n return _serialize(summary, outfile)\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n"},"path":{"kind":"string","value":"scraper/apis/wikipedia.py"},"size":{"kind":"number","value":1558,"string":"1,558"},"nl_text":{"kind":"string","value":"get_wiki_references.\nExtracts references from predefined sections of wiki page\nUses `urlscan`, `refextract`, `doi`, `wikipedia`, and `re` (for ArXiv URLs)\n\n:param url: URL of wiki article to scrape\n:param outfile: File to write extracted references to"},"nl_size":{"kind":"number","value":251,"string":"251"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7256291508674622,"string":"0.725629"}}},{"rowIdx":569,"cells":{"content":{"kind":"string","value":"\nfrom concurrent.futures.process import ProcessPoolExecutor\n\nimport api.Config\nimport api.middleware\nfrom api.Config import app\nfrom api.routers import (feedback, hiscore, label, legacy, legacy_debug,\n player, prediction, report, scraper)\n\napp.include_router(hiscore.router)\napp.include_router(player.router)\napp.include_router(prediction.router)\napp.include_router(feedback.router)\napp.include_router(report.router)\napp.include_router(legacy.router)\napp.include_router(scraper.router)\napp.include_router(label.router)\napp.include_router(legacy_debug.router)\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello World\"}\n\n\n# @app.on_event(\"startup\")\n# async def startup_event():\n# app.state.executor = ProcessPoolExecutor()\n\n\n# @app.on_event(\"shutdown\")\n# async def on_shutdown():\n# 
app.state.executor.shutdown()\n"},"path":{"kind":"string","value":"api/app.py"},"size":{"kind":"number","value":854,"string":"854"},"nl_text":{"kind":"string","value":"@app.on_event(\"startup\") async def startup_event(): app.state.executor = ProcessPoolExecutor() @app.on_event(\"shutdown\") async def on_shutdown(): app.state.executor.shutdown()"},"nl_size":{"kind":"number","value":183,"string":"183"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.5161250233650208,"string":"0.516125"}}},{"rowIdx":570,"cells":{"content":{"kind":"string","value":"# Copyright lowRISC contributors.\n# Licensed under the Apache License, Version 2.0, see LICENSE for details.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport subprocess\n\n\nOTBN_DIR = os.path.join(os.path.dirname(__file__), '../../..')\nUTIL_DIR = os.path.join(OTBN_DIR, 'util')\nSIM_DIR = os.path.join(os.path.dirname(__file__), '..')\n\n\ndef asm_and_link_one_file(asm_path: str, work_dir: str) -> str:\n '''Assemble and link file at asm_path in work_dir.\n\n Returns the path to the resulting ELF\n\n '''\n otbn_as = os.path.join(UTIL_DIR, 'otbn-as')\n otbn_ld = os.path.join(UTIL_DIR, 'otbn-ld')\n obj_path = os.path.join(work_dir, 'tst.o')\n elf_path = os.path.join(work_dir, 'tst')\n\n subprocess.run([otbn_as, '-o', obj_path, asm_path], check=True)\n subprocess.run([otbn_ld, '-o', elf_path, obj_path], check=True)\n return elf_path\n"},"path":{"kind":"string","value":"hw/ip/otbn/dv/otbnsim/test/testutil.py"},"size":{"kind":"number","value":857,"string":"857"},"nl_text":{"kind":"string","value":"Assemble and link file at asm_path in work_dir.\n\nReturns the path to the resulting ELF\n\n Copyright lowRISC contributors. Licensed under the Apache License, Version 2.0, see LICENSE for details. 
SPDX-License-Identifier: Apache-2.0"},"nl_size":{"kind":"number","value":229,"string":"229"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7511562705039978,"string":"0.751156"}}},{"rowIdx":571,"cells":{"content":{"kind":"string","value":"#Find,Remove,Find\n\"\"\"Return a tuple of the indices of the two smallest values in list L.\n >>> items = [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]\n >>> find_two_smallest(items)\n (6, 7)\n >>> items == [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]\n True\n\"\"\"\n\nfrom typing import List, Tuple\n\n\ndef find_two_smallest(L:List[float]) -> Tuple[int, int]:\n \"\"\" (see above) \"\"\"\n # Find the index of the minimum and remove that item\n smallest = min(L)\n min1 = L.index(smallest)\n L.remove(smallest)\n\n # Find the index of the new minimum item in the list\n next_smallest = min(L)\n min2 = L.index(next_smallest)\n\n # Put smallest back into L\n L.insert(min1, smallest)\n\n # Fix min2 in case it was affected by the removal and reinsertion:\n if min1 <= min2:\n min2 +=1\n \n return (min1, min2)\n \n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n print(find_two_smallest([0, 1, 3, 2, 5, 6, 1]))\n\n"},"path":{"kind":"string","value":"chapter12/examples/example02.py"},"size":{"kind":"number","value":972,"string":"972"},"nl_text":{"kind":"string","value":"(see above) \nReturn a tuple of the indices of the two smallest values in list L.\n>>> items = [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]\n>>> find_two_smallest(items)\n(6, 7)\n>>> items == [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]\nTrue\n\nFind,Remove,Find Find the index of the minimum and remove that item Find the index of the new minimum item in the list Put smallest back into L Fix min2 in case it was affected by the removal and reinsertion:"},"nl_size":{"kind":"number","value":456,"string":"456"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7592371106147766,"string":"0.759237"}}},{"rowIdx":572,"cells":{"content":{"kind":"string","value":"from __future__ import absolute_import, print_function\n\nimport logging\n\nimport bokeh.server.tornado as tornado\n\nfrom bokeh.application import Application\nfrom bokeh.client import pull_session\nfrom bokeh.server.views.static_handler import StaticHandler\n\nfrom .utils import ManagedServerLoop, url\n\nlogging.basicConfig(level=logging.DEBUG)\n\ndef test_check_whitelist_rejects_port_mismatch():\n assert False == tornado.check_whitelist(\"foo:100\", [\"foo:101\", \"foo:102\"])\n\ndef test_check_whitelist_rejects_name_mismatch():\n assert False == tornado.check_whitelist(\"foo:100\", [\"bar:100\", \"baz:100\"])\n\ndef test_check_whitelist_accepts_name_port_match():\n assert True == tornado.check_whitelist(\"foo:100\", [\"foo:100\", \"baz:100\"])\n\ndef test_check_whitelist_accepts_implicit_port_80():\n assert True == tornado.check_whitelist(\"foo\", [\"foo:80\"])\n\ndef test_check_whitelist_accepts_all_on_star():\n assert True == tornado.check_whitelist(\"192.168.0.1\", ['*'])\n assert True == tornado.check_whitelist(\"192.168.0.1:80\", ['*'])\n assert True == tornado.check_whitelist(\"192.168.0.1:5006\", ['*'])\n assert True == tornado.check_whitelist(\"192.168.0.1:80\", ['*:80'])\n assert False == tornado.check_whitelist(\"192.168.0.1:80\", ['*:81'])\n assert True == tornado.check_whitelist(\"192.168.0.1:5006\", ['*:*'])\n assert True == tornado.check_whitelist(\"192.168.0.1\", ['192.168.0.*'])\n assert True == 
tornado.check_whitelist(\"192.168.0.1:5006\", ['192.168.0.*'])\n assert False == tornado.check_whitelist(\"192.168.1.1\", ['192.168.0.*'])\n assert True == tornado.check_whitelist(\"foobarbaz\", ['*'])\n assert True == tornado.check_whitelist(\"192.168.0.1\", ['192.168.0.*'])\n assert False == tornado.check_whitelist(\"192.168.1.1\", ['192.168.0.*'])\n assert False == tornado.check_whitelist(\"192.168.0.1\", ['192.168.0.*:5006'])\n assert True == tornado.check_whitelist(\"192.168.0.1\", ['192.168.0.*:80'])\n assert True == tornado.check_whitelist(\"foobarbaz\", ['*'])\n assert True == tornado.check_whitelist(\"foobarbaz\", ['*:*'])\n assert True == tornado.check_whitelist(\"foobarbaz\", ['*:80'])\n assert False == tornado.check_whitelist(\"foobarbaz\", ['*:5006'])\n assert True == tornado.check_whitelist(\"foobarbaz:5006\", ['*'])\n assert True == tornado.check_whitelist(\"foobarbaz:5006\", ['*:*'])\n assert True == tornado.check_whitelist(\"foobarbaz:5006\", ['*:5006'])\n\ndef test_default_resources():\n application = Application()\n with ManagedServerLoop(application) as server:\n r = server._tornado.resources()\n assert r.mode == \"server\"\n assert r.root_url == \"\"\n assert r.path_versioner == StaticHandler.append_version\n\n with ManagedServerLoop(application, prefix=\"/foo/\") as server:\n r = server._tornado.resources()\n assert r.mode == \"server\"\n assert r.root_url == \"/foo/\"\n assert r.path_versioner == StaticHandler.append_version\n\n with ManagedServerLoop(application, prefix=\"foo/\") as server:\n r = server._tornado.resources()\n assert r.mode == \"server\"\n assert r.root_url == \"/foo/\"\n assert r.path_versioner == StaticHandler.append_version\n\n with ManagedServerLoop(application, prefix=\"foo\") as server:\n r = server._tornado.resources()\n assert r.mode == \"server\"\n assert r.root_url == \"/foo/\"\n assert r.path_versioner == StaticHandler.append_version\n\n with ManagedServerLoop(application, prefix=\"/foo\") as server:\n r = server._tornado.resources()\n assert r.mode == \"server\"\n assert r.root_url == \"/foo/\"\n assert r.path_versioner == StaticHandler.append_version\n\n with ManagedServerLoop(application, prefix=\"/foo/bar\") as server:\n r = server._tornado.resources()\n assert r.mode == \"server\"\n assert r.root_url == \"/foo/bar/\"\n assert r.path_versioner == StaticHandler.append_version\n\ndef test_default_app_paths():\n app = Application()\n t = tornado.BokehTornado({}, \"\", [])\n assert t.app_paths == set()\n\n t = tornado.BokehTornado({\"/\": app}, \"\", [])\n assert t.app_paths == { \"/\" }\n\n t = tornado.BokehTornado({\"/\": app, \"/foo\": app}, \"\", [])\n assert t.app_paths == { \"/\", \"/foo\"}\n\n# tried to use capsys to test what's actually logged and it wasn't\n# working, in the meantime at least this tests that log_stats\n# doesn't crash in various scenarios\ndef test_log_stats():\n application = Application()\n with ManagedServerLoop(application) as server:\n server._tornado.log_stats()\n session1 = pull_session(session_id='session1',\n url=url(server),\n io_loop=server.io_loop)\n session2 = pull_session(session_id='session2',\n url=url(server),\n io_loop=server.io_loop)\n server._tornado.log_stats()\n session1.close()\n session2.close()\n server._tornado.log_stats()\n"},"path":{"kind":"string","value":"bokeh/server/tests/test_tornado.py"},"size":{"kind":"number","value":4957,"string":"4,957"},"nl_text":{"kind":"string","value":"tried to use capsys to test what's actually logged and it wasn't working, in the meantime at least this 
tests that log_stats doesn't crash in various scenarios"},"nl_size":{"kind":"number","value":159,"string":"159"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.9790326356887817,"string":"0.979033"}}},{"rowIdx":573,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright 2015 Luminal, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport csv\nimport json\nimport operator\nimport os\nimport os.path\nimport sys\nimport time\nimport re\nimport boto3\nimport botocore.exceptions\n\ntry:\n from StringIO import StringIO\nexcept ImportError:\n from io import StringIO\n\ntry:\n import yaml\n NO_YAML = False\nexcept ImportError:\n NO_YAML = True\n\nfrom base64 import b64encode, b64decode\nfrom boto3.dynamodb.conditions import Attr\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import SHA256\nfrom Crypto.Hash.HMAC import HMAC\nfrom Crypto.Util import Counter\n\nDEFAULT_REGION = \"us-east-1\"\nPAD_LEN = 19 # number of digits in sys.maxint\nWILDCARD_CHAR = \"*\"\n\n\nclass KmsError(Exception):\n def __init__(self, value=\"\"):\n self.value = \"KMS ERROR: \" + value if value is not \"\" else \"KMS ERROR\"\n\n def __str__(self):\n return self.value\n\n\nclass IntegrityError(Exception):\n def __init__(self, value=\"\"):\n self.value = \"INTEGRITY ERROR: \" + value if value is not \"\" else \\\n \"INTEGRITY ERROR\"\n\n def __str__(self):\n return self.value\n\n\nclass ItemNotFound(Exception):\n pass\n\n\nclass KeyValueToDictionary(argparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace,\n self.dest,\n dict((x[0], x[1]) for x in values))\n\n\ndef printStdErr(s):\n sys.stderr.write(str(s))\n sys.stderr.write(\"\\n\")\n\n\ndef key_value_pair(string):\n output = string.split('=')\n if len(output) != 2:\n msg = \"%r is not the form of \\\"key=value\\\"\" % string\n raise argparse.ArgumentTypeError(msg)\n return output\n\n\ndef expand_wildcard(string, secrets):\n prog = re.compile('^' + string.replace(WILDCARD_CHAR, '.*') + '$')\n output = []\n for secret in secrets:\n if prog.search(secret) is not None:\n output.append(secret)\n return output\n\n\ndef value_or_filename(string):\n # argparse running on old version of python (<2.7) will pass an empty\n # string to this function before it passes the actual value.\n # If an empty string is passes in, just return an empty string\n if string == \"\":\n return \"\"\n if string[0] == \"@\":\n filename = string[1:]\n try:\n with open(os.path.expanduser(filename)) as f:\n output = f.read()\n except IOError as e:\n raise argparse.ArgumentTypeError(\"Unable to read file %s\" %\n filename)\n else:\n output = string\n return output\n\n\ndef csv_dump(dictionary):\n csvfile = StringIO()\n csvwriter = csv.writer(csvfile)\n for key in dictionary:\n csvwriter.writerow([key, dictionary[key]])\n return csvfile.getvalue()\n\n\ndef paddedInt(i):\n '''\n return a string that contains `i`, left-padded with 0's up to PAD_LEN digits\n '''\n 
i_str = str(i)\n pad = PAD_LEN - len(i_str)\n return (pad * \"0\") + i_str\n\ndef getHighestVersion(name, region=\"us-east-1\", table=\"credential-store\"):\n '''\n Return the highest version of `name` in the table\n '''\n dynamodb = boto3.resource('dynamodb', region_name=region)\n secrets = dynamodb.Table(table)\n\n response = secrets.query(Limit=1,\n ScanIndexForward=False,\n ConsistentRead=True,\n KeyConditionExpression=boto3.dynamodb.conditions.Key(\"name\").eq(name),\n ProjectionExpression=\"version\")\n\n if response[\"Count\"] == 0:\n return 0\n return response[\"Items\"][0][\"version\"]\n\n\ndef listSecrets(region=\"us-east-1\", table=\"credential-store\"):\n '''\n do a full-table scan of the credential-store,\n and return the names and versions of every credential\n '''\n dynamodb = boto3.resource('dynamodb', region_name=region)\n secrets = dynamodb.Table(table)\n\n response = secrets.scan(ProjectionExpression=\"#N, version\",\n ExpressionAttributeNames={\"#N\": \"name\"})\n return response[\"Items\"]\n\n\ndef putSecret(name, secret, version, kms_key=\"alias/credstash\",\n region=\"us-east-1\", table=\"credential-store\", context=None):\n '''\n put a secret called `name` into the secret-store,\n protected by the key kms_key\n '''\n if not context:\n context = {}\n kms = boto3.client('kms', region_name=region)\n # generate a a 64 byte key.\n # Half will be for data encryption, the other half for HMAC\n try:\n kms_response = kms.generate_data_key(KeyId=kms_key, EncryptionContext=context, NumberOfBytes=64)\n except:\n raise KmsError(\"Could not generate key using KMS key %s\" % kms_key)\n data_key = kms_response['Plaintext'][:32]\n hmac_key = kms_response['Plaintext'][32:]\n wrapped_key = kms_response['CiphertextBlob']\n\n enc_ctr = Counter.new(128)\n encryptor = AES.new(data_key, AES.MODE_CTR, counter=enc_ctr)\n\n c_text = encryptor.encrypt(secret)\n # compute an HMAC using the hmac key and the ciphertext\n hmac = HMAC(hmac_key, msg=c_text, digestmod=SHA256)\n b64hmac = hmac.hexdigest()\n\n dynamodb = boto3.resource('dynamodb', region_name=region)\n secrets = dynamodb.Table(table)\n\n data = {}\n data['name'] = name\n data['version'] = version if version != \"\" else paddedInt(1)\n data['key'] = b64encode(wrapped_key).decode('utf-8')\n data['contents'] = b64encode(c_text).decode('utf-8')\n data['hmac'] = b64hmac\n\n return secrets.put_item(Item=data, ConditionExpression=Attr('name').not_exists())\n\n\ndef getAllSecrets(version=\"\", region=\"us-east-1\",\n table=\"credential-store\", context=None):\n '''\n fetch and decrypt all secrets\n '''\n output = {}\n secrets = listSecrets(region, table)\n for credential in set([x[\"name\"] for x in secrets]):\n try:\n output[credential] = getSecret(credential,\n version,\n region,\n table,\n context)\n except:\n pass\n return output\n\n\ndef getSecret(name, version=\"\", region=\"us-east-1\",\n table=\"credential-store\", context=None):\n '''\n fetch and decrypt the secret called `name`\n '''\n if not context:\n context = {}\n\n dynamodb = boto3.resource('dynamodb', region_name=region)\n secrets = dynamodb.Table(table)\n\n if version == \"\":\n # do a consistent fetch of the credential with the highest version\n response = secrets.query(Limit=1,\n ScanIndexForward=False,\n ConsistentRead=True,\n KeyConditionExpression=boto3.dynamodb.conditions.Key(\"name\").eq(name))\n if response[\"Count\"] == 0:\n raise ItemNotFound(\"Item {'name': '%s'} couldn't be found.\" % name)\n material = response[\"Items\"][0]\n else:\n response = 
secrets.get_item(Key={\"name\": name, \"version\": version})\n if \"Item\" not in response:\n raise ItemNotFound(\"Item {'name': '%s', 'version': '%s'} couldn't be found.\" % (name, version))\n material = response[\"Item\"]\n\n kms = boto3.client('kms', region_name=region)\n # Check the HMAC before we decrypt to verify ciphertext integrity\n try:\n kms_response = kms.decrypt(CiphertextBlob=b64decode(material['key']), EncryptionContext=context)\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"InvalidCiphertextException\":\n if context is None:\n msg = (\"Could not decrypt hmac key with KMS. The credential may \"\n \"require that an encryption context be provided to decrypt \"\n \"it.\")\n else:\n msg = (\"Could not decrypt hmac key with KMS. The encryption \"\n \"context provided may not match the one used when the \"\n \"credential was stored.\")\n else:\n msg = \"Decryption error %s\" % e\n raise KmsError(msg)\n except Exception as e:\n raise KmsError(\"Decryption error %s\" % e)\n key = kms_response['Plaintext'][:32]\n hmac_key = kms_response['Plaintext'][32:]\n hmac = HMAC(hmac_key, msg=b64decode(material['contents']),\n digestmod=SHA256)\n if hmac.hexdigest() != material['hmac']:\n raise IntegrityError(\"Computed HMAC on %s does not match stored HMAC\"\n % name)\n dec_ctr = Counter.new(128)\n decryptor = AES.new(key, AES.MODE_CTR, counter=dec_ctr)\n plaintext = decryptor.decrypt(b64decode(material['contents'])).decode(\"utf-8\")\n return plaintext\n\n\ndef deleteSecrets(name, region=\"us-east-1\", table=\"credential-store\"):\n dynamodb = boto3.resource('dynamodb', region_name=region)\n secrets = dynamodb.Table(table)\n\n response = secrets.scan(FilterExpression=boto3.dynamodb.conditions.Attr(\"name\").eq(name),\n ProjectionExpression=\"#N, version\",\n ExpressionAttributeNames={\"#N\": \"name\"})\n\n for secret in response[\"Items\"]:\n print(\"Deleting %s -- version %s\" % (secret[\"name\"], secret[\"version\"]))\n secrets.delete_item(Key=secret)\n\n\ndef createDdbTable(region=\"us-east-1\", table=\"credential-store\"):\n '''\n create the secret store table in DDB in the specified region\n '''\n dynamodb = boto3.resource(\"dynamodb\", region_name=region)\n if table in (t.name for t in dynamodb.tables.all()):\n print(\"Credential Store table already exists\")\n return\n\n print(\"Creating table...\")\n response = dynamodb.create_table(\n TableName=table,\n KeySchema=[\n {\n \"AttributeName\": \"name\",\n \"KeyType\": \"HASH\",\n },\n {\n \"AttributeName\": \"version\",\n \"KeyType\": \"RANGE\",\n }\n ],\n AttributeDefinitions=[\n {\n \"AttributeName\": \"name\",\n \"AttributeType\": \"S\",\n },\n {\n \"AttributeName\": \"version\",\n \"AttributeType\": \"S\",\n },\n ],\n ProvisionedThroughput={\n \"ReadCapacityUnits\": 1,\n \"WriteCapacityUnits\": 1,\n }\n )\n\n print(\"Waiting for table to be created...\")\n client = boto3.client(\"dynamodb\", region_name=region)\n client.get_waiter(\"table_exists\").wait(TableName=table)\n\n print(\"Table has been created. 
\"\n \"Go read the README about how to create your KMS key\")\n\n\ndef main():\n parsers = {}\n parsers['super'] = argparse.ArgumentParser(\n description=\"A credential/secret storage system\")\n\n parsers['super'].add_argument(\"-r\", \"--region\",\n help=\"the AWS region in which to operate.\"\n \"If a region is not specified, credstash \"\n \"will use the value of the \"\n \"AWS_DEFAULT_REGION env variable, \"\n \"or if that is not set, us-east-1\")\n parsers['super'].add_argument(\"-t\", \"--table\", default=\"credential-store\",\n help=\"DynamoDB table to use for \"\n \"credential storage\")\n subparsers = parsers['super'].add_subparsers(help='Try commands like '\n '\"{name} get -h\" or \"{name}'\n 'put --help\" to get each'\n 'sub command\\'s options'\n .format(name=os.path.basename(\n __file__)))\n\n action = 'delete'\n parsers[action] = subparsers.add_parser(action,\n help='Delete a credential \" \\\n \"from the store')\n parsers[action].add_argument(\"credential\", type=str,\n help=\"the name of the credential to delete\")\n parsers[action].set_defaults(action=action)\n\n action = 'get'\n parsers[action] = subparsers.add_parser(action, help=\"Get a credential \"\n \"from the store\")\n parsers[action].add_argument(\"credential\", type=str,\n help=\"the name of the credential to get.\"\n \"Using the wildcard character '%s' will \"\n \"search for credentials that match the \"\n \"pattern\" % WILDCARD_CHAR)\n parsers[action].add_argument(\"context\", type=key_value_pair,\n action=KeyValueToDictionary, nargs='*',\n help=\"encryption context key/value pairs \"\n \"associated with the credential in the form \"\n \"of \\\"key=value\\\"\")\n parsers[action].add_argument(\"-n\", \"--noline\", action=\"store_true\",\n help=\"Don't append newline to returned \"\n \"value (useful in scripts or with \"\n \"binary files)\")\n parsers[action].add_argument(\"-v\", \"--version\", default=\"\",\n help=\"Get a specific version of the \"\n \"credential (defaults to the latest version)\")\n parsers[action].set_defaults(action=action)\n\n action = 'getall'\n parsers[action] = subparsers.add_parser(action,\n help=\"Get all credentials from \"\n \"the store\")\n parsers[action].add_argument(\"context\", type=key_value_pair,\n action=KeyValueToDictionary, nargs='*',\n help=\"encryption context key/value pairs \"\n \"associated with the credential in the form \"\n \"of \\\"key=value\\\"\")\n parsers[action].add_argument(\"-v\", \"--version\", default=\"\",\n help=\"Get a specific version of the \"\n \"credential (defaults to the latest version)\")\n parsers[action].add_argument(\"-f\", \"--format\", default=\"json\",\n choices=[\"json\", \"csv\"] +\n ([] if NO_YAML else [\"yaml\"]),\n help=\"Output format. 
json(default) \" +\n (\"\" if NO_YAML else \"yaml \") + \"or csv.\")\n parsers[action].set_defaults(action=action)\n\n action = 'list'\n parsers[action] = subparsers.add_parser(action,\n help=\"list credentials and \"\n \"their versions\")\n parsers[action].set_defaults(action=action)\n\n action = 'put'\n parsers[action] = subparsers.add_parser(action,\n help=\"Put a credential into \"\n \"the store\")\n parsers[action].add_argument(\"credential\", type=str,\n help=\"the name of the credential to store\")\n parsers[action].add_argument(\"value\", type=value_or_filename,\n help=\"the value of the credential to store \"\n \"or, if beginning with the \\\"@\\\" character, \"\n \"the filename of the file containing \"\n \"the value\", default=\"\")\n parsers[action].add_argument(\"context\", type=key_value_pair,\n action=KeyValueToDictionary, nargs='*',\n help=\"encryption context key/value pairs \"\n \"associated with the credential in the form \"\n \"of \\\"key=value\\\"\")\n parsers[action].add_argument(\"-k\", \"--key\", default=\"alias/credstash\",\n help=\"the KMS key-id of the master key \"\n \"to use. See the README for more \"\n \"information. Defaults to alias/credstash\")\n parsers[action].add_argument(\"-v\", \"--version\", default=\"\",\n help=\"Put a specific version of the \"\n \"credential (update the credential; \"\n \"defaults to version `1`).\")\n parsers[action].add_argument(\"-a\", \"--autoversion\", action=\"store_true\",\n help=\"Automatically increment the version of \"\n \"the credential to be stored. This option \"\n \"causes the `-v` flag to be ignored. \"\n \"(This option will fail if the currently stored \"\n \"version is not numeric.)\")\n parsers[action].set_defaults(action=action)\n\n action = 'setup'\n parsers[action] = subparsers.add_parser(action,\n help='setup the credential store')\n parsers[action].set_defaults(action=action)\n\n args = parsers['super'].parse_args()\n\n region = os.getenv(\n \"AWS_DEFAULT_REGION\", DEFAULT_REGION) if not args.region \\\n else args.region\n if \"action\" in vars(args):\n if args.action == \"delete\":\n deleteSecrets(args.credential, region=region, table=args.table)\n return\n if args.action == \"list\":\n credential_list = listSecrets(region=region, table=args.table)\n if credential_list:\n # print list of credential names and versions,\n # sorted by name and then by version\n max_len = max([len(x[\"name\"]) for x in credential_list])\n for cred in sorted(credential_list,\n key=operator.itemgetter(\"name\", \"version\")):\n print(\"{0:{1}} -- version {2:>}\".format(\n cred[\"name\"], max_len, cred[\"version\"]))\n else:\n return\n if args.action == \"put\":\n if args.autoversion:\n latestVersion = getHighestVersion(args.credential, region,\n args.table)\n try:\n version = paddedInt(int(latestVersion) + 1)\n except ValueError:\n printStdErr(\"Can not autoincrement version. The current \"\n \"version: %s is not an int\" % latestVersion)\n return\n else:\n version = args.version\n try:\n if putSecret(args.credential, args.value, version,\n kms_key=args.key, region=region, table=args.table,\n context=args.context):\n print(\"{0} has been stored\".format(args.credential))\n except KmsError as e:\n printStdErr(e)\n except botocore.exceptions.ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"ConditionalCheckFailedException\":\n latestVersion = getHighestVersion(args.credential, region,\n args.table)\n printStdErr(\"%s version %s is already in the credential store. 
\"\n \"Use the -v flag to specify a new version\" %\n (args.credential, latestVersion))\n return\n if args.action == \"get\":\n try:\n if WILDCARD_CHAR in args.credential:\n names = expand_wildcard(args.credential,\n [x[\"name\"]\n for x\n in listSecrets(region=region,\n table=args.table)])\n print(json.dumps(dict((name,\n getSecret(name,\n args.version,\n region=region,\n table=args.table,\n context=args.context))\n for name in names)))\n else:\n sys.stdout.write(getSecret(args.credential, args.version,\n region=region, table=args.table,\n context=args.context))\n if not args.noline:\n sys.stdout.write(\"\\n\")\n except ItemNotFound as e:\n printStdErr(e)\n except KmsError as e:\n printStdErr(e)\n except IntegrityError as e:\n printStdErr(e)\n return\n if args.action == \"getall\":\n secrets = getAllSecrets(args.version,\n region=region,\n table=args.table,\n context=args.context)\n if args.format == \"json\":\n output_func = json.dumps\n output_args = {\"sort_keys\": True,\n \"indent\": 4,\n \"separators\": (',', ': ')}\n elif not NO_YAML and args.format == \"yaml\":\n output_func = yaml.dump\n output_args = {\"default_flow_style\": False}\n elif args.format == 'csv':\n output_func = csv_dump\n output_args = {}\n print(output_func(secrets, **output_args))\n return\n if args.action == \"setup\":\n createDdbTable(region=region, table=args.table)\n return\n else:\n parsers['super'].print_help()\n\nif __name__ == '__main__':\n main()\n"},"path":{"kind":"string","value":"credstash.py"},"size":{"kind":"number","value":22693,"string":"22,693"},"nl_text":{"kind":"string","value":"create the secret store table in DDB in the specified region\nfetch and decrypt all secrets\nReturn the highest version of `name` in the table\nfetch and decrypt the secret called `name`\ndo a full-table scan of the credential-store,\nand return the names and versions of every credential\nreturn a string that contains `i`, left-padded with 0's up to PAD_LEN digits\nput a secret called `name` into the secret-store,\nprotected by the key kms_key\n\n!/usr/bin/env python Copyright 2015 Luminal, Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. number of digits in sys.maxint argparse running on old version of python (<2.7) will pass an empty string to this function before it passes the actual value. If an empty string is passes in, just return an empty string generate a a 64 byte key. 
Half will be for data encryption, the other half for HMAC compute an HMAC using the hmac key and the ciphertext do a consistent fetch of the credential with the highest version Check the HMAC before we decrypt to verify ciphertext integrity print list of credential names and versions, sorted by name and then by version"},"nl_size":{"kind":"number","value":1578,"string":"1,578"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7873867750167847,"string":"0.787387"}}},{"rowIdx":574,"cells":{"content":{"kind":"string","value":"\"\"\"\r\nDjango settings for lab01 project.\r\n\r\nGenerated by 'django-admin startproject' using Django 3.2.6.\r\n\r\nFor more information on this file, see\r\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\r\n\r\nFor the full list of settings and their values, see\r\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\r\n\"\"\"\r\n\r\nfrom pathlib import Path\r\n\r\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\r\nBASE_DIR = Path(__file__).resolve().parent.parent\r\n\r\n\r\n# Quick-start development settings - unsuitable for production\r\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\r\n\r\n# SECURITY WARNING: keep the secret key used in production secret!\r\nSECRET_KEY = 'django-insecure-7-8hv&pc-$$1)7eiiy2m#m^o6cx%oqqv9@z071ec0%218iwt0!'\r\n\r\n# SECURITY WARNING: don't run with debug turned on in production!\r\nDEBUG = True\r\n\r\nALLOWED_HOSTS = []\r\n\r\n\r\n# Application definition\r\n\r\nINSTALLED_APPS = [\r\n 'django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n]\r\n\r\nMIDDLEWARE = [\r\n 'django.middleware.security.SecurityMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n]\r\n\r\nROOT_URLCONF = 'lab01.urls'\r\n\r\nTEMPLATES = [\r\n {\r\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\r\n 'DIRS': [],\r\n 'APP_DIRS': True,\r\n 'OPTIONS': {\r\n 'context_processors': [\r\n 'django.template.context_processors.debug',\r\n 'django.template.context_processors.request',\r\n 'django.contrib.auth.context_processors.auth',\r\n 'django.contrib.messages.context_processors.messages',\r\n ],\r\n },\r\n },\r\n]\r\n\r\nWSGI_APPLICATION = 'lab01.wsgi.application'\r\n\r\n\r\n# Database\r\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\r\n\r\nDATABASES = {\r\n 'default': {\r\n 'ENGINE': 'django.db.backends.sqlite3',\r\n 'NAME': BASE_DIR / 'db.sqlite3',\r\n }\r\n}\r\n\r\n\r\n# Password validation\r\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\r\n\r\nAUTH_PASSWORD_VALIDATORS = [\r\n {\r\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\r\n },\r\n {\r\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\r\n },\r\n {\r\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\r\n },\r\n {\r\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\r\n },\r\n]\r\n\r\n\r\n# Internationalization\r\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\r\n\r\nLANGUAGE_CODE = 'en-us'\r\n\r\nTIME_ZONE 
= 'UTC'\r\n\r\nUSE_I18N = True\r\n\r\nUSE_L10N = True\r\n\r\nUSE_TZ = True\r\n\r\n\r\n# Static files (CSS, JavaScript, Images)\r\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\r\n\r\nSTATIC_URL = '/static/'\r\n\r\n# Default primary key field type\r\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\r\n\r\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\r\n"},"path":{"kind":"string","value":"lab01/lab01/settings.py"},"size":{"kind":"number","value":3361,"string":"3,361"},"nl_text":{"kind":"string","value":"Django settings for lab01 project.\n\nGenerated by 'django-admin startproject' using Django 3.2.6.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\n Build paths inside the project like this: BASE_DIR / 'subdir'. Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/3.2/ref/settings/databases Password validation https://docs.djangoproject.com/en/3.2/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/3.2/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/3.2/howto/static-files/ Default primary key field type https://docs.djangoproject.com/en/3.2/ref/settings/default-auto-field"},"nl_size":{"kind":"number","value":1080,"string":"1,080"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6652405858039856,"string":"0.665241"}}},{"rowIdx":575,"cells":{"content":{"kind":"string","value":"# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com\n#\n# Python tests originally created or extracted from other peoples work. 
The\n# parts were too small to be protected.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\ndef tupleUnpacking():\n return (*a, b, *c)\n\ndef listUnpacking():\n return [*a, b, *c]\n\ndef setUnpacking():\n return {*a, b, *c}\n\ndef dictUnpacking():\n return {\"a\" : 1, **d}\n\na = range(3)\nb = 5\nc = range(8,10)\nd = {\"a\" : 2}\n\nprint(\"Tuple unpacked\", tupleUnpacking())\nprint(\"List unpacked\", listUnpacking())\nprint(\"Set unpacked\", setUnpacking())\nprint(\"Dict unpacked\", dictUnpacking())\n\n\nnon_iterable = 2.0\n\ndef tupleUnpackingError():\n try:\n return (*a,*non_iterable,*c)\n except Exception as e:\n return e\n\ndef listUnpackingError():\n try:\n return [*a,*non_iterable,*c]\n except Exception as e:\n return e\n\ndef setUnpackingError():\n try:\n return {*a,*non_iterable,*c}\n except Exception as e:\n return e\n\ndef dictUnpackingError():\n try:\n return {\"a\" : 1, **non_iterable}\n except Exception as e:\n return e\n\n\nprint(\"Tuple unpacked error:\", tupleUnpackingError())\nprint(\"List unpacked error:\", listUnpackingError())\nprint(\"Set unpacked error:\", setUnpackingError())\nprint(\"Dict unpacked error:\", dictUnpackingError())\n"},"path":{"kind":"string","value":"tests/basics/Unpacking35.py"},"size":{"kind":"number","value":1866,"string":"1,866"},"nl_text":{"kind":"string","value":"Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com Python tests originally created or extracted from other peoples work. The parts were too small to be protected. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License."},"nl_size":{"kind":"number","value":730,"string":"730"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8877567648887634,"string":"0.887757"}}},{"rowIdx":576,"cells":{"content":{"kind":"string","value":"# Copyright (c) OpenMMLab. 
All rights reserved.\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmcls.models.utils import make_divisible\nfrom ..builder import BACKBONES\nfrom .base_backbone import BaseBackbone\n\n\nclass InvertedResidual(BaseModule):\n \"\"\"InvertedResidual block for MobileNetV2.\n\n Args:\n in_channels (int): The input channels of the InvertedResidual block.\n out_channels (int): The output channels of the InvertedResidual block.\n stride (int): Stride of the middle (first) 3x3 convolution.\n expand_ratio (int): adjusts number of channels of the hidden layer\n in InvertedResidual by this amount.\n conv_cfg (dict, optional): Config dict for convolution layer.\n Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU6').\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n\n Returns:\n Tensor: The output tensor\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n expand_ratio,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n with_cp=False,\n init_cfg=None):\n super(InvertedResidual, self).__init__(init_cfg)\n self.stride = stride\n assert stride in [1, 2], f'stride must in [1, 2]. ' \\\n f'But received {stride}.'\n self.with_cp = with_cp\n self.use_res_connect = self.stride == 1 and in_channels == out_channels\n hidden_dim = int(round(in_channels * expand_ratio))\n\n layers = []\n if expand_ratio != 1:\n layers.append(\n ConvModule(\n in_channels=in_channels,\n out_channels=hidden_dim,\n kernel_size=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg))\n layers.extend([\n ConvModule(\n in_channels=hidden_dim,\n out_channels=hidden_dim,\n kernel_size=3,\n stride=stride,\n padding=1,\n groups=hidden_dim,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg),\n ConvModule(\n in_channels=hidden_dim,\n out_channels=out_channels,\n kernel_size=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=None)\n ])\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n\n def _inner_forward(x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n if self.with_cp and x.requires_grad:\n out = cp.checkpoint(_inner_forward, x)\n else:\n out = _inner_forward(x)\n\n return out\n\n\n@BACKBONES.register_module()\nclass MobileNetV2(BaseBackbone):\n \"\"\"MobileNetV2 backbone.\n\n Args:\n widen_factor (float): Width multiplier, multiply number of\n channels in each layer by this amount. Default: 1.0.\n out_indices (None or Sequence[int]): Output from which stages.\n Default: (7, ).\n frozen_stages (int): Stages to be frozen (all param fixed).\n Default: -1, which means not freezing any parameters.\n conv_cfg (dict, optional): Config dict for convolution layer.\n Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU6').\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n with_cp (bool): Use checkpoint or not. 
Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n \"\"\"\n\n # Parameters to build layers. 4 parameters are needed to construct a\n # layer, from left to right: expand_ratio, channel, num_blocks, stride.\n arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],\n [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],\n [6, 320, 1, 1]]\n\n def __init__(self,\n widen_factor=1.,\n out_indices=(7, ),\n frozen_stages=-1,\n deep_stem=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN'),\n act_cfg=dict(type='ReLU'),\n norm_eval=False,\n with_cp=False,\n init_cfg=[\n dict(type='Kaiming', layer=['Conv2d']),\n dict(\n type='Constant',\n val=1,\n layer=['_BatchNorm', 'GroupNorm'])\n ]):\n super(MobileNetV2, self).__init__(init_cfg)\n self.widen_factor = widen_factor\n self.out_indices = out_indices\n for index in out_indices:\n if index not in range(0, 8):\n raise ValueError('the item in out_indices must in '\n f'range(0, 8). But received {index}')\n\n if frozen_stages not in range(-1, 8):\n raise ValueError('frozen_stages must be in range(-1, 8). '\n f'But received {frozen_stages}')\n self.out_indices = out_indices\n self.frozen_stages = frozen_stages\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n\n self.in_channels = make_divisible(32 * widen_factor, 8)\n if deep_stem:\n self.conv0 = ConvModule(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)\n in_channels_ = 16\n else:\n in_channels_ = 3\n self.conv0 = nn.Sequential()\n self.conv1 = ConvModule(\n in_channels=in_channels_,\n out_channels=self.in_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg)\n\n self.layers = []\n\n for i, layer_cfg in enumerate(self.arch_settings):\n expand_ratio, channel, num_blocks, stride = layer_cfg\n out_channels = make_divisible(channel * widen_factor, 8)\n inverted_res_layer = self.make_layer(\n out_channels=out_channels,\n num_blocks=num_blocks,\n stride=stride,\n expand_ratio=expand_ratio)\n layer_name = f'layer{i + 1}'\n self.add_module(layer_name, inverted_res_layer)\n self.layers.append(layer_name)\n\n if widen_factor > 1.0:\n self.out_channel = int(1280 * widen_factor)\n else:\n self.out_channel = 1280\n\n layer = ConvModule(\n in_channels=self.in_channels,\n out_channels=self.out_channel,\n kernel_size=1,\n stride=1,\n padding=0,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg)\n self.add_module('conv2', layer)\n self.layers.append('conv2')\n\n def make_layer(self, out_channels, num_blocks, stride, expand_ratio):\n \"\"\"Stack InvertedResidual blocks to build a layer for MobileNetV2.\n\n Args:\n out_channels (int): out_channels of block.\n num_blocks (int): number of blocks.\n stride (int): stride of the first block. Default: 1\n expand_ratio (int): Expand the number of channels of the\n hidden layer in InvertedResidual by this ratio. 
Default: 6.\n \"\"\"\n layers = []\n for i in range(num_blocks):\n if i >= 1:\n stride = 1\n layers.append(\n InvertedResidual(\n self.in_channels,\n out_channels,\n stride,\n expand_ratio=expand_ratio,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg,\n with_cp=self.with_cp))\n self.in_channels = out_channels\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv0(x)\n x = self.conv1(x)\n\n outs = []\n for i, layer_name in enumerate(self.layers):\n layer = getattr(self, layer_name)\n x = layer(x)\n if i in self.out_indices:\n outs.append(x)\n\n return tuple(outs)\n\n def _freeze_stages(self):\n if self.frozen_stages >= 0:\n for param in self.conv1.parameters():\n param.requires_grad = False\n for i in range(1, self.frozen_stages + 1):\n layer = getattr(self, f'layer{i}')\n layer.eval()\n for param in layer.parameters():\n param.requires_grad = False\n\n def train(self, mode=True):\n super(MobileNetV2, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()\n"},"path":{"kind":"string","value":"mmcls/models/backbones/mobilenet_v2.py"},"size":{"kind":"number","value":9966,"string":"9,966"},"nl_text":{"kind":"string","value":"InvertedResidual block for MobileNetV2.\n\nArgs:\n in_channels (int): The input channels of the InvertedResidual block.\n out_channels (int): The output channels of the InvertedResidual block.\n stride (int): Stride of the middle (first) 3x3 convolution.\n expand_ratio (int): adjusts number of channels of the hidden layer\n in InvertedResidual by this amount.\n conv_cfg (dict, optional): Config dict for convolution layer.\n Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU6').\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\n\nReturns:\n Tensor: The output tensor\nMobileNetV2 backbone.\n\nArgs:\n widen_factor (float): Width multiplier, multiply number of\n channels in each layer by this amount. Default: 1.0.\n out_indices (None or Sequence[int]): Output from which stages.\n Default: (7, ).\n frozen_stages (int): Stages to be frozen (all param fixed).\n Default: -1, which means not freezing any parameters.\n conv_cfg (dict, optional): Config dict for convolution layer.\n Default: None, which means using conv2d.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='BN').\n act_cfg (dict): Config dict for activation layer.\n Default: dict(type='ReLU6').\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed. Default: False.\nStack InvertedResidual blocks to build a layer for MobileNetV2.\n\nArgs:\n out_channels (int): out_channels of block.\n num_blocks (int): number of blocks.\n stride (int): stride of the first block. Default: 1\n expand_ratio (int): Expand the number of channels of the\n hidden layer in InvertedResidual by this ratio. Default: 6.\n\n Copyright (c) OpenMMLab. All rights reserved. Parameters to build layers. 
4 parameters are needed to construct a layer, from left to right: expand_ratio, channel, num_blocks, stride."},"nl_size":{"kind":"number","value":2387,"string":"2,387"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6461518406867981,"string":"0.646152"}}},{"rowIdx":577,"cells":{"content":{"kind":"string","value":"\"\"\"\n多线程操作共享的全局变量是不安全的,多线程操作局部 只归某个线程私有,其他线程是不能访问的\n\"\"\"\nimport threading\n\n\ndef do_sth(arg1, arg2, arg3):\n local_var1 = arg1\n local_var2 = arg2\n local_var3 = arg3\n\n fun1(local_var1, local_var2, local_var3)\n fun2(local_var1, local_var2, local_var3)\n fun3(local_var1, local_var2, local_var3)\n\n\ndef fun1(local_var1, local_var2, local_var3):\n print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,\n local_var2, local_var3))\n\n\ndef fun2(local_var1, local_var2, local_var3):\n print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,\n local_var2, local_var3))\n\n\ndef fun3(local_var1, local_var2, local_var3):\n print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,\n local_var2, local_var3))\n\n\nt1 = threading.Thread(target=do_sth, args=('a', 'b', 'c'))\nt2 = threading.Thread(target=do_sth, args=('d', 'e', 'f'))\n\nt1.start()\nt2.start()\n"},"path":{"kind":"string","value":"17_process_thread/46_why_need_ThreadLocal.py"},"size":{"kind":"number","value":1094,"string":"1,094"},"nl_text":{"kind":"string","value":"多线程操作共享的全局变量是不安全的,多线程操作局部 只归某个线程私有,其他线程是不能访问的"},"nl_size":{"kind":"number","value":45,"string":"45"},"nl_language":{"kind":"string","value":"zh"},"nl_language_score":{"kind":"number","value":0.9986813068389893,"string":"0.998681"}}},{"rowIdx":578,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n# Copyright 2011-2019, Nigel Small\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom unittest import TestCase\n\nfrom neotime import Date, Time, DateTime, Duration\n\nfrom py2neo.data import Node\nfrom py2neo.cypher import cypher_escape, cypher_repr\nfrom py2neo.cypher.encoding import LabelSetView, PropertyDictView, PropertySelector\n\n\nclass LabelSetViewTestCase(TestCase):\n\n def test_can_create_empty_view(self):\n view = LabelSetView([])\n self.assertEqual(repr(view), \"\")\n\n def test_can_create_single_label_view(self):\n view = LabelSetView([\"A\"])\n self.assertEqual(repr(view), \":A\")\n\n def test_can_create_double_label_view(self):\n view = LabelSetView([\"A\", \"B\"])\n self.assertEqual(repr(view), \":A:B\")\n\n def test_can_select_existing_in_view(self):\n view = LabelSetView([\"A\", \"B\"]).B\n self.assertEqual(repr(view), \":B\")\n\n def test_can_select_non_existing_in_view(self):\n view = LabelSetView([\"A\", \"B\"]).C\n self.assertEqual(repr(view), \"\")\n\n def test_can_chain_select(self):\n view = LabelSetView([\"A\", \"B\", \"C\"]).B.C\n self.assertEqual(repr(view), \":B:C\")\n\n def test_can_reselect_same(self):\n view = LabelSetView([\"A\", \"B\", \"C\"]).B.B.C\n self.assertEqual(repr(view), 
\":B:C\")\n\n def test_length(self):\n view = LabelSetView([\"A\", \"B\", \"C\"])\n self.assertEqual(len(view), 3)\n\n def test_iterable(self):\n view = LabelSetView([\"A\", \"B\", \"C\"])\n self.assertSetEqual(set(view), {\"A\", \"B\", \"C\"})\n\n def test_containment(self):\n view = LabelSetView([\"A\", \"B\", \"C\"])\n self.assertIn(\"A\", view)\n\n def test_non_containment(self):\n view = LabelSetView([\"A\", \"B\", \"C\"])\n self.assertNotIn(\"D\", view)\n\n\nclass PropertyDictViewTestCase(TestCase):\n\n def test_can_create_empty_view(self):\n view = PropertyDictView({})\n self.assertEqual(repr(view), \"{}\")\n\n def test_can_create_single_property_view(self):\n view = PropertyDictView({\"A\": 1})\n self.assertEqual(repr(view), \"{A: 1}\")\n\n def test_can_create_double_property_view(self):\n view = PropertyDictView({\"A\": 1, \"B\": 2})\n self.assertEqual(repr(view), \"{A: 1, B: 2}\")\n\n def test_can_select_existing_in_view(self):\n view = PropertyDictView({\"A\": 1, \"B\": 2}).B\n self.assertEqual(repr(view), \"{B: 2}\")\n\n def test_can_select_non_existing_in_view(self):\n view = PropertyDictView({\"A\": 1, \"B\": 2}).C\n self.assertEqual(repr(view), \"{}\")\n\n def test_can_chain_select(self):\n view = PropertyDictView({\"A\": 1, \"B\": 2, \"C\": 3}).B.C\n self.assertEqual(repr(view), \"{B: 2, C: 3}\")\n\n def test_can_reselect_same(self):\n view = PropertyDictView({\"A\": 1, \"B\": 2, \"C\": 3}).B.B.C\n self.assertEqual(repr(view), \"{B: 2, C: 3}\")\n\n def test_length(self):\n view = PropertyDictView({\"A\": 1, \"B\": 2, \"C\": 3})\n self.assertEqual(len(view), 3)\n\n def test_iterable(self):\n view = PropertyDictView({\"A\": 1, \"B\": 2, \"C\": 3})\n self.assertEqual(set(view), {\"A\", \"B\", \"C\"})\n\n def test_containment(self):\n view = PropertyDictView({\"A\": 1, \"B\": 2, \"C\": 3})\n self.assertIn(\"A\", view)\n\n def test_non_containment(self):\n view = PropertyDictView({\"A\": 1, \"B\": 2, \"C\": 3})\n self.assertNotIn(\"D\", view)\n\n\nclass PropertySelectorTestCase(TestCase):\n\n def test_simple(self):\n selector = PropertySelector({\"A\": 1, \"B\": 2, \"C\": 3})\n self.assertEqual(selector.A, \"1\")\n\n def test_non_existent(self):\n selector = PropertySelector({\"A\": 1, \"B\": 2, \"C\": 3})\n self.assertEqual(selector.D, \"null\")\n\n\nclass NodeReprTestCase(TestCase):\n\n def test_empty(self):\n a = Node()\n r = cypher_repr(a)\n self.assertEqual(\"({})\", r)\n\n def test_single_property(self):\n a = Node(name=\"Alice\")\n r = cypher_repr(a)\n self.assertEqual(\"({name: 'Alice'})\", r)\n\n def test_property_and_label(self):\n a = Node(\"Person\", name=\"Alice\")\n r = cypher_repr(a)\n self.assertEqual(\"(:Person {name: 'Alice'})\", r)\n\n def test_date_property(self):\n a = Node(d=Date(1970, 1, 1))\n r = cypher_repr(a)\n self.assertEqual(\"({d: date('1970-01-01')})\", r)\n\n def test_time_property(self):\n a = Node(t=Time(12, 34, 56))\n r = cypher_repr(a)\n self.assertEqual(\"({t: time('12:34:56.000000000')})\", r)\n\n def test_datetime_property(self):\n a = Node(dt=DateTime(1970, 1, 1, 12, 34, 56))\n r = cypher_repr(a)\n self.assertEqual(\"({dt: datetime('1970-01-01T12:34:56.000000000')})\", r)\n\n def test_duration_property(self):\n a = Node(dur=Duration(days=3))\n r = cypher_repr(a)\n self.assertEqual(\"({dur: duration('P3D')})\", r)\n\n\nclass CypherEscapeTestCase(TestCase):\n\n def test_empty_string(self):\n value = \"\"\n with self.assertRaises(ValueError):\n _ = cypher_escape(value)\n\n def test_simple_string(self):\n value = \"foo\"\n escaped = \"foo\"\n 
self.assertEqual(escaped, cypher_escape(value))\n\n def test_string_with_space(self):\n value = \"foo bar\"\n escaped = \"`foo bar`\"\n self.assertEqual(escaped, cypher_escape(value))\n\n def test_string_with_backtick(self):\n value = \"foo `bar`\"\n escaped = \"`foo ``bar```\"\n self.assertEqual(escaped, cypher_escape(value))\n"},"path":{"kind":"string","value":"test/unit/test_cypher_encoding.py"},"size":{"kind":"number","value":5974,"string":"5,974"},"nl_text":{"kind":"string","value":"!/usr/bin/env python -*- encoding: utf-8 -*- Copyright 2011-2019, Nigel Small Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License."},"nl_size":{"kind":"number","value":599,"string":"599"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8427037596702576,"string":"0.842704"}}},{"rowIdx":579,"cells":{"content":{"kind":"string","value":"# Written by David Weber\r\n# dsw7@sfu.ca\r\n\r\n\"\"\"\r\nIn this short namespace I house a class that connects to PDB and downloads\r\nfile over PDB file transfer protocol.\r\n\"\"\"\r\n\r\n# ------------------------------------------------------------------------------\r\n\r\nimport gzip\r\nfrom os import remove, getcwd, path # built in\r\n\r\n# my pymol API built on Python2 - try both imports\r\ntry:\r\n from urllib.request import urlretrieve, urlcleanup\r\nexcept ImportError:\r\n from urllib import urlretrieve, urlcleanup \r\n\r\n\r\nROOT = 'ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/{}/{}'\r\n\r\nclass PDBFile:\r\n def __init__(self, code):\r\n \"\"\"Initialize a PDBFile object with a pdb file of interest\r\n\r\n Parameters\r\n ----------\r\n code : the pdb code if interest\r\n Any valid PDB code can be passed into PDBFile.\r\n\r\n Examples\r\n --------\r\n >>> pdb_file = PDBFile('1rcy') \r\n \r\n \"\"\"\r\n self.code = code.lower()\r\n \r\n\r\n def fetch_from_PDB(self):\r\n \"\"\"\r\n Connects to PDB FTP server, downloads a .gz file of interest,\r\n decompresses the .gz file into .ent and then dumps a copy of\r\n the pdb{code}.ent file into cwd.\r\n\r\n Parameters\r\n ----------\r\n None\r\n\r\n Examples\r\n --------\r\n \r\n >>> inst = PDBFile('1rcy')\r\n >>> path_to_file = inst.fetch_from_PDB()\r\n >>> print(path_to_file)\r\n \r\n \"\"\" \r\n \r\n subdir = self.code[1:3]\r\n infile = 'pdb{}.ent.gz'.format(self.code)\r\n decompressed = infile.strip('.gz')\r\n fullpath = ROOT.format(subdir, infile)\r\n \r\n try:\r\n urlcleanup()\r\n urlretrieve(fullpath, infile)\r\n except Exception:\r\n return 'URLError'\r\n else:\r\n with gzip.open(infile, 'rb') as gz:\r\n with open(decompressed, 'wb') as out:\r\n out.writelines(gz)\r\n remove(infile)\r\n return path.join(getcwd(), decompressed)\r\n \r\n def clear(self):\r\n \"\"\"\r\n Deletes file from current working directory after the file has\r\n been processed by some algorithm.\r\n\r\n Parameters\r\n ----------\r\n None\r\n\r\n Examples\r\n --------\r\n >>> inst = PDBFile('1rcy')\r\n >>> path_to_file = inst.fetch_from_PDB()\r\n >>> print(path_to_file) # process the file using some algorithm\r\n >>> 
inst.clear()\r\n \r\n \"\"\" \r\n filename = 'pdb{}.ent'.format(self.code)\r\n try:\r\n remove(path.join(getcwd(), filename))\r\n except FileNotFoundError:\r\n print('Cannot delete file. Does not exist.')\r\n \r\n "},"path":{"kind":"string","value":"scalene-triangle/libs/PDB_filegetter.py"},"size":{"kind":"number","value":2794,"string":"2,794"},"nl_text":{"kind":"string","value":"Initialize a PDBFile object with a pdb file of interest\n\nParameters\n----------\ncode : the pdb code if interest\n Any valid PDB code can be passed into PDBFile.\n\nExamples\n--------\n>>> pdb_file = PDBFile('1rcy') \nDeletes file from current working directory after the file has\nbeen processed by some algorithm.\n\nParameters\n----------\nNone\n\nExamples\n--------\n>>> inst = PDBFile('1rcy')\n>>> path_to_file = inst.fetch_from_PDB()\n>>> print(path_to_file) # process the file using some algorithm\n>>> inst.clear()\nConnects to PDB FTP server, downloads a .gz file of interest,\ndecompresses the .gz file into .ent and then dumps a copy of\nthe pdb{code}.ent file into cwd.\n\nParameters\n----------\nNone\n\nExamples\n--------\n\n>>> inst = PDBFile('1rcy')\n>>> path_to_file = inst.fetch_from_PDB()\n>>> print(path_to_file)\nIn this short namespace I house a class that connects to PDB and downloads\nfile over PDB file transfer protocol.\n\n Written by David Weber dsw7@sfu.ca ------------------------------------------------------------------------------ built in my pymol API built on Python2 - try both imports"},"nl_size":{"kind":"number","value":1090,"string":"1,090"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7387703657150269,"string":"0.73877"}}},{"rowIdx":580,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#Chucky_Bot\n\nimport LINETCR\nfrom LINETCR.lib.curve.ttypes import *\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom threading import Thread\nfrom googletrans import Translator\nfrom gtts import gTTS\nimport time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile\n\ncl = LINETCR.LINE()\n#cl.login(qr=True)\ncl.login(token='EsOvPPzeFykCVG8OoGf0.hE4TS1Hheb46PcdMzZKaaa.rzBOrFqSAApZownyv2qBJWU3PWWbf9/oE6G+sSVzUTo=')\ncl.loginResult()\nprint \"Azmi 1-Login Success\\n\"\n\nki = LINETCR.LINE()\n#ki.login(qr=True)\nki.login(token='EsTdk3fyUSbT7LJVwoEd.rLylacrPH39WJb0UIwB8Nq.GYYzsgzj7aHd7mzCSluc3162Uqrry6Jjwf/bFuq9Etw=')\nki.loginResult()\nprint \"Ki-Login Success\\n\"\n\nkk = LINETCR.LINE()\n#kk.login(qr=True)\nkk.login(token='EsNKJDaP0J7Pt7syTOW9.GgPTp3/FisKkVX1rJHeroq.hUG0VDbWHz8R7o80xI0Pvme8dBb3dSsmCnat0PRX+JM=')\nkk.loginResult()\nprint \"Kk-Login Success\\n\"\n\n#kc = LINETCR.LINE()\n#kc.login(qr=True)\n#kc.login(token='TOKEN_KAMU_DISINI_BEIB')\n#kc.loginResult()\n#print \"Kc-Login Success\\n\"\n\n#kr = LINETCR.LINE()\n#kr.login(qr=True)\n#kr.login(token='TOKEN_KAMU_DISINI_BEIB')\n#kr.loginResult()\n#print \"Kr-Login Success\\n\"\n\n#km = LINETCR.LINE()\n#km.login(qr=True)\n#km.login(token='TOKEN_KAMU_DISINI_BEIB')\n#km.loginResult()\nprint \"Km-Login Success\\n\\n=====[Sukses All Login]=====\"\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nselfMessage =\"\"\"\n╔═════════════════════════\n║ ☆☞ S E L F ☜☆\n╠═════════════════════════\n╠➩〘Hi〙\n╠➩〘Me〙\n╠➩〘Mymid〙\n╠➩〘Mid @〙\n╠➩〘SearchID: (ID LINE)〙\n╠➩〘Checkdate (DD/MM/YY)〙\n╠➩〘Kalender〙\n╠➩〘Steal contact〙\n╠➩〘Pp @〙\n╠➩〘Cover @〙\n╠➩〘Auto like〙\n╠➩〘Scbc Text〙\n╠➩〘Cbc Text〙\n╠➩〘Gbc Text〙\n╠➩〘Getbio @〙\n╠➩〘Getinfo 
@〙\n╠➩〘Getname @〙\n╠➩〘Getprofile @〙\n╠➩〘Getcontact @〙\n╠➩〘Getvid @〙\n╠➩〘Friendlist〙\n╠═════════════════════════\n║ ☆☞ S E L F ☜☆\n╚═════════════════════════\n\"\"\"\n\nbotMessage =\"\"\"\n╔═════════════════════════\n║ ☆☞ B O T ☜☆\n╠═════════════════════════\n╠➩〘Absen〙\n╠➩〘Respon〙\n╠➩〘Runtime〙\n╠➩〘Kapten copy @〙\n╠➩〘TC1 copy @〙\n╠➩〘TC2 copy @〙\n╠➩〘TC3 copy @〙\n╠➩〘TC4 copy @〙\n╠➩〘Backup all〙\n╠➩〘/bio Text〙\n╠➩〘@bye (Usir Kapten)〙\n╠➩〘Bye all (Usir Semua)〙\n╠═════════════════════════\n║ ☆☞ B O T ☜☆\n╚═════════════════════════\n\"\"\"\n\nmediaMessage =\"\"\"\n╔═════════════════════════\n║ ☆☞ M E D I A ☜☆\n╠═════════════════════════\n╠➩〘Gift〙\n╠➩〘Gift1 @ s/d Gift10 @〙\n╠➩〘Giftbycontact〙\n╠➩〘All gift〙\n╠➩〘Gif gore〙\n╠➩〘Google: (Text)〙\n╠➩〘Playstore NamaApp〙\n╠➩〘Fancytext: Text〙\n╠➩〘/musik Judul-Penyanyi〙\n╠➩〘/lirik Judul-Penyanyi〙\n╠➩〘/musrik Judul-Penyanyi〙\n╠➩〘/ig UrsnameInstagram〙\n╠➩〘Checkig UrsnameInstagram〙\n╠➩〘/apakah Text (Kerang Ajaib)〙\n╠➩〘/kapan Text (Kerang Ajaib)〙\n╠➩〘/hari Text (Kerang Ajaib)〙\n╠➩〘/berapa Text (Kerang Ajaib)〙\n╠➩〘/berapakah Text〙\n╠➩〘Youtubelink: Judul Video〙\n╠➩〘Youtubevideo: Judul Video〙\n╠➩〘Youtubesearch: Judul Video〙\n╠➩〘Image NamaGambar〙\n╠➩〘Say-id Text〙\n╠➩〘Say-en Text〙\n╠➩〘Say-jp Text〙\n╠➩〘Image NamaGambar〙\n╠➩〘Tr-id Text (Translate En Ke ID〙\n╠➩〘Tr-en Text (Translate ID Ke En〙\n╠➩〘Tr-th Text (Translate ID Ke Th〙\n╠➩〘Id@en Text (Translate ID Ke En〙\n╠➩〘Id@th Text (Translate ID Ke TH〙\n╠➩〘En@id Text (Translate En Ke ID〙\n╠═════════════════════════\n║ ☆☞ M E D I A ☜☆\n╚═════════════════════════\n\"\"\"\n\ngroupMessage =\"\"\"\n╔═════════════════════════\n║ ☆☞ G R O U P ☜☆\n╠═════════════════════════\n╠➩〘Welcome〙\n╠➩〘Say welcome〙\n╠➩〘Invite creator〙\n╠➩〘Setview〙\n╠➩〘Viewseen〙\n╠➩〘Gn: (NamaGroup)〙\n╠➩〘Tag all〙\n╠➩〘Recover〙\n╠➩〘Cancel〙\n╠➩〘Cancelall〙\n╠➩〘Gcreator〙\n╠➩〘Ginfo〙\n╠➩〘Gurl〙\n╠➩〘List group〙\n╠➩〘Pict group: (NamaGroup)〙\n╠➩〘Spam: (Text)〙\n╠➩〘Spam〙\n╠➩〘Add all〙\n╠➩〘Kick: (Mid)〙\n╠➩〘Invite: (Mid)〙\n╠➩〘Invite〙\n╠➩〘Memlist〙\n╠➩〘Getgroup image〙\n╠➩〘Urlgroup Image〙\n╠═════════════════════════\n║ ☆☞ G R O U P ☜☆\n╚═════════════════════════\n\"\"\"\ntjia=\"u71b6799e1c37868a871d442e67633182\"\n\nsetMessage =\"\"\"\n╔═════════════════════════\n║ ☆☞ S E T ☜☆\n╠═════════════════════════\n╠➩〘Sambutan on/off〙\n╠➩〘Url on/off〙\n╠➩〘Alwaysread on/off〙\n╠➩〘Sider on/off〙\n╠➩〘Contact on/off〙\n╠➩〘Simisimi on/off〙\n╠═════════════════════════\n║ ☆☞ S E T ☜☆\n╚═════════════════════════\n\"\"\"\n\ncreatorMessage =\"\"\"\n╔═════════════════════════\n║ ☆☞ C R E A T O R ☜☆\n╠═════════════════════════\n╠➩〘Admin add @〙\n╠➩〘Admin remove @〙\n╠➩〘/cnkapten〙\n╠➩〘/cntc1〙\n╠➩〘/cntc2〙\n╠➩〘/cntc3〙\n╠➩〘/cntc4〙\n╠➩〘Crash〙\n╠➩〘Kickall〙\n╠➩〘Bc: (Text)〙\n╠➩〘Nk: @〙\n╠➩〘Ulti @〙\n╠➩〘Join group: (NamaGroup〙\n╠➩〘Leave group: (NamaGroup〙\n╠➩〘Leave all group〙\n╠➩〘Bot restart〙\n╠➩〘Turn off〙\n╠═════════════════════════\n║ ☆☞ C R E A T O R ☜☆\n╚═════════════════════════\n\"\"\"\n\nadminMessage =\"\"\"\n╔═════════════════════════\n║ ☆☞ A D M I N ☜☆\n╠═════════════════════════\n╠➩〘Admin list〙\n╠➩〘Ban〙\n╠➩〘Unban〙\n╠➩〘Ban @〙\n╠➩〘Unban @〙\n╠➩〘Ban list〙\n╠➩〘Clear ban〙\n╠➩〘Kill〙\n╠➩〘Kick @〙\n╠➩〘Set member: (Jumblah)〙\n╠➩〘Ban group: (NamaGroup〙\n╠➩〘Del ban: (NamaGroup〙\n╠➩〘List ban〙\n╠➩〘Kill ban〙\n╠➩〘Glist〙\n╠➩〘Glistmid〙\n╠➩〘Details group: (Gid)〙\n╠➩〘Cancel invite: (Gid)〙\n╠➩〘Invitemeto: (Gid)〙\n╠➩〘Kapten acc invite〙\n╠➩〘TC1 acc invite〙\n╠➩〘TC2 acc invite〙\n╠➩〘TC3 acc invite〙\n╠➩〘TC4 acc invite〙\n╠➩〘Removechat〙\n╠➩〘Join on/off〙\n╠➩〘Joincancel on/off〙\n╠➩〘Respon on/off〙\n╠➩〘Responkick on/off〙\n╠➩〘Leave 
on/off〙\n╠➩〘All join / (TC1/2/3/4 Join)〙\n╠═════════════════════════\n║ ☆☞ A D M I N ☜☆\n╚═════════════════════════\n\"\"\"\n\nhelpMessage =\"\"\"\n╔═════════════════════════\n║ ☆☞ H E L P ☜☆\n╠═════════════════════════\n╠➩〘Help protect〙\n╠➩〘Help self〙\n╠➩〘Help bot〙\n╠➩〘Help group〙\n╠➩〘Help set〙\n╠➩〘Help media〙\n╠➩〘Help admin〙\n╠➩〘Help creator〙\n╠➩〘Owner〙\n╠➩〘Pap owner〙\n╠➩〘Admin〙\n╠➩〘Speed〙\n╠➩〘Speed test〙\n╠➩〘Status〙\n╠═════════════════════════\n║ ☆☞ H E L P ☜☆\n╚═════════════════════════\n\"\"\"\n\nprotectMessage =\"\"\"\n╔═════════════════════════\n║ ☆☞ P R O T E C T ☜☆\n╠═════════════════════════\n╠➩〘Allprotect on/off〙\n╠➩〘Autocancel on/off〙\n╠➩〘Qr on/off〙\n╠➩〘Autokick on/off〙\n╠➩〘Ghost on/off〙\n╠➩〘Invitepro on/off〙\n╠═════════════════════════\n║ ☆☞ P R O T E C T ☜☆\n╚═════════════════════════\n\"\"\"\n\n\nKAC=[cl,ki,kk]\nmid = cl.getProfile().mid\nAmid = ki.getProfile().mid\nBmid = kk.getProfile().mid\nBots=[mid,Amid,Bmid]\nCreator=[\"u71b6799e1c37868a871d442e67633182\"]\nadmin=[\"u71b6799e1c37868a871d442e67633182\"]\n\ncontact = cl.getProfile()\nbackup1 = cl.getProfile()\nbackup1.displayName = contact.displayName\nbackup1.statusMessage = contact.statusMessage \nbackup1.pictureStatus = contact.pictureStatus\n\ncontact = ki.getProfile()\nbackup2 = ki.getProfile()\nbackup2.displayName = contact.displayName\nbackup2.statusMessage = contact.statusMessage \nbackup2.pictureStatus = contact.pictureStatus\n\ncontact = kk.getProfile()\nbackup3 = kk.getProfile()\nbackup3.displayName = contact.displayName\nbackup3.statusMessage = contact.statusMessage \nbackup3.pictureStatus = contact.pictureStatus\n\n\n\n\n\nresponsename = cl.getProfile().displayName\nresponsename2 = ki.getProfile().displayName\nresponsename3 = kk.getProfile().displayName\n\n\nwait = {\n \"LeaveRoom\":True,\n \"AutoJoin\":False,\n \"AutoJoinCancel\":True,\n \"memberscancel\":0,\n \"Members\":1,\n \"AutoCancel\":{},\n \"AutoCancelon\":False, \n \"joinkick\":False,\n \"AutoKick\":{},\n \"AutoKickon\":False,\n 'pap':{},\n 'invite':{},\n 'steal':{},\n 'gift':{},\n 'likeOn':{},\n 'Leave':{}, \n 'detectMention':True,\n 'kickMention':False, \n 'timeline':True,\n \"Timeline\":True,\n \"comment1\":\"Kenapa Kak?\",\n \"comment2\":\"Wkwkwk \(○^ω^○)/\",\n \"comment3\":\"Lucu Banget!!! 
ヘ(^_^)ヘ\",\n \"comment4\":\"Nice Kak (^_^)\",\n \"comment5\":\"Bot Auto Like ©By : Azmi\\nContact Me : 👉 line.me/ti/p/~a_ulul15\", \n \"commentOn\":True,\n \"commentBlack\":{},\n \"message\":\"Thx For Add Me (^_^)\\nInvite Me To Your Group ヘ(^_^)ヘ\", \n \"blacklist\":{},\n \"wblacklist\":False,\n \"dblacklist\":False,\n \"Qr\":{},\n \"Qron\":False,\n \"Contact\":False,\n \"Sambutan\":True,\n \"Ghost\":False,\n \"inviteprotect\":False, \n \"alwaysRead\":False, \n \"Sider\":{},\n \"Simi\":{}, \n \"lang\":\"JP\",\n \"BlGroup\":{}\n}\n\nsettings = {\n \"simiSimi\":{}\n }\n \ncctv = {\n \"cyduk\":{},\n \"point\":{},\n \"sidermem\":{}\n} \n\nwait2 = {\n \"readPoint\":{},\n \"readMember\":{},\n \"setTime\":{},\n \"ROM\":{}\n }\n\nsetTime = {}\nsetTime = wait2['setTime']\nmulai = time.time() \n\ndef download_page(url):\n version = (3,0)\n cur_version = sys.version_info\n if cur_version >= version: \n import urllib,request \n try:\n headers = {}\n headers['User-Agent'] = \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\"\n req = urllib,request.Request(url, headers = headers)\n resp = urllib,request.urlopen(req)\n respData = str(resp.read())\n return respData\n except Exception as e:\n print(str(e))\n else: \n import urllib2\n try:\n headers = {}\n headers['User-Agent'] = \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17\"\n req = urllib2.Request(url, headers = headers)\n response = urllib2.urlopen(req)\n page = response.read()\n return page\n except:\n return\"Page Not found\"\n\n\ndef _images_get_next_item(s):\n start_line = s.find('rg_di')\n if start_line == -1: \n end_quote = 0\n link = \"no_links\"\n return link, end_quote\n else:\n start_line = s.find('\"class=\"rg_meta\"')\n start_content = s.find('\"ou\"',start_line+90)\n end_content = s.find(',\"ow\"',start_content-90)\n content_raw = str(s[start_content+6:end_content-1])\n return content_raw, end_content\n\n\ndef _images_get_all_items(page):\n items = []\n while True:\n item, end_content = _images_get_next_item(page)\n if item == \"no_links\":\n break\n else:\n items.append(item) \n time.sleep(0.1) \n page = page[end_content:]\n return items\n \ndef waktu(secs):\n mins, secs = divmod(secs,60)\n hours, mins = divmod(mins,60)\n return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs) \n \ndef cms(string, commands):# /XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...\n tex = [\"+\",\"@\",\"/\",\">\",\";\",\"^\",\"%\",\"$\",\"^\",\"サテラ:\",\"サテラ:\",\"サテラ:\",\"サテラ:\"]\n for texX in tex:\n for command in commands:\n if string ==command:\n return True\n return False \n\ndef upload_tempimage(client):\n '''\n Upload a picture of a kitten. We don't ship one, so get creative!\n '''\n config = {\n 'album': album,\n 'name': 'bot auto upload',\n 'title': 'bot auto upload',\n 'description': 'bot auto upload'\n }\n\n print(\"Uploading image... 
\")\n image = client.upload_from_path(image_path, config=config, anon=False)\n print(\"Done\")\n print()\n\n return image\n \ndef sendAudio(self, to_, path):\n M = Message()\n M.text = None\n M.to = to_\n M.contentMetadata = None\n M.contentPreview = None\n M.contentType = 3\n M_id = self._client.sendMessage(0,M).id\n files = {\n 'file': open(path, 'rb'),\n }\n \ndef sendMessage(to, text, contentMetadata={}, contentType=0):\n mes = Message()\n mes.to, mes.from_ = to, profile.mid\n mes.text = text\n mes.contentType, mes.contentMetadata = contentType, contentMetadata\n if to not in messageReq:\n messageReq[to] = -1\n messageReq[to] += 1\n \ndef sendImage(self, to_, path):\n M = Message(to=to_, text=None, contentType = 1)\n M.contentMetadata = None\n M.contentPreview = None\n M2 = self._client.sendMessage(0,M)\n M_id = M2.id\n files = {\n 'file': open(path, 'rb'),\n }\n params = {\n 'name': 'media',\n 'oid': M_id,\n 'size': len(open(path, 'rb').read()),\n 'type': 'image',\n 'ver': '1.0',\n }\n data = {\n 'params': json.dumps(params)\n }\n r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)\n if r.status_code != 201:\n raise Exception('Upload image failure.')\n return True\n\n\ndef sendImageWithURL(self, to_, url):\n path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'w') as f:\n shutil.copyfileobj(r.raw, f)\n else:\n raise Exception('Download image failure.')\n try:\n self.sendImage(to_, path)\n except:\n try:\n self.sendImage(to_, path)\n except Exception as e:\n raise e\n\ndef sendAudio(self, to_, path):\n M = Message()\n M.text = None\n M.to = to_\n M.contentMetadata = None\n M.contentPreview = None\n M.contentType = 3\n M_id = self._client.sendMessage(0,M).id\n files = {\n 'file': open(path, 'rb'),\n }\n params = {\n 'name': 'media',\n 'oid': M_id,\n 'size': len(open(path, 'rb').read()),\n 'type': 'audio',\n 'ver': '1.0',\n }\n data = {\n 'params': json.dumps(params)\n }\n r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)\n if r.status_code != 201:\n raise Exception('Upload audio failure.')\n return True\n\ndef sendAudioWithURL(self, to_, url):\n path = self.downloadFileWithURL(url)\n try:\n self.sendAudio(to_, path)\n except Exception as e:\n raise Exception(e)\n\ndef sendAudioWithUrl(self, to_, url):\n path = '%s/pythonLine-%1.data' % (tempfile.gettempdir(), randint(0, 9))\n r = requests.get(url, stream=True, verify=False)\n if r.status_code == 200:\n with open(path, 'w') as f:\n shutil.copyfileobj(r.raw, f)\n else:\n raise Exception('Download audio failure.')\n try:\n self.sendAudio(to_, path)\n except Exception as e:\n raise e\n \ndef downloadFileWithURL(self, fileUrl):\n saveAs = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))\n r = self.get_content(fileUrl)\n if r.status_code == 200:\n with open(saveAs, 'wb') as f:\n shutil.copyfileobj(r.raw, f)\n return saveAs\n else:\n raise Exception('Download file failure.')\n\ndef restart_program():\n python = sys.executable\n os.execl(python, python, * sys.argv)\n\n\ndef bot(op):\n try:\n\n if op.type == 0:\n return\n\n if op.type == 5:\n if wait[\"autoAdd\"] == True:\n cl.findAndAddContactsByMid(op.param1)\n if(wait[\"message\"]in[\"\",\" \",\"\\n\",None]):\n pass\n else:\n cl.sendText(op.param1,str(wait[\"message\"]))\n\n\n if op.type == 55:\n\t try:\n\t group_id = op.param1\n\t user_id=op.param2\n\t subprocess.Popen('echo \"'+ 
user_id+'|'+str(op.createdTime)+'\" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )\n\t except Exception as e:\n\t print e\n\t \n if op.type == 55:\n try:\n if cctv['cyduk'][op.param1]==True:\n if op.param1 in cctv['point']:\n Name = cl.getContact(op.param2).displayName\n Name = ki.getContact(op.param2).displayName\n Name = kk.getContact(op.param2).displayName\n Name = kc.getContact(op.param2).displayName\n Name = kr.getContact(op.param2).displayName\n if Name in cctv['sidermem'][op.param1]:\n pass\n else:\n cctv['sidermem'][op.param1] += \"\\n• \" + Name\n if \" \" in Name:\n nick = Name.split(' ')\n if len(nick) == 2:\n random.choice(KAC).sendText(op.param1, \"Haii \" + \"☞ \" + nick[0] + \" ☜\" + \"\\nNgintip Aja Niih. . .\\nChat Kek Idiih (-__-) \")\n else:\n random.choice(KAC).sendText(op.param1, \"Haii \" + \"☞ \" + nick[1] + \" ☜\" + \"\\nBetah Banget Jadi Penonton. . .\\nChat Napa (-__-) \")\n else:\n random.choice(KAC).sendText(op.param1, \"Haii \" + \"☞ \" + Name + \" ☜\" + \"\\nNgapain Kak Ngintip Aja???\\nSini Gabung Chat... \")\n else:\n pass\n else:\n pass\n except:\n pass\n\n else:\n pass \t \n\t \n\n if op.type == 22:\n cl.leaveRoom(op.param1)\n\n if op.type == 21:\n cl.leaveRoom(op.param1)\n\n\n if op.type == 13:\n\t print op.param3\n if op.param3 in mid:\n\t\tif op.param2 in Creator:\n\t\t cl.acceptGroupInvitation(op.param1)\n if op.param3 in Amid:\n\t\tif op.param2 in Creator:\n\t\t ki.acceptGroupInvitation(op.param1)\n if op.param3 in Bmid:\n\t\tif op.param2 in Creator:\n\t\t kk.acceptGroupInvitation(op.param1)\n if op.param3 in Cmid:\n\t\tif op.param2 in Creator:\n\t\t kc.acceptGroupInvitation(op.param1)\n if op.param3 in Dmid:\n\t\tif op.param2 in Creator:\n\t\t kr.acceptGroupInvitation(op.param1)\n \n if op.param3 in mid:\n\t\tif op.param2 in Amid:\n\t\t cl.acceptGroupInvitation(op.param1)\n if op.param3 in mid:\n\t\tif op.param2 in Bmid:\n\t\t cl.acceptGroupInvitation(op.param1)\n if op.param3 in mid:\n\t\tif op.param2 in Cmid:\n\t\t cl.acceptGroupInvitation(op.param1)\n \n if op.param3 in Amid:\n\t\tif op.param2 in mid:\n\t\t ki.acceptGroupInvitation(op.param1)\n if op.param3 in Amid:\n\t\tif op.param2 in Bmid:\n\t\t ki.acceptGroupInvitation(op.param1)\n if op.param3 in Amid:\n\t\tif op.param2 in Cmid:\n\t\t ki.acceptGroupInvitation(op.param1)\n \n if op.param3 in Bmid:\n\t\tif op.param2 in mid:\n\t\t kk.acceptGroupInvitation(op.param1)\n if op.param3 in Bmid:\n\t\tif op.param2 in Amid:\n\t\t kk.acceptGroupInvitation(op.param1)\n if op.param3 in Bmid:\n\t\tif op.param2 in Cmid:\n\t\t kk.acceptGroupInvitation(op.param1)\n \n if op.param3 in Cmid:\n\t\tif op.param2 in mid:\n\t\t kc.acceptGroupInvitation(op.param1)\n if op.param3 in Cmid:\n\t\tif op.param2 in Amid:\n\t\t kc.acceptGroupInvitation(op.param1)\n if op.param3 in Cmid:\n\t\tif op.param2 in Cmid:\n\t\t kc.acceptGroupInvitation(op.param1)\n \n if op.param3 in Dmid:\n\t\tif op.param2 in mid:\n\t\t kr.acceptGroupInvitation(op.param1)\n if op.param3 in Dmid:\n\t\tif op.param2 in Amid:\n\t\t kr.acceptGroupInvitation(op.param1)\n if op.param3 in Dmid:\n\t\tif op.param2 in Bmid:\n\t\t kr.acceptGroupInvitation(op.param1)\n\t\t \n\t if mid in op.param3:\t \n if wait[\"AutoJoinCancel\"] == True:\n\t\t G = cl.getGroup(op.param1)\n if len(G.members) <= wait[\"memberscancel\"]:\n cl.acceptGroupInvitation(op.param1)\n cl.sendText(op.param1,\"Maaf \" + cl.getContact(op.param2).displayName + \"\\nMember Kurang Dari 30 Orang\\nUntuk Info, Silahkan Chat Owner Kami!\")\n c = Message(to=op.param1, 
from_=None, text=None, contentType=13)\n c.contentMetadata={'mid':tjia}\n cl.sendMessage(c) \n cl.leaveGroup(op.param1) \n\t\t else:\n cl.acceptGroupInvitation(op.param1)\n\t\t\tG = cl.getGroup(op.param1)\n\t\t\tG.preventJoinByTicket = False\n\t\t\tcl.updateGroup(G)\n\t\t\tTi = cl.reissueGroupTicket(op.param1)\n\t\t\tki.acceptGroupInvitationByTicket(op.param1,Ti)\n\t\t\tkk.acceptGroupInvitationByTicket(op.param1,Ti)\n\t\t\tkc.acceptGroupInvitationByTicket(op.param1,Ti)\n\t\t\tkr.acceptGroupInvitationByTicket(op.param1,Ti)\n\t\t\tG.preventJoinByTicket = True\n\t\t\tcl.updateGroup(G)\n\t\t\tcl.sendText(op.param1,\"☆Ketik ☞Help☜ Untuk Bantuan☆\\n☆Harap Gunakan Dengan Bijak ^_^ ☆\")\n \t\t \n \n\t if mid in op.param3:\n if wait[\"AutoJoin\"] == True:\n\t\t G = cl.getGroup(op.param1)\n if len(G.members) <= wait[\"Members\"]:\n cl.rejectGroupInvitation(op.param1)\n\t\t else:\n cl.acceptGroupInvitation(op.param1)\n\t\t\tG = cl.getGroup(op.param1)\n\t\t\tG.preventJoinByTicket = False\n\t\t\tcl.updateGroup(G)\n\t\t\tTi = cl.reissueGroupTicket(op.param1)\n\t\t\tki.acceptGroupInvitationByTicket(op.param1,Ti)\n\t\t\tkk.acceptGroupInvitationByTicket(op.param1,Ti)\n\t\t\tkc.acceptGroupInvitationByTicket(op.param1,Ti)\n\t\t\tkr.acceptGroupInvitationByTicket(op.param1,Ti)\n\t\t\tG.preventJoinByTicket = True\n\t\t\tcl.updateGroup(G)\n\t\t\tcl.sendText(op.param1,\"☆Ketik ☞Help☜ Untuk Bantuan☆\\n☆Harap Gunakan Dengan Bijak ^_^ ☆\")\n\t else:\n if wait[\"AutoCancel\"][op.param1] == True:\n\t\t if op.param3 in admin:\n\t\t\tpass\n\t\t else:\n cl.cancelGroupInvitation(op.param1, [op.param3])\n\t\telse:\n\t\t if op.param3 in wait[\"blacklist\"]:\n\t\t\tcl.cancelGroupInvitation(op.param1, [op.param3])\n\t\t\tcl.sendText(op.param1, \"Blacklist Detected\")\n\t\t else:\n\t\t\tpass\n\n\n if op.type == 19:\n\t\tif wait[\"AutoKick\"][op.param1] == True:\n\t\t try:\n\t\t\tif op.param3 in Creator:\n\t\t\t if op.param3 in admin:\n\t\t\t if op.param3 in Bots:\n\t\t\t pass\n\t\t if op.param2 in Creator:\n\t\t if op.param2 in admin:\n\t\t if op.param2 in Bots:\n\t\t pass\n\t\t else:\n\t\t random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n\t\t if op.param2 in wait[\"blacklist\"]:\n\t\t pass\n\t\t else:\n\t\t\t random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])\n\t\t except:\n\t\t try:\n\t\t\t if op.param2 not in Creator:\n\t\t\t if op.param2 not in admin:\n\t\t\t if op.param2 not in Bots:\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n\t\t\t if op.param2 in wait[\"blacklist\"]:\n\t\t\t pass\n\t\t\t else:\n\t\t\t random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])\n\t\t except:\n\t\t\t print (\"client Kick regulation or Because it does not exist in the group\\ngid=[\"+op.param1+\"]\\nmid=[\"+op.param2+\"]\")\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t\t if op.param2 in Creator:\n\t\t\t if op.param2 in admin:\n\t\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t\t else:\n wait[\"blacklist\"][op.param2] = True\n\t\t if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t if op.param2 in Creator:\n\t\t if op.param2 in admin:\n\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t else:\n wait[\"blacklist\"][op.param2] = True\n\t\telse:\n\t\t pass\n\t\t\n\n\n if mid in op.param3:\n if op.param2 in Creator:\n if op.param2 in Bots:\n pass\n try:\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n\t\t\trandom.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n except:\n try:\n\t\t\t random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n except:\n print (\"client Kick 
regulation or Because it does not exist in the group\\ngid=[\"+op.param1+\"]\\nmid=[\"+op.param2+\"]\")\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t\t else:\n wait[\"blacklist\"][op.param2] = True\n G = ki.getGroup(op.param1)\n G.preventJoinByTicket = False\n ki.updateGroup(G)\n Ti = ki.reissueGroupTicket(op.param1)\n cl.acceptGroupInvitationByTicket(op.param1,Ti)\n ki.acceptGroupInvitationByTicket(op.param1,Ti)\n kk.acceptGroupInvitationByTicket(op.param1,Ti)\n kc.acceptGroupInvitationByTicket(op.param1,Ti)\n kr.acceptGroupInvitationByTicket(op.param1,Ti)\n X = cl.getGroup(op.param1)\n X.preventJoinByTicket = True\n cl.updateGroup(X)\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t else:\n wait[\"blacklist\"][op.param2] = True\n\n if Amid in op.param3:\n if op.param2 in Bots:\n pass\n try:\n kk.kickoutFromGroup(op.param1,[op.param2])\n kc.kickoutFromGroup(op.param1,[op.param2])\n except:\n try:\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n except:\n print (\"client Kick regulation or Because it does not exist in the group\\ngid=[\"+op.param1+\"]\\nmid=[\"+op.param2+\"]\")\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t\t else:\n wait[\"blacklist\"][op.param2] = True\n\n X = kk.getGroup(op.param1)\n X.preventJoinByTicket = False\n cl.updateGroup(X)\n Ti = kk.reissueGroupTicket(op.param1)\n cl.acceptGroupInvitationByTicket(op.param1,Ti)\n ki.acceptGroupInvitationByTicket(op.param1,Ti)\n kk.acceptGroupInvitationByTicket(op.param1,Ti)\n kr.acceptGroupInvitationByTicket(op.param1,Ti)\n G = ki.getGroup(op.param1)\n G.preventJoinByTicket = True\n ki.updateGroup(G)\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t else:\n wait[\"blacklist\"][op.param2] = True\n\n if Bmid in op.param3:\n if op.param2 in Bots:\n pass\n try:\n kc.kickoutFromGroup(op.param1,[op.param2])\n kk.kickoutFromGroup(op.param1,[op.param2])\n except:\n try:\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n except:\n print (\"client Kick regulation or Because it does not exist in the group\\ngid=[\"+op.param1+\"]\\nmid=[\"+op.param2+\"]\")\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t\t else:\n wait[\"blacklist\"][op.param2] = True\n\n X = kc.getGroup(op.param1)\n X.preventJoinByTicket = False\n kc.updateGroup(X)\n Ti = kc.reissueGroupTicket(op.param1)\n cl.acceptGroupInvitationByTicket(op.param1,Ti)\n ki.acceptGroupInvitationByTicket(op.param1,Ti)\n kk.acceptGroupInvitationByTicket(op.param1,Ti)\n kc.acceptGroupInvitationByTicket(op.param1,Ti)\n kr.acceptGroupInvitationByTicket(op.param1,Ti)\n G = kk.getGroup(op.param1)\n G.preventJoinByTicket = True\n kk.updateGroup(G)\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t else:\n wait[\"blacklist\"][op.param2] = True\n\n if Cmid in op.param3:\n if op.param2 in Bots:\n pass\n try:\n cl.kickoutFromGroup(op.param1,[op.param2])\n kk.kickoutFromGroup(op.param1,[op.param2])\n except:\n try:\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n except:\n print (\"client Kick regulation or Because it does not exist in the group\\ngid=[\"+op.param1+\"]\\nmid=[\"+op.param2+\"]\")\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t\t else:\n wait[\"blacklist\"][op.param2] = True\n\n X 
= cl.getGroup(op.param1)\n X.preventJoinByTicket = False\n cl.updateGroup(X)\n Ti = cl.reissueGroupTicket(op.param1)\n cl.acceptGroupInvitationByTicket(op.param1,Ti)\n ki.acceptGroupInvitationByTicket(op.param1,Ti)\n kk.acceptGroupInvitationByTicket(op.param1,Ti)\n kc.acceptGroupInvitationByTicket(op.param1,Ti)\n kr.acceptGroupInvitationByTicket(op.param1,Ti)\n G = kc.getGroup(op.param1)\n G.preventJoinByTicket = True\n kc.updateGroup(G)\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t else:\n wait[\"blacklist\"][op.param2] = True\n \n if Dmid in op.param3:\n if op.param2 in Bots:\n pass\n try:\n cl.kickoutFromGroup(op.param1,[op.param2])\n kk.kickoutFromGroup(op.param1,[op.param2])\n except:\n try:\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n except:\n print (\"client Kick regulation or Because it does not exist in the group\\ngid=[\"+op.param1+\"]\\nmid=[\"+op.param2+\"]\")\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t\t else:\n wait[\"blacklist\"][op.param2] = True\n\n X = cl.getGroup(op.param1)\n X.preventJoinByTicket = False\n cl.updateGroup(X)\n Ti = cl.reissueGroupTicket(op.param1)\n cl.acceptGroupInvitationByTicket(op.param1,Ti)\n ki.acceptGroupInvitationByTicket(op.param1,Ti)\n kk.acceptGroupInvitationByTicket(op.param1,Ti)\n kc.acceptGroupInvitationByTicket(op.param1,Ti)\n kr.acceptGroupInvitationByTicket(op.param1,Ti)\n G = kc.getGroup(op.param1)\n G.preventJoinByTicket = True\n kc.updateGroup(G)\n if op.param2 in wait[\"blacklist\"]:\n pass\n else:\n\t\t if op.param2 in Bots:\n\t\t\t pass\n\t\t else:\n wait[\"blacklist\"][op.param2] = True \n \n if Creator in op.param3:\n if admin in op.param3:\n if op.param2 in Bots:\n pass\n try:\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n\t\t\trandom.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n except:\n try:\n\t\t\t if op.param2 not in Bots:\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n\t\t\t if op.param2 in wait[\"blacklist\"]:\n\t\t\t pass\n\t\t\t else:\n\t\t\t random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])\n except:\n print (\"client Kick regulation or Because it does not exist in the group\\ngid=[\"+op.param1+\"]\\nmid=[\"+op.param2+\"]\")\n if op.param2 in wait[\"blacklist\"]:\n pass\n if op.param2 in wait[\"whitelist\"]:\n pass\n else:\n wait[\"blacklist\"][op.param2] = True\n random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])\n if op.param2 in wait[\"blacklist\"]:\n pass\n if op.param2 in wait[\"whitelist\"]:\n pass\n else:\n wait[\"blacklist\"][op.param2] = True\n\n\n if op.type == 11:\n if wait[\"Qr\"][op.param1] == True:\n if op.param2 not in Bots:\n if op.param2 not in admin:\n G = random.choice(KAC).getGroup(op.param1)\n G.preventJoinByTicket = True\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n random.choice(KAC).updateGroup(G)\n\n\n if op.type == 17:\n if wait[\"Sambutan\"] == True:\n if op.param2 in admin:\n return\n ginfo = cl.getGroup(op.param1)\n contact = cl.getContact(op.param2)\n image = \"http://dl.profile.line-cdn.net/\" + contact.pictureStatus\n c = Message(to=op.param1, from_=None, text=None, contentType=13)\n c.contentMetadata={'mid':op.param2}\n cl.sendMessage(c)\n cl.sendText(op.param1,\"Hallo \" + cl.getContact(op.param2).displayName + \"\\nWelcome To ☞ \" + str(ginfo.name) + \" ☜\" + \"\\nBudayakan Cek Note\\nDan Semoga Betah Disini ^_^\")\n cl.sendImageWithURL(op.param1,image)\n print \"MEMBER JOIN TO 
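# A minimal sketch of the recovery pattern the branches above repeat for every
# bot account (and again further down): open the group link, reissue a ticket,
# let every helper account rejoin, then close the link. `opener` and `clients`
# stand for LINE client objects exposing the same methods this script already
# uses (getGroup, updateGroup, reissueGroupTicket, acceptGroupInvitationByTicket).
def rejoin_via_ticket(opener, clients, group_id):
    group = opener.getGroup(group_id)
    group.preventJoinByTicket = False           # temporarily allow join-by-link
    opener.updateGroup(group)
    ticket = opener.reissueGroupTicket(group_id)
    for bot in clients:
        try:
            bot.acceptGroupInvitationByTicket(group_id, ticket)
        except Exception:
            pass                                # account may already be in the group
    group.preventJoinByTicket = True            # lock the link again
    opener.updateGroup(group)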
GROUP\"\n \n \n if op.type == 17:\n if wait[\"joinkick\"] == True:\n if op.param2 in admin:\n if op.param2 in Bots:\n return\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n print \"MEMBER JOIN KICK TO GROUP\"\n\n if op.type == 15:\n if wait[\"Sambutan\"] == True:\n if op.param2 in admin:\n return\n cl.sendText(op.param1,\"Good Bye \" + cl.getContact(op.param2).displayName + \"\\nSee You Next Time . . . (p′︵‵。) 🤗\")\n random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])\n print \"MEMBER HAS LEFT THE GROUP\"\n\n\n if op.type == 13:\n if op.param2 not in Creator:\n if op.param2 not in admin:\n if op.param2 not in Bots:\n if op.param2 in Creator:\n if op.param2 in admin:\n if op.param2 in Bots:\n pass\n elif wait[\"inviteprotect\"] == True:\n wait [\"blacklist\"][op.param2] = True\n cl.cancelGroupInvitation(op.param1,[op.param3])\n random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])\n\n if op.type == 19:\n\t if wait[\"Ghost\"] == True:\n if op.param2 in admin:\n if op.param2 in Bots:\n pass\n else:\n try:\n G = cl.getGroup(op.param1)\n G.preventJoinByTicket = False\n cl.updateGroup(G)\n Ticket = cl.reissueGroupTicket(op.param1)\n km.acceptGroupInvitationByTicket(op.param1,Ticket)\n time.sleep(0.01)\n km.kickoutFromGroup(op.param1,[op.param2])\n c = Message(to=op.param1, from_=None, text=None, contentType=13)\n c.contentMetadata={'mid':op.param2}\n km.sendMessage(c)\n km.leaveGroup(op.param1)\n G.preventJoinByTicket = True\n cl.updateGroup(G)\n wait[\"blacklist\"][op.param2] = True\n except:\n G = cl.getGroup(op.param1)\n G.preventJoinByTicket = False\n cl.updateGroup(G)\n Ticket = cl.reissueGroupTicket(op.param1)\n km.acceptGroupInvitationByTicket(op.param1,Ticket)\n time.sleep(0.01)\n km.kickoutFromGroup(op.param1,[op.param2])\n c = Message(to=op.param1, from_=None, text=None, contentType=13)\n c.contentMetadata={'mid':op.param2}\n km.sendMessage(c)\n km.leaveGroup(op.param1)\n G.preventJoinByTicket = True\n cl.updateGroup(G)\n wait[\"blacklist\"][op.param2] = True\n\n\n\n if op.type == 26:\n msg = op.message\n\n\n\n if wait[\"alwaysRead\"] == True:\n if msg.toType == 0:\n cl.sendChatChecked(msg.from_,msg.id)\n else:\n cl.sendChatChecked(msg.to,msg.id)\n \n if msg.contentType == 16:\n if wait['likeOn'] == True:\n url = msg.contentMetadata[\"postEndUrl\"]\n cl.like(url[25:58], url[66:], likeType=1005)\n ki.like(url[25:58], url[66:], likeType=1002)\n kk.like(url[25:58], url[66:], likeType=1004)\n kc.like(url[25:58], url[66:], likeType=1003)\n kr.like(url[25:58], url[66:], likeType=1001)\n cl.comment(url[25:58], url[66:], wait[\"comment1\"])\n ki.comment(url[25:58], url[66:], wait[\"comment2\"])\n kk.comment(url[25:58], url[66:], wait[\"comment3\"])\n kc.comment(url[25:58], url[66:], wait[\"comment4\"])\n kr.comment(url[25:58], url[66:], wait[\"comment5\"])\n cl.sendText(msg.to,\"Like Success\") \n wait['likeOn'] = False\n\n if op.type == 26:\n msg = op.message\n if msg.to in settings[\"simiSimi\"]:\n if settings[\"simiSimi\"][msg.to] == True:\n if msg.text is not None:\n text = msg.text\n r = requests.get(\"http://api.ntcorp.us/chatbot/v1/?text=\" + text.replace(\" \",\"+\") + \"&key=beta1.nt\")\n data = r.text\n data = json.loads(data)\n if data['status'] == 200:\n if data['result']['result'] == 100:\n cl.sendText(msg.to,data['result']['response'].encode('utf-8'))\n\n if 'MENTION' in msg.contentMetadata.keys() != None:\n if wait[\"kickMention\"] == True:\n contact = cl.getContact(msg.from_)\n cName = contact.displayName\n balas = [\"Aku Bilang Jangan Ngetag Lagi \" + 
cName + \"\\nAku Kick Kamu! Sorry, Byee!!!\"]\n ret_ = random.choice(balas) \n name = re.findall(r'@(\\w+)', msg.text)\n mention = ast.literal_eval(msg.contentMetadata['MENTION'])\n mentionees = mention['MENTIONEES']\n for mention in mentionees:\n if mention['M'] in admin:\n cl.sendText(msg.to,ret_)\n random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])\n break \n if mention['M'] in Bots:\n cl.sendText(msg.to,ret_)\n random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])\n break \n \n if 'MENTION' in msg.contentMetadata.keys() != None:\n if wait[\"detectMention\"] == True:\n contact = cl.getContact(msg.from_)\n cName = contact.displayName\n balas = [\"Sekali lagi nge tag gw sumpahin jomblo seumur hidup!\",\"Dont Tag!! Lagi Sibuk\",cName + \" Ngapain Ngetag?\",cName + \" Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja\",\"Tag Mulu Lo Anjirr!\",\"Dia Lagi Off\", cName + \" Kenapa Tag? Kangen?\",\"Dia Lagi Tidur\\nJangan Di Tag \" + cName, \"Jangan Suka Tag Gua \" + cName, \"Kamu Siapa \" + cName + \"?\", \"Ada Perlu Apa \" + cName + \"?\",\"Woii \" + cName + \" Jangan Ngetag, Riibut!\"]\n ret_ = random.choice(balas)\n name = re.findall(r'@(\\w+)', msg.text)\n mention = ast.literal_eval(msg.contentMetadata['MENTION'])\n mentionees = mention['MENTIONEES']\n for mention in mentionees:\n if mention['M'] in admin:\n cl.sendText(msg.to,ret_)\n break \n if mention['M'] in Bots:\n cl.sendText(msg.to,ret_)\n break \n\n if msg.contentType == 13:\n if wait[\"wblacklist\"] == True:\n\t\t if msg.contentMetadata[\"mid\"] not in admin:\n if msg.contentMetadata[\"mid\"] in wait[\"blacklist\"]:\n random.choice(KAC).sendText(msg.to,\"Sudah\")\n wait[\"wblacklist\"] = False\n else:\n wait[\"blacklist\"][msg.contentMetadata[\"mid\"]] = True\n wait[\"wblacklist\"] = False\n random.choice(KAC).sendText(msg.to,\"Ditambahkan\")\n\t\t else:\n\t\t\tcl.sendText(msg.to,\"Admin Detected~\")\n\t\t\t\n\n elif wait[\"dblacklist\"] == True:\n if msg.contentMetadata[\"mid\"] in wait[\"blacklist\"]:\n del wait[\"blacklist\"][msg.contentMetadata[\"mid\"]]\n random.choice(KAC).sendText(msg.to,\"Terhapus\")\n wait[\"dblacklist\"] = False\n\n else:\n wait[\"dblacklist\"] = False\n random.choice(KAC).sendText(msg.to,\"Tidak Ada Black List\")\n \n \n \n elif wait[\"Contact\"] == True:\n msg.contentType = 0\n cl.sendText(msg.to,msg.contentMetadata[\"mid\"])\n if 'displayName' in msg.contentMetadata:\n contact = cl.getContact(msg.contentMetadata[\"mid\"])\n try:\n cu = cl.channel.getCover(msg.contentMetadata[\"mid\"])\n except:\n cu = \"\"\n cl.sendText(msg.to,\"Nama:\\n\" + msg.contentMetadata[\"displayName\"] + \"\\n\\nMid:\\n\" + msg.contentMetadata[\"mid\"] + \"\\n\\nStatus:\\n\" + contact.statusMessage + \"\\n\\nPhoto Profile:\\nhttp://dl.profile.line-cdn.net/\" + contact.pictureStatus + \"\\n\\nPhoto Cover:\\n\" + str(cu))\n else:\n contact = cl.getContact(msg.contentMetadata[\"mid\"])\n try:\n cu = cl.channel.getCover(msg.contentMetadata[\"mid\"])\n except:\n cu = \"\"\n cl.sendText(msg.to,\"Nama:\\n\" + msg.contentMetadata[\"displayName\"] + \"\\n\\nMid:\\n\" + msg.contentMetadata[\"mid\"] + \"\\n\\nStatus:\\n\" + contact.statusMessage + \"\\n\\nPhoto Profile:\\nhttp://dl.profile.line-cdn.net/\" + contact.pictureStatus + \"\\n\\nPhoto Cover:\\n\" + str(cu))\n\n\n \n elif msg.text == \"Ginfo\":\n if msg.toType == 2:\n ginfo = cl.getGroup(msg.to)\n try:\n gCreator = ginfo.creator.displayName\n except:\n gCreator = \"Error\"\n if wait[\"lang\"] == \"JP\":\n if ginfo.invitee is None:\n sinvitee = \"0\"\n else:\n sinvitee = 
str(len(ginfo.invitee))\n if ginfo.preventJoinByTicket == True:\n u = \"close\"\n else:\n u = \"open\"\n cl.sendText(msg.to,\"[Group name]\\n\" + str(ginfo.name) + \"\\n\\n[Gid]\\n\" + msg.to + \"\\n\\n[Group creator]\\n\" + gCreator + \"\\n\\n[Profile status]\\nhttp://dl.profile.line.naver.jp/\" + ginfo.pictureStatus + \"\\n\\nMembers:\" + str(len(ginfo.members)) + \"members\\nPending:\" + sinvitee + \"people\\nURL:\" + u + \"it is inside\")\n else:\n cl.sendText(msg.to,\"[group name]\\n\" + str(ginfo.name) + \"\\n[gid]\\n\" + msg.to + \"\\n[group creator]\\n\" + gCreator + \"\\n[profile status]\\nhttp://dl.profile.line.naver.jp/\" + ginfo.pictureStatus)\n else:\n if wait[\"lang\"] == \"JP\":\n cl.sendText(msg.to,\"Can not be used outside the group\")\n else:\n cl.sendText(msg.to,\"Not for use less than group\")\n \n\n \n elif msg.text is None:\n return\n \n elif msg.text in [\"Creator\",\"Owner\"]:\n msg.contentType = 13\n msg.contentMetadata = {'mid': tjia}\n cl.sendMessage(msg)\n\t\tcl.sendText(msg.to,\"Itu tukang tikungnya(^_^)\")\n\t\t\n elif msg.text in [\"Admin\",\"admin\"]:\n msg.contentType = 13\n admin1 = \"u71b6799e1c37868a871d442e67633182\"\n admin2 = \"u46560b002469877f708c1d2e8966fc9d\"\n admin3 = \"u1dee2db35847101e3aa420e667390000\"\n msg.contentMetadata = {'mid': tjia}\n random.choice(KAC).sendMessage(msg)\n msg.contentMetadata = {'mid': admin1}\n random.choice(KAC).sendMessage(msg)\n msg.contentMetadata = {'mid': admin2}\n random.choice(KAC).sendMessage(msg)\n msg.contentMetadata = {'mid': admin3}\n random.choice(KAC).sendMessage(msg) \n\t\trandom.choice(KAC).sendText(msg.to,\"Itu Admin Kami (^_^)\")\t\n\t\t\n \n \n elif \"Admin add @\" in msg.text:\n if msg.from_ in Creator:\n print \"[Command]Admin add executing\"\n _name = msg.text.replace(\"Admin add @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = cl.getGroup(msg.to)\n gs = ki.getGroup(msg.to)\n gs = kk.getGroup(msg.to)\n gs = kc.getGroup(msg.to)\n gs = kr.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n random.choice(KAC).sendText(msg.to,\"Contact Tidak Di Temukan\")\n else:\n for target in targets:\n try:\n admin.append(target)\n cl.sendText(msg.to,\"Admin Chucky Ditambahkan\")\n except:\n pass\n print \"[Command]Admin add executed\"\n else:\n cl.sendText(msg.to,\"Command Denied.\")\n cl.sendText(msg.to,\"Creator Permission Required.\")\n \n elif \"Admin remove @\" in msg.text:\n if msg.from_ in Creator:\n print \"[Command]Admin Remove Executing\"\n _name = msg.text.replace(\"Admin remove @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = cl.getGroup(msg.to)\n gs = ki.getGroup(msg.to)\n gs = kk.getGroup(msg.to)\n gs = kc.getGroup(msg.to)\n gs = kr.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n random.choice(KAC).sendText(msg.to,\"Contact Tidak Di Temukan\")\n else:\n for target in targets:\n try:\n admin.remove(target)\n cl.sendText(msg.to,\"Admin Chucky Dihapus\")\n except:\n pass\n print \"[Command]Admin remove executed\"\n else:\n cl.sendText(msg.to,\"Command Denied.\")\n cl.sendText(msg.to,\"Creator Permission Required.\")\n \n elif msg.text in [\"Admin list\",\"admin list\",\"List admin\"]:\n if admin == []:\n cl.sendText(msg.to,\"The Admin List Is Empty\")\n else:\n cl.sendText(msg.to,\"Tunggu...\")\n mc = \"╔═════════════════════════\\n║ ☆☞ ADMIN CHUCKY ☜☆\\n╠═════════════════════════\\n\"\n for mi_d in admin:\n mc += \"╠••> \" 
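# A sketch of the name lookup behind "Admin add @..." / "Admin remove @...":
# strip the command prefix, drop the trailing space the mention leaves behind,
# then collect the mid of every member whose display name matches exactly.
# `group` is the object returned by getGroup() in the branches above.
def mids_by_display_name(group, command_text, prefix):
    name = command_text.replace(prefix, "").rstrip(" ")
    return [m.mid for m in group.members if m.displayName == name]

# e.g. targets = mids_by_display_name(cl.getGroup(msg.to), msg.text, "Admin add @")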
+cl.getContact(mi_d).displayName + \"\\n\"\n cl.sendText(msg.to,mc + \"╚═════════════════════════\")\n print \"[Command]Admin List executed\"\n \n\n \n\n\t elif msg.text in [\"Group creator\",\"Gcreator\",\"gcreator\"]:\n\t\tginfo = cl.getGroup(msg.to)\n\t\tgCreator = ginfo.creator.mid\n msg.contentType = 13\n msg.contentMetadata = {'mid': gCreator}\n cl.sendMessage(msg)\n\t\tcl.sendText(msg.to,\"Itu Yang Buat Grup Ini\")\n \n\n \n elif msg.contentType == 16:\n if wait[\"Timeline\"] == True:\n msg.contentType = 0\n msg.text = \"post URL\\n\" + msg.contentMetadata[\"postEndUrl\"]\n random.choice(KAC).sendText(msg.to,msg.text)\n\n \n if msg.contentType == 13:\n if wait[\"steal\"] == True:\n _name = msg.contentMetadata[\"displayName\"]\n copy = msg.contentMetadata[\"mid\"]\n groups = cl.getGroup(msg.to)\n pending = groups.invitee\n targets = []\n for s in groups.members:\n if _name in s.displayName:\n print \"[Target] Stealed\"\n break \n else:\n targets.append(copy)\n if targets == []:\n pass\n else:\n for target in targets:\n try:\n cl.findAndAddContactsByMid(target)\n contact = cl.getContact(target)\n cu = cl.channel.getCover(target)\n path = str(cu)\n image = \"http://dl.profile.line-cdn.net/\" + contact.pictureStatus\n cl.sendText(msg.to,\"Nama :\\n\" + contact.displayName + \"\\n\\nMid :\\n\" + msg.contentMetadata[\"mid\"] + \"\\n\\nBio :\\n\" + contact.statusMessage)\n cl.sendText(msg.to,\"Profile Picture \" + contact.displayName)\n cl.sendImageWithURL(msg.to,image)\n cl.sendText(msg.to,\"Cover \" + contact.displayName)\n cl.sendImageWithURL(msg.to,path)\n wait[\"steal\"] = False\n break\n except:\n pass\n\n\n if msg.contentType == 13:\n if wait[\"gift\"] == True:\n _name = msg.contentMetadata[\"displayName\"]\n copy = msg.contentMetadata[\"mid\"]\n groups = cl.getGroup(msg.to)\n pending = groups.invitee\n targets = []\n for s in groups.members:\n if _name in s.displayName:\n print \"[Target] Gift\"\n break \n else:\n targets.append(copy)\n if targets == []:\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,\"Gift Sudah Terkirim!\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '1',\n 'STKPKGID': '1296261'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n wait['gift'] = False\n break\n except:\n msg.contentMetadata = {'mid': target}\n wait[\"gift\"] = False\n break\n\n\n if msg.contentType == 13:\n if wait['invite'] == True:\n _name = msg.contentMetadata[\"displayName\"]\n invite = msg.contentMetadata[\"mid\"]\n groups = cl.getGroup(msg.to)\n groups = ki.getGroup(msg.to) \n groups = kk.getGroup(msg.to) \n groups = kc.getGroup(msg.to) \n groups = kr.getGroup(msg.to) \n pending = groups.invitee\n targets = []\n for s in groups.members:\n if _name in s.displayName:\n random.choice(KAC).sendText(msg.to, _name + \" Berada DiGrup Ini\")\n else:\n targets.append(invite)\n if targets == []:\n pass\n else:\n for target in targets:\n try:\n cl.findAndAddContactsByMid(target)\n ki.findAndAddContactsByMid(target) \n kk.findAndAddContactsByMid(target) \n kc.findAndAddContactsByMid(target) \n kr.findAndAddContactsByMid(target) \n random.choice(KAC).inviteIntoGroup(msg.to,[target])\n random.choice(KAC).sendText(msg.to,\"Invite \" + _name)\n wait['invite'] = False\n break \n except: \n random.choice(KAC).sendText(msg.to,\"Limit Invite\")\n wait['invite'] = False\n break\n \n \n elif msg.text in [\"Key creator\",\"help creator\",\"Help creator\"]:\n cl.sendText(msg.to,creatorMessage)\n\n elif msg.text in [\"Key 
group\",\"help group\",\"Help group\"]:\n cl.sendText(msg.to,groupMessage)\n\n elif msg.text in [\"Key\",\"help\",\"Help\"]:\n cl.sendText(msg.to,helpMessage)\n\n elif msg.text in [\"Key self\",\"help self\",\"Help self\"]:\n cl.sendText(msg.to,selfMessage)\n\n elif msg.text in [\"Key bot\",\"help bot\",\"Help bot\"]:\n cl.sendText(msg.to,botMessage)\n\n elif msg.text in [\"Key set\",\"help set\",\"Help set\"]:\n cl.sendText(msg.to,setMessage)\n\n elif msg.text in [\"Key media\",\"help media\",\"Help media\"]:\n cl.sendText(msg.to,mediaMessage)\n \n elif msg.text in [\"Key admin\",\"help admin\",\"Help admin\"]:\n cl.sendText(msg.to,adminMessage) \n \n elif msg.text in [\"Key protect\",\"help protect\",\"Help protect\"]:\n cl.sendText(msg.to,protectMessage) \n \n\n \n elif msg.text in [\"List group\"]:\n gid = cl.getGroupIdsJoined()\n h = \"\"\n\t\t jml = 0\n for i in gid:\n\t\t gn = cl.getGroup(i).name\n h += \"♦【%s】\\n\" % (gn)\n\t\t jml += 1\n cl.sendText(msg.to,\"=======[List Group]=======\\n\"+ h +\"\\nTotal Group: \"+str(jml))\n \n\t elif \"Ban group: \" in msg.text:\n\t\tgrp = msg.text.replace(\"Ban group: \",\"\")\n\t\tgid = cl.getGroupIdsJoined()\n\t\tif msg.from_ in admin:\n\t\t for i in gid:\n\t\t h = cl.getGroup(i).name\n\t\t\tif h == grp:\n\t\t\t wait[\"BlGroup\"][i]=True\n\t\t\t cl.sendText(msg.to, \"Success Ban Group : \"+grp)\n\t\t\telse:\n\t\t\t pass\n\t\telse:\n\t\t cl.sendText(msg.to, \"Only Admin\")\n \n elif msg.text in [\"List ban\",\"List ban group\"]:\n\t\tif msg.from_ in admin:\n if wait[\"BlGroup\"] == {}:\n random.choice(KAC).sendText(msg.to,\"Tidak Ada\")\n else:\n mc = \"\"\n for gid in wait[\"BlGroup\"]:\n mc += \"-> \" +cl.getGroup(gid).name + \"\\n\"\n random.choice(KAC).sendText(msg.to,\"===[Ban Group]===\\n\"+mc)\n\t\telse:\n\t\t cl.sendText(msg.to, \"Khusus Admin\")\n \n\t elif msg.text in [\"Del ban: \"]:\n\t\tif msg.from_ in admin:\n\t\t ng = msg.text.replace(\"Del ban: \",\"\")\n\t\t for gid in wait[\"BlGroup\"]:\n\t\t if cl.getGroup(gid).name == ng:\n\t\t\t del wait[\"BlGroup\"][gid]\n\t\t\t cl.sendText(msg.to, \"Success del ban \"+ng)\n\t\t else:\n\t\t\t pass\n\t\telse:\n\t\t cl.sendText(msg.to, \"Only Admin\")\n \n elif \"Join group: \" in msg.text:\n\t\tng = msg.text.replace(\"Join group: \",\"\")\n\t\tgid = cl.getGroupIdsJoined()\n\t\tgid = ki.getGroupIdsJoined()\n\t\tgid = kk.getGroupIdsJoined()\n\t\tgid = kc.getGroupIdsJoined()\n\t\tgid = kr.getGroupIdsJoined()\n\t\ttry:\n\t\t if msg.from_ in Creator:\n for i in gid:\n h = cl.getGroup(i).name\n h = ki.getGroup(i).name\n h = kk.getGroup(i).name\n h = kc.getGroup(i).name\n h = kr.getGroup(i).name\n\t\t if h == ng:\n\t\t random.choice(KAC).inviteIntoGroup(i,[Creator])\n\t\t\t cl.sendText(msg.to,\"Success Join To [\"+ h +\"] Group\")\n\t\t\t else:\n\t\t\t pass\n\t\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\n\t\texcept Exception as e:\n\t\t cl.sendText(msg.to, str(e))\n \n\t elif \"Leave group: \" in msg.text:\n\t\tng = msg.text.replace(\"Leave group: \",\"\")\n\t\tgid = cl.getGroupIdsJoined()\n\t\tif msg.from_ in Creator:\n for i in gid:\n h = cl.getGroup(i).name\n\t\t if h == ng:\n\t\t\t cl.sendText(i,\"Bot Di Paksa Keluar Oleh Owner!\")\n\t\t cl.leaveGroup(i)\n\t\t\t ki.leaveGroup(i)\n\t\t\t kk.leaveGroup(i)\n\t\t\t kc.leaveGroup(i)\n\t\t\t kr.leaveGroup(i)\n\t\t\t cl.sendText(msg.to,\"Success Left [\"+ h +\"] group\")\n\t\t\telse:\n\t\t\t pass\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\n \n\t elif \"Leave all group\" == msg.text:\n\t\tgid = cl.getGroupIdsJoined()\n if msg.from_ in 
Creator:\n\t\t for i in gid:\n\t\t\tcl.sendText(i,\"Bot Di Paksa Keluar Oleh Owner!\")\n\t\t cl.leaveGroup(i)\n\t\t\tki.leaveGroup(i)\n\t\t\tkk.leaveGroup(i)\n\t\t\tkc.leaveGroup(i)\n\t\t\tkr.leaveGroup(i)\n\t\t cl.sendText(msg.to,\"Success Leave All Group\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\n\t\t \n\n elif \"Pict group: \" in msg.text:\n saya = msg.text.replace('Pict group: ','')\n gid = cl.getGroupIdsJoined()\n for i in gid:\n h = cl.getGroup(i).name\n gna = cl.getGroup(i)\n if h == saya:\n cl.sendImageWithURL(msg.to,\"http://dl.profile.line.naver.jp/\"+ gna.pictureStatus)\t\t \n\t\t \n \n elif msg.text in [\"cancelall\",\"Cancelall\"]:\n if msg.toType == 2:\n X = cl.getGroup(msg.to)\n if X.invitee is not None:\n gInviMids = [contact.mid for contact in X.invitee]\n cl.cancelGroupInvitation(msg.to, gInviMids)\n else:\n cl.sendText(msg.to,\"Tidak Ada Yang Pending\")\n else:\n cl.sendText(msg.to,\"Tidak Bisa Digunakan Diluar Group\")\n \n elif msg.text in [\"Ourl\",\"Url on\"]:\n if msg.toType == 2:\n X = cl.getGroup(msg.to)\n X.preventJoinByTicket = False\n cl.updateGroup(X)\n cl.sendText(msg.to,\"Url Sudah Aktif\")\n else:\n cl.sendText(msg.to,\"Can not be used outside the group\")\n \n elif msg.text in [\"Curl\",\"Url off\"]:\n if msg.toType == 2:\n X = cl.getGroup(msg.to)\n X.preventJoinByTicket = True\n cl.updateGroup(X)\n cl.sendText(msg.to,\"Url Sudah Di Nonaktifkan\")\n\n else:\n cl.sendText(msg.to,\"Can not be used outside the group\")\n \n elif msg.text in [\"Join on\",\"Autojoin on\"]:\n\t\tif msg.from_ in admin:\n wait[\"AutoJoin\"] = True\n wait[\"AutoJoinCancel\"] = False\n cl.sendText(msg.to,\"Auto Join Sudah Aktif\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\n\n elif msg.text in [\"Join off\",\"Autojoin off\"]:\n\t\tif msg.from_ in admin:\n wait[\"AutoJoin\"] = False\n cl.sendText(msg.to,\"Auto Join Sudah Di Nonaktifkan\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\n\t\t \n\t\t \n elif msg.text in [\"Joincancel on\",\"Autojoincancel on\"]:\n\t\tif msg.from_ in admin:\n wait[\"AutoJoinCancel\"] = True\n wait[\"AutoJoin\"] = False\n cl.sendText(msg.to,\"Auto Join Cancel Sudah Aktif\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\n\n elif msg.text in [\"Joincancel off\",\"Autojoincancel off\"]:\n\t\tif msg.from_ in admin:\n wait[\"AutoJoinCancel\"] = False\n cl.sendText(msg.to,\"Auto Join Cancel Sudah Di Nonaktifkan\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\t \n\t\t \n \n elif msg.text in [\"Respon on\"]:\n\t\tif msg.from_ in admin:\n wait[\"detectMention\"] = True\n wait[\"kickMention\"] = False\n cl.sendText(msg.to,\"Auto Respon Sudah Aktif\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\n\n elif msg.text in [\"Respon off\"]:\n\t\tif msg.from_ in admin:\n wait[\"detectMention\"] = False\n cl.sendText(msg.to,\"Auto Respon Sudah Off\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\n\t\t \n\t\t \n \n elif msg.text in [\"Responkick on\"]:\n\t\tif msg.from_ in admin:\n wait[\"kickMention\"] = True \n wait[\"detectMention\"] = False\n cl.sendText(msg.to,\"Auto Respon Kick Sudah Aktif\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\n\n elif msg.text in [\"Responkick off\"]:\n\t\tif msg.from_ in admin:\n wait[\"kickMention\"] = False \n cl.sendText(msg.to,\"Auto Respon Kick Sudah Off\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\t\t \n \n elif msg.text in [\"Leave on\"]:\n\t\tif msg.from_ in admin:\n wait[\"Leave\"] = True\n cl.sendText(msg.to,\"Leave Sudah Aktif\")\n\t\telse:\n\t\t 
cl.sendText(msg.to,\"Only Admin\")\n\t\t \n \n\t elif msg.text in [\"Autocancel on\"]:\n\t if msg.from_ in admin:\t \n wait[\"AutoCancel\"][msg.to] = True\n wait[\"AutoCancelon\"] = True\n cl.sendText(msg.to,\"Auto Cancel Sudah Aktif\")\n\t\tprint wait[\"AutoCancel\"]\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\t\n\n\t elif msg.text in [\"Autocancel off\"]:\n\t if msg.from_ in admin:\t \n wait[\"AutoCancel\"][msg.to] = False\n wait[\"AutoCancelon\"] = False\n cl.sendText(msg.to,\"Auto Cancel Sudah Di Nonaktifkan\")\n\t\tprint wait[\"AutoCancel\"]\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\n\n\n\t elif msg.text in [\"Joinkick on\"]:\n\t if msg.from_ in admin:\t \n wait[\"joinkick\"] = True\n wait[\"Sambutan\"] = False\n cl.sendText(msg.to,\"Join Kick Sudah Aktif\")\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\t\n\n\t elif msg.text in [\"Joinkick off\"]:\n\t if msg.from_ in admin:\t \n wait[\"joinkick\"] = False\n cl.sendText(msg.to,\"Join Kick Sudah Di Nonaktifkan\")\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\n\n\t\t \n\n\t elif msg.text in [\"Invitepro on\",\"Inviteprotect on\"]:\n\t if msg.from_ in admin:\t \n wait[\"inviteprotect\"] = True\n cl.sendText(msg.to,\"Invite Protect Sudah Aktif\")\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\t\n\n\t elif msg.text in [\"Invitepro off\",\"Inviteprotect off\"]:\n\t if msg.from_ in admin:\t \n wait[\"inviteprotect\"] = False\n cl.sendText(msg.to,\"Invite Protect Sudah Di Nonaktifkan\")\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\t \n\n\t elif \"Qr on\" in msg.text:\n\t if msg.from_ in admin:\t \n\t wait[\"Qr\"][msg.to] = True\n\t wait[\"Qron\"] = True\n\t \tcl.sendText(msg.to,\"QR Protect Sudah Aktif\")\n\t\tprint wait[\"Qr\"]\t \t\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t \t\n\n\t elif \"Qr off\" in msg.text:\n\t if msg.from_ in admin:\t \n\t \twait[\"Qr\"][msg.to] = False\n\t \twait[\"Qron\"] = False\n\t \tcl.sendText(msg.to,\"Qr Protect Sudah Di Nonaktifkan\")\n\t\tprint wait[\"Qr\"]\t \t\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t \t\n \n\t elif msg.text in [\"Autokick on\"]:\n\t if msg.from_ in admin:\t \n wait[\"AutoKick\"][msg.to] = True\n wait[\"AutoKickon\"] = True\n cl.sendText(msg.to,\"Auto Kick Sudah Aktif\")\n\t\tprint wait[\"AutoKick\"]\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\t\n\n\t elif msg.text in [\"Autokick off\"]:\n\t if msg.from_ in admin:\t \n wait[\"AutoKick\"][msg.to] = False\n wait[\"AutoKickon\"] = False\n cl.sendText(msg.to,\"Auto Kick Sudah Di Nonaktifkan\")\n\t\tprint wait[\"AutoKick\"]\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\n\n\n\t elif msg.text in [\"Ghost on\"]:\n\t if msg.from_ in admin:\t \n wait[\"Ghost\"] = True\n cl.sendText(msg.to,\"Ghost Sudah Aktif\")\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t\t\n\n\t elif msg.text in [\"Ghost off\"]:\n\t if msg.from_ in admin:\t \n wait[\"Ghost\"] = False\n cl.sendText(msg.to,\"Ghost Sudah Di Nonaktifkan\")\n\t else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\t \n\n elif msg.text in [\"Allprotect on\"]:\n\t\tif msg.from_ in admin:\n wait[\"AutoCancel\"][msg.to] = True\n wait[\"AutoCancelon\"] = True\n wait[\"inviteprotect\"] = True \n wait[\"joinkick\"] = True \n wait[\"AutoKick\"][msg.to] = True\n wait[\"AutoKickon\"] = True\n wait[\"Qr\"][msg.to] = True\n wait[\"Qron\"] = True\n wait[\"Ghost\"] = True \n cl.sendText(msg.to,\"All Protect Sudah Aktif Semua\")\n\t\t print wait[\"AutoCancel\"]\n\t\t print wait[\"AutoKick\"]\n\t\t print wait[\"Qr\"]\n\t\telse:\n\t\t 
cl.sendText(msg.to,\"Only Admin\")\n\n elif msg.text in [\"Allprotect off\"]:\n\t\tif msg.from_ in admin:\n wait[\"AutoCancel\"][msg.to] = False\n wait[\"AutoCancelon\"] = False\n wait[\"inviteprotect\"] = False \n wait[\"joinkick\"] = False\n wait[\"AutoKick\"][msg.to] = False\n wait[\"AutoKickon\"] = False\n wait[\"Qr\"][msg.to] = False\n wait[\"Qron\"] = False\n wait[\"Ghost\"] = False \n cl.sendText(msg.to,\"All Protect Sudah Di Nonaktifkan Semua\")\n\t\t print wait[\"AutoCancel\"]\n\t\t print wait[\"AutoKick\"]\n\t\t print wait[\"Qr\"]\n\t\telse: \n\t\t#else:\n\t\t cl.sendText(msg.to,\"Only Admin\")\n\n\n elif msg.text in [\"K on\",\"Contact on\"]:\n wait[\"Contact\"] = True\n cl.sendText(msg.to,\"Contact Sudah Aktif\")\n\n elif msg.text in [\"K off\",\"Contact off\"]:\n wait[\"Contact\"] = False\n cl.sendText(msg.to,\"Contact Sudah Di Nonaktifkan\")\n \n\n elif msg.text in [\"Alwaysread on\"]:\n wait[\"alwaysRead\"] = True\n cl.sendText(msg.to,\"Always Read Sudah Aktif\")\n\n elif msg.text in [\"Alwaysread off\"]:\n wait[\"alwaysRead\"] = False\n cl.sendText(msg.to,\"Always Read Sudah Di Nonaktifkan\") \n\n\n elif msg.text in [\"Sambutan on\"]:\n if wait[\"Sambutan\"] == True:\n if wait[\"lang\"] == \"JP\":\n cl.sendText(msg.to,\"Sambutan Di Aktifkanヾ(*´∀`*)ノ\")\n else:\n wait[\"Sambutan\"] = True\n wait[\"joinkick\"] = False\n if wait[\"lang\"] == \"JP\":\n cl.sendText(msg.to,\"Sudah Onヽ(´▽`)/\")\n\n elif msg.text in [\"Sambutan off\"]:\n if wait[\"Sambutan\"] == False:\n if wait[\"lang\"] == \"JP\":\n cl.sendText(msg.to,\"Sambutan Di Nonaktifkan( ^∇^)\")\n else:\n wait[\"Sambutan\"] = False\n if wait[\"lang\"] == \"JP\":\n cl.sendText(msg.to,\"Sudah Off(p′︵‵。)\")\n \n \n elif \"Sider on\" in msg.text:\n try:\n del cctv['point'][msg.to]\n del cctv['sidermem'][msg.to]\n del cctv['cyduk'][msg.to]\n except:\n pass\n cctv['point'][msg.to] = msg.id\n cctv['sidermem'][msg.to] = \"\"\n cctv['cyduk'][msg.to]=True\n wait[\"Sider\"] = True\n cl.sendText(msg.to,\"Siap On Cek Sider\")\n \n elif \"Sider off\" in msg.text:\n if msg.to in cctv['point']:\n cctv['cyduk'][msg.to]=False\n wait[\"Sider\"] = False\n cl.sendText(msg.to, \"Cek Sider Off\")\n else:\n cl.sendText(msg.to, \"Heh Belom Di Set\") \n\n\n elif msg.text in [\"Status\"]:\n md = \"\"\n\t\tif wait[\"Sambutan\"] == True: md+=\"╠➩✔️ Sambutan : On\\n\"\n\t\telse:md+=\"╠➩❌ Sambutan : Off\\n\"\n\t\tif wait[\"joinkick\"] == True: md+=\"╠➩✔️ Join Kick : On\\n\"\n\t\telse:md+=\"╠➩❌ Join Kick : Off\\n\"\t\t\n\t\tif wait[\"AutoJoin\"] == True: md+=\"╠➩✔️ Auto Join : On\\n\"\n else: md +=\"╠➩❌ Auto Join : Off\\n\"\n\t\tif wait[\"AutoJoinCancel\"] == True: md+=\"╠➩✔️ Auto Join Cancel : On\\n\"\n else: md +=\"╠➩❌ Auto Join Cancel : Off\\n\" \n\t\tif wait[\"Leave\"] == True: md+=\"╠➩✔️ Leave : On\\n\"\n else: md +=\"╠➩❌ Leave : Off\\n\" \n\t\tif wait[\"Contact\"] == True: md+=\"╠➩✔️ Info Contact : On\\n\"\n\t\telse: md+=\"╠➩❌ Info Contact : Off\\n\"\n if wait[\"AutoCancelon\"] == True:md+=\"╠➩✔️ Auto Cancel : On\\n\"\n else: md+= \"╠➩❌ Auto Cancel : Off\\n\"\n if wait[\"inviteprotect\"] == True:md+=\"╠➩✔️ Invite Protect : On\\n\"\n else: md+= \"╠➩❌ Invite Protect : Off\\n\" \n\t\tif wait[\"Qron\"] == True: md+=\"╠➩✔️ Qr Protect : On\\n\"\n\t\telse:md+=\"╠➩❌ Qr Protect : Off\\n\"\n\t\tif wait[\"AutoKickon\"] == True: md+=\"╠➩✔️ Auto Kick : On\\n\"\n\t\telse:md+=\"╠➩❌ Auto Kick : Off\\n\"\n\t\tif wait[\"Ghost\"] == True: md+=\"╠➩✔️ Ghost : On\\n\"\n\t\telse:md+=\"╠➩❌ Ghost : Off\\n\"\n\t\tif wait[\"alwaysRead\"] == True: md+=\"╠➩✔️ Always Read : 
On\\n\"\n\t\telse:md+=\"╠➩❌ Always Read: Off\\n\"\n\t\tif wait[\"detectMention\"] == True: md+=\"╠➩✔️ Auto Respon : On\\n\"\n\t\telse:md+=\"╠➩❌ Auto Respon : Off\\n\"\t\t\n\t\tif wait[\"kickMention\"] == True: md+=\"╠➩✔️ Auto Respon Kick : On\\n\"\n\t\telse:md+=\"╠➩❌ Auto Respon Kick : Off\\n\"\t\t\t\t\n\t\tif wait[\"Sider\"] == True: md+=\"╠➩✔️ Auto Sider : On\\n\"\n\t\telse:md+=\"╠➩❌ Auto Sider: Off\\n\"\t\n\t\tif wait[\"Simi\"] == True: md+=\"╠➩✔️ Simisimi : On\\n\"\n\t\telse:md+=\"╠➩❌ Simisimi: Off\\n\"\t\t\n cl.sendText(msg.to,\"╔═════════════════════════\\n\"\"║ ☆☞ S T A T U S ☜☆\\n\"\"╠═════════════════════════\\n\"+md+\"╚═════════════════════════\")\n\n\n elif msg.text in [\"Gift\",\"gift\"]:\n msg.contentType = 9\n msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',\n 'PRDTYPE': 'THEME',\n 'MSGTPL': '5'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text in [\"All gift\"]:\n msg.contentType = 9\n msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',\n 'PRDTYPE': 'THEME',\n 'MSGTPL': '5'}\n msg.text = None\n ki.sendMessage(msg)\n kk.sendMessage(msg)\n kc.sendMessage(msg)\n\n elif msg.text in [\"TC1 Gift\",\"TC1 gift\"]:\n msg.contentType = 9\n msg.contentMetadata={'PRDID': '696d7046-843b-4ed0-8aac-3113ed6c0733',\n 'PRDTYPE': 'THEME',\n 'MSGTPL': '6'}\n msg.text = None\n ki.sendMessage(msg)\n\n elif msg.text in [\"TC2 Gift\",\"TC2 gift\"]:\n msg.contentType = 9\n msg.contentMetadata={'PRDID': '8fe8cdab-96f3-4f84-95f1-6d731f0e273e',\n 'PRDTYPE': 'THEME',\n 'MSGTPL': '7'}\n msg.text = None\n kk.sendMessage(msg)\n\n elif msg.text in [\"TC3 Gift\",\"TC3 gift\"]:\n msg.contentType = 9\n msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',\n 'PRDTYPE': 'THEME',\n 'MSGTPL': '8'}\n msg.text = None\n kc.sendMessage(msg)\n \n \n elif \"Gift1 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift1 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '1',\n 'STKPKGID': '1380280'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n elif \"Gift2 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift2 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '2',\n 'STKPKGID': '1360738'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n elif \"Gift3 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift3 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n 
sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '3',\n 'STKPKGID': '1395389'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n elif \"Gift4 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift4 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '4',\n 'STKPKGID': '1329191'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n elif \"Gift5 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift5 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '1',\n 'STKPKGID': '9057'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n elif \"Gift6 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift6 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '2',\n 'STKPKGID': '9167'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n elif \"Gift7 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift7 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '3',\n 'STKPKGID': '7334'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n elif \"Gift8 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift8 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n 
pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '1',\n 'STKPKGID': '1380280'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n elif \"Gift9 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift9 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '4',\n 'STKPKGID': '1405277'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n elif \"Gift10 \" in msg.text:\n msg.contentType = 13\n nk0 = msg.text.replace(\"Gift10 \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n gs = cl.getGroup(msg.to)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n cl.sendText(msg.to,_name + \" Check Your Gift\")\n msg.contentType = 9\n msg.contentMetadata= {'PRDTYPE': 'STICKER',\n 'STKVER': '1',\n 'MSGTPL': '1',\n 'STKPKGID': '1296261'}\n msg.to = target\n msg.text = None\n cl.sendMessage(msg)\n except:\n msg.contentMetadata = {'mid': target}\n\n\n elif msg.text.lower() in [\"wkwkwk\",\"wkwk\",\"hahaha\",\"haha\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '100',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"hehehe\",\"hehe\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '10',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"galau\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '9',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"you\",\"kau\",\"kamu\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '7',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"marah\",\"hadeuh\",\"hadeh\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '6',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"please\",\"pliss\",\"mohon\",\"tolong\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '4',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"haa\",\"haaa\",\"kaget\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '3',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"lucu\",\"ngakak\",\"lol\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '110',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"hmm\",\"hmmm\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '101',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"tidur\"]:\n 
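# The keyword replies above all send a sticker from package 1 and only the
# STKID changes. A lookup-table sketch of the same idea, using a few of the
# sticker ids that appear in those branches (sent with contentType 7).
STICKER_BY_KEYWORD = {
    "wkwk": "100", "hehe": "10", "galau": "9", "kamu": "7", "marah": "6",
    "please": "4", "kaget": "3", "lol": "110", "hmm": "101", "ok": "13",
}

def sticker_metadata_for(text):
    stkid = STICKER_BY_KEYWORD.get(text.lower())
    if stkid is None:
        return None
    return {"STKID": stkid, "STKPKGID": "1", "STKVER": "100"}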
msg.contentType = 7\n msg.contentMetadata={'STKID': '1',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"gemes\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '2',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"cantik\",\"imut\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '5',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"nyanyi\",\"lalala\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '11',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"gugup\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '8',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"ok\",\"oke\",\"okay\",\"oce\",\"okee\",\"sip\",\"siph\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '13',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"mantab\",\"mantap\",\"nice\",\"keren\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '14',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"ngejek\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '15',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"nangis\",\"sedih\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '16',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"woi\",\"kampret\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '102',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n elif msg.text.lower() in [\"huft\"]:\n msg.contentType = 7\n msg.contentMetadata={'STKID': '104',\n 'STKPKGID': '1',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n \n \n\n\n elif msg.text in [\"Tagall\",\"Tag all\"]:\n group = cl.getGroup(msg.to)\n nama = [contact.mid for contact in group.members]\n\n cb = \"\"\n cb2 = \"\"\n strt = int(0)\n akh = int(0)\n for md in nama:\n akh = akh + int(6)\n\n cb += \"\"\"{\"S\":\"\"\"+json.dumps(str(strt))+\"\"\",\"E\":\"\"\"+json.dumps(str(akh))+\"\"\",\"M\":\"\"\"+json.dumps(md)+\"},\"\"\"\n\n strt = strt + int(7)\n akh = akh + 1\n cb2 += \"@nrik \\n\"\n\n cb = (cb[:int(len(cb)-1)])\n msg.contentType = 0\n msg.text = cb2\n msg.contentMetadata ={'MENTION':'{\"MENTIONEES\":['+cb+']}','EMTVER':'4'}\n\n try:\n cl.sendMessage(msg)\n except Exception as error:\n print error\n\n\n elif msg.text in [\"Setview\",\"Setpoint\",\"Cctv\"]:\n subprocess.Popen(\"echo '' > dataSeen/\"+msg.to+\".txt\", shell=True, stdout=subprocess.PIPE)\n cl.sendText(msg.to, \"☆Checkpoint Checked☆\")\n print \"Setview\"\n\n elif msg.text in [\"Viewseen\",\"Check\",\"Ciduk\",\"Cyduk\"]:\n\t lurkGroup = \"\"\n\t dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []\n with open('dataSeen/'+msg.to+'.txt','r') as rr:\n contactArr = rr.readlines()\n for v in xrange(len(contactArr) -1,0,-1):\n num = re.sub(r'\\n', \"\", contactArr[v])\n contacts.append(num)\n pass\n contacts = list(set(contacts))\n for z in range(len(contacts)):\n arg = contacts[z].split('|')\n userList.append(arg[0])\n timelist.append(arg[1])\n uL = list(set(userList))\n for ll in range(len(uL)):\n try:\n getIndexUser = 
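# A sketch of the offset bookkeeping inside the "Tagall" branch above: every
# member gets a "@nrik " placeholder in the message text, and its start/end
# positions are recorded against the member's mid in the MENTIONEES list that
# goes into msg.contentMetadata.
import json

def build_tagall(mids, placeholder="@nrik \n"):
    text, entries, pos = "", [], 0
    for mid in mids:
        entries.append({"S": str(pos), "E": str(pos + 6), "M": mid})  # "@nrik " is 6 chars
        text += placeholder
        pos += len(placeholder)                                       # newline sits outside the span
    metadata = {"MENTION": json.dumps({"MENTIONEES": entries}), "EMTVER": "4"}
    return text, metadata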
userList.index(uL[ll])\n timeSeen.append(time.strftime(\"%H:%M:%S\", time.localtime(int(timelist[getIndexUser]) / 1000)))\n recheckData.append(userList[getIndexUser])\n except IndexError:\n conName.append('nones')\n pass\n contactId = cl.getContacts(recheckData)\n for v in range(len(recheckData)):\n dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')\n pass\n if len(dataResult) > 0:\n tukang = \"╔═════════════════════════\\n║ ☆☞ LIST VIEWERS ☜☆\\n╠═════════════════════════\\n╠➩\"\n grp = '\\n╠➩ '.join(str(f) for f in dataResult)\n total = '\\n╠═════════════════════════\\n╠➩ Total %i Viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S')) + \"\\n╚═════════════════════════\"\n cl.sendText(msg.to, \"%s %s %s\" % (tukang, grp, total))\n subprocess.Popen(\"echo '' > dataSeen/\"+msg.to+\".txt\", shell=True, stdout=subprocess.PIPE)\n cl.sendText(msg.to, \"☆Auto Checkpoint☆\") \n else:\n cl.sendText(msg.to, \"☆Belum Ada Viewers☆\")\n print \"Viewseen\"\n\n\n\t elif \"Kick \" in msg.text:\n\t\tif msg.from_ in admin:\t \n\t\t if 'MENTION' in msg.contentMetadata.keys()!= None:\n\t\t names = re.findall(r'@(\\w+)', msg.text)\n\t\t mention = ast.literal_eval(msg.contentMetadata['MENTION'])\n\t\t mentionees = mention['MENTIONEES']\n\t\t print mentionees\n\t\t for mention in mentionees:\n\t\t\t ki.kickoutFromGroup(msg.to,[mention['M']])\n\n\t elif \"Set member: \" in msg.text:\n\t\tif msg.from_ in admin:\t \t \n\t\t jml = msg.text.replace(\"Set member: \",\"\")\n\t\t wait[\"memberscancel\"] = int(jml)\n\t\t cl.sendText(msg.to, \"Jumlah minimal member telah di set : \"+jml)\n\n\t elif \"Add all\" in msg.text:\n\t\t thisgroup = cl.getGroups([msg.to])\n\t\t Mids = [contact.mid for contact in thisgroup[0].members]\n\t\t mi_d = Mids[:33]\n\t\t cl.findAndAddContactsByMids(mi_d)\n\t\t cl.sendText(msg.to,\"Success Add all\")\n\n\n elif msg.text in [\"Invite\"]:\n wait[\"invite\"] = True\n cl.sendText(msg.to,\"Send Contact\")\n \n \n\n elif msg.text in [\"Auto like\"]:\n wait[\"likeOn\"] = True\n cl.sendText(msg.to,\"Shere Post Kamu Yang Mau Di Like!\") \n\n\n elif msg.text in [\"Steal contact\"]:\n wait[\"steal\"] = True\n cl.sendText(msg.to,\"Send Contact\")\n \n\n elif msg.text in [\"Giftbycontact\"]:\n wait[\"gift\"] = True\n cl.sendText(msg.to,\"Send Contact\") \n \n\n\t elif \"Recover\" in msg.text:\n\t\tthisgroup = cl.getGroups([msg.to])\n\t\tMids = [contact.mid for contact in thisgroup[0].members]\n\t\tmi_d = Mids[:33]\n\t\tcl.createGroup(\"Recover\", mi_d)\n\t\tcl.sendText(msg.to,\"Success recover\")\n\n\n\n elif (\"Gn: \" in msg.text):\n if msg.toType == 2:\n X = cl.getGroup(msg.to)\n X.name = msg.text.replace(\"Gn: \",\"\")\n cl.updateGroup(X)\n else:\n cl.sendText(msg.to,\"It can't be used besides the group.\")\n\n elif \"Kick: \" in msg.text:\n midd = msg.text.replace(\"Kick: \",\"\")\n\t\tkicker = [ki,kk,kc]\n\t\tif midd not in admin:\n\t\t random.choice(kicker).kickoutFromGroup(msg.to,[midd])\n\t\telse:\n\t\t cl.sendText(msg.to,\"Admin Detected\")\n\n elif \"Invite: \" in msg.text:\n midd = msg.text.replace(\"Invite: \",\"\")\n cl.findAndAddContactsByMid(midd)\n ki.findAndAddContactsByMid(midd)\n kk.findAndAddContactsByMid(midd)\n kc.findAndAddContactsByMid(midd)\n kr.findAndAddContactsByMid(midd)\n random.choice(KAC).inviteIntoGroup(msg.to,[midd])\n\n elif \"Invite creator\" in msg.text:\n midd = \"u71b6799e1c37868a871d442e67633182\"\n random.choice(KAC).inviteIntoGroup(msg.to,[midd])\n\n elif msg.text in [\"Welcome\",\"welcome\",\"Welkam\",\"welkam\",\"Wc\",\"wc\"]:\n 
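# A pure-Python sketch of the "Viewseen" flow above: dataSeen/<group id>.txt
# holds one "<mid>|<millisecond timestamp>" line per read event, and the report
# keeps one (latest) entry per mid formatted as HH:MM:SS. Turning mids into
# display names still requires getContacts(), as in the branch above.
import time

def parse_seen_log(path):
    last_seen = {}
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if "|" not in line:
                continue
            mid, millis = line.split("|", 1)
            if not millis.isdigit():
                continue
            last_seen[mid] = max(last_seen.get(mid, 0), int(millis))
    return dict((mid, time.strftime("%H:%M:%S", time.localtime(ms / 1000.0)))
                for mid, ms in last_seen.items())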
gs = cl.getGroup(msg.to)\n cl.sendText(msg.to,\"Selamat Datang Di \"+ gs.name)\n msg.contentType = 7\n msg.contentMetadata={'STKID': '247',\n 'STKPKGID': '3',\n 'STKVER': '100'}\n msg.text = None\n cl.sendMessage(msg)\n\n\t elif \"Bc: \" in msg.text:\n\t\tbc = msg.text.replace(\"Bc: \",\"\")\n\t\tgid = cl.getGroupIdsJoined()\n\t\tif msg.from_ in Creator:\n\t\t for i in gid:\n\t\t\tcl.sendText(i,\"=======[BROADCAST]=======\\n\\n\"+bc+\"\\n\\nContact Me : line.me/ti/p/~a_ulul15\")\n\t\t cl.sendText(msg.to,\"Success BC BosQ\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Khusus Admin\")\n\n elif msg.text in [\"Cancel\"]:\n gid = cl.getGroupIdsInvited()\n for i in gid:\n cl.rejectGroupInvitation(i)\n cl.sendText(msg.to,\"All invitations have been refused\")\n\n elif msg.text in [\"TC1 Cancel\"]:\n gid = ki.getGroupIdsInvited()\n for i in gid:\n ki.rejectGroupInvitation(i)\n ki.sendText(msg.to,\"All invitations have been refused\")\n\n elif msg.text in [\"TC2 Cancel\"]:\n gid = kk.getGroupIdsInvited()\n for i in gid:\n kk.rejectGroupInvitation(i)\n kk.sendText(msg.to,\"All invitations have been refused\")\n\n elif msg.text in [\"TC3 Cancel\"]:\n gid = kc.getGroupIdsInvited()\n for i in gid:\n kc.rejectGroupInvitation(i)\n kc.sendText(msg.to,\"All invitations have been refused\")\n\n elif msg.text in [\"Gurl\"]:\n if msg.toType == 2:\n x = cl.getGroup(msg.to)\n if x.preventJoinByTicket == True:\n x.preventJoinByTicket = False\n cl.updateGroup(x)\n gurl = cl.reissueGroupTicket(msg.to)\n cl.sendText(msg.to,\"line://ti/g/\" + gurl)\n else:\n if wait[\"lang\"] == \"JP\":\n cl.sendText(msg.to,\"Can't be used outside the group\")\n else:\n cl.sendText(msg.to,\"Not for use less than group\")\n\n elif msg.text in [\"All join\",\"Join all\"]:\n\t\tif msg.from_ in admin:\n\t\t G = cl.getGroup(msg.to)\n ginfo = cl.getGroup(msg.to)\n G.preventJoinByTicket = False\n cl.updateGroup(G)\n invsend = 0\n Ticket = cl.reissueGroupTicket(msg.to)\n ki.acceptGroupInvitationByTicket(msg.to,Ticket)\n time.sleep(0.2)\n kk.acceptGroupInvitationByTicket(msg.to,Ticket)\n time.sleep(0.2)\n kc.acceptGroupInvitationByTicket(msg.to,Ticket)\n time.sleep(0.2)\n kr.acceptGroupInvitationByTicket(msg.to,Ticket)\n time.sleep(0.2)\n G = cl.getGroup(msg.to)\n G.preventJoinByTicket = True\n ki.updateGroup(G)\n G.preventJoinByTicket(G)\n ki.updateGroup(G)\n\t\telse:\n\t\t cl.sendText(msg.to,\"Sape lu!\")\n\n elif msg.text in [\"TC1 join\"]:\n\t\tif msg.from_ in admin:\n X = cl.getGroup(msg.to)\n X.preventJoinByTicket = False\n cl.updateGroup(X)\n invsend = 0\n Ti = cl.reissueGroupTicket(msg.to)\n ki.acceptGroupInvitationByTicket(msg.to,Ti)\n G = kk.getGroup(msg.to)\n G.preventJoinByTicket = True\n ki.updateGroup(G)\n\t\telse:\n\t\t cl.sendText(msg.to,\"Sape lu!\")\n\n elif msg.text in [\"TC2 join\"]:\n\t\tif msg.from_ in admin:\n X = cl.getGroup(msg.to)\n X.preventJoinByTicket = False\n cl.updateGroup(X)\n invsend = 0\n Ti = cl.reissueGroupTicket(msg.to)\n kk.acceptGroupInvitationByTicket(msg.to,Ti)\n G = ki.getGroup(msg.to)\n G.preventJoinByTicket = True\n kk.updateGroup(G)\n\t\telse:\n\t\t cl.sendText(msg.to,\"Sape lu!\")\n\n elif msg.text in [\"TC3 join\"]:\n\t\tif msg.from_ in admin:\n G = cl.getGroup(msg.to)\n ginfo = cl.getGroup(msg.to)\n G.preventJoinByTicket = False\n cl.updateGroup(G)\n invsend = 0\n Ticket = cl.reissueGroupTicket(msg.to)\n kc.acceptGroupInvitationByTicket(msg.to,Ticket)\n G.preventJoinByTicket = True\n kc.updateGroup(G)\n\t\telse:\n\t\t cl.sendText(msg.to,\"Sape lu!\") \n\n elif msg.text in [\"TC4 join\"]:\n\t\tif 
msg.from_ in admin:\n G = cl.getGroup(msg.to)\n ginfo = cl.getGroup(msg.to)\n G.preventJoinByTicket = False\n cl.updateGroup(G)\n invsend = 0\n Ticket = cl.reissueGroupTicket(msg.to)\n kr.acceptGroupInvitationByTicket(msg.to,Ticket)\n G.preventJoinByTicket = True\n kr.updateGroup(G)\n\t\telse:\n\t\t cl.sendText(msg.to,\"Sape lu!\")\n\t\t \n elif msg.text in [\"Ghost join\"]:\n\t\tif msg.from_ in admin:\n G = cl.getGroup(msg.to)\n ginfo = cl.getGroup(msg.to)\n G.preventJoinByTicket = False\n cl.updateGroup(G)\n invsend = 0\n Ticket = cl.reissueGroupTicket(msg.to)\n km.acceptGroupInvitationByTicket(msg.to,Ticket)\n G.preventJoinByTicket = True\n km.updateGroup(G)\n\t\telse:\n\t\t cl.sendText(msg.to,\"Sape lu!\")\t\t \n\n\n\n elif msg.text in [\"timeline\"]:\n\t\ttry:\n url = cl.activity(limit=5)\n\t\t cl.sendText(msg.to,url['result']['posts'][0]['postInfo']['postId'])\n\t\texcept Exception as E:\n\t\t print E\n\n elif msg.text in [\"Bye all\"]:\n if wait[\"Leave\"] == True:\t\t \n ki.leaveGroup(msg.to)\n kk.leaveGroup(msg.to)\n kc.leaveGroup(msg.to)\n kr.leaveGroup(msg.to)\n else:\n\t\t cl.sendText(msg.to,\"Leavenya Belum On\") \n\n elif msg.text in [\"@bye\",\"@Bye\"]:\n if wait[\"Leave\"] == True:\t\n\t\t cl.leaveGroup(msg.to)\n\t\t wait[\"Leave\"] = False\n else:\n\t\t cl.sendText(msg.to,\"Bilang Dulu Sama Admin Ku\")\t\t \n\t\t \n\n elif msg.text in [\"Absen\"]:\n\t\tcl.sendText(msg.to,\"Pasukan Absen!!\")\n ki.sendText(msg.to,\"TC1 Hadiir \\(ˆ▿ˆ)/\")\n kk.sendText(msg.to,\"TC2 Hadiir \\(ˆ▿ˆ)/\")\n kc.sendText(msg.to,\"TC3 Hadiir \\(ˆ▿ˆ)/\")\n kr.sendText(msg.to,\"Hadiir Semua Kapten \\(ˆ▿ˆ)/\")\n\n\n elif msg.text.lower() in [\"respon\"]:\n cl.sendText(msg.to,responsename)\n ki.sendText(msg.to,responsename2)\n kk.sendText(msg.to,responsename3)\n kc.sendText(msg.to,responsename4)\n kr.sendText(msg.to,responsename5)\n\n elif msg.text in [\"Sp\",\"Speed\",\"speed\"]:\n start = time.time()\n print(\"Speed\") \n elapsed_time = time.time() - start\n\t\tcl.sendText(msg.to, \"Tunggu Bentaar BOS....\")\n cl.sendText(msg.to, \"%sseconds\" % (elapsed_time))\n \n elif msg.text in [\"Speed test\"]:\n start = time.time()\n cl.sendText(msg.to, \"Tunggu Bentaar BOS......\")\n elapsed_time = time.time() - start\n cl.sendText(msg.to, \"%sseconds\" % (elapsed_time)) \n\n\n elif \"Nk: \" in msg.text:\n\t\tif msg.from_ in Creator:\n X = cl.getGroup(msg.to)\n X.preventJoinByTicket = False\n cl.updateGroup(X)\n invsend = 0\n Ti = cl.reissueGroupTicket(msg.to)\n kr.acceptGroupInvitationByTicket(msg.to,Ti)\n G = kk.getGroup(msg.to)\n G.preventJoinByTicket = True\n kk.updateGroup(G)\n\n nk0 = msg.text.replace(\"Nk: \",\"\")\n nk1 = nk0.lstrip()\n nk2 = nk1.replace(\"@\",\"\")\n nk3 = nk2.rstrip()\n _name = nk3\n\n targets = []\n for s in X.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets == []:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n\t\t\t if target not in admin:\n kr.kickoutFromGroup(msg.to,[target])\n kr.leaveGroup(msg.to)\n ki.sendText(msg.to,\"Succes BosQ\")\n kk.sendText(msg.to,\"Pakyu~\")\n\t\t\t else:\n\t\t\t cl.sendText(msg.to,\"Admin Detected\")\n\t\telse:\n\t\t cl.sendText(msg.to,\"Lu sape!\")\n \n elif msg.text in [\"Ban\"]:\n if msg.from_ in admin:\n wait[\"wblacklist\"] = True\n ki.sendText(msg.to,\"send contact\")\n\n elif msg.text in [\"Unban\"]:\n if msg.from_ in admin:\n wait[\"dblacklist\"] = True\n ki.sendText(msg.to,\"send contact\")\n \n elif \"Ban @\" in msg.text:\n if msg.from_ in admin:\n if msg.toType == 2:\n print 
\"@Ban by mention\"\n _name = msg.text.replace(\"Ban @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = ki.getGroup(msg.to)\n gs = kk.getGroup(msg.to)\n gs = kc.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n kc.sendText(msg.to,\"Not found\")\n else:\n for target in targets:\n\t\t\t if target not in admin:\n try:\n wait[\"blacklist\"][target] = True\n f=codecs.open('st2__b.json','w','utf-8')\n json.dump(wait[\"blacklist\"], f, sort_keys=True, indent=4,ensure_ascii=False)\n ki.sendText(msg.to,\"Succes BosQ\")\n except:\n ki.sendText(msg.to,\"Error\")\n\t\t\t else:\n\t\t\t\tcl.sendText(msg.to,\"Admin Detected~\")\n \n elif msg.text in [\"Banlist\",\"Ban list\"]:\n if msg.from_ in admin:\n if wait[\"blacklist\"] == {}:\n random.choice(KAC).sendText(msg.to,\"Tidak Ada\")\n else:\n mc = \"\"\n for mi_d in wait[\"blacklist\"]:\n mc += \"->\" +cl.getContact(mi_d).displayName + \"\\n\"\n random.choice(KAC).sendText(msg.to,\"===[Blacklist User]===\\n\"+mc)\n\n \n elif \"Unban @\" in msg.text:\n if msg.toType == 2:\n print \"@Unban by mention\"\n if msg.from_ in admin:\n _name = msg.text.replace(\"Unban @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = ki.getGroup(msg.to)\n gs = kk.getGroup(msg.to)\n gs = kc.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n kk.sendText(msg.to,\"Not found\")\n else:\n for target in targets:\n try:\n del wait[\"blacklist\"][target]\n f=codecs.open('st2__b.json','w','utf-8')\n json.dump(wait[\"blacklist\"], f, sort_keys=True, indent=4,ensure_ascii=False)\n ki.sendText(msg.to,\"Succes BosQ\")\n except:\n ki.sendText(msg.to,\"Succes BosQ\")\n \n \n elif msg.text.lower() == 'clear ban':\n if msg.from_ in admin:\n wait[\"blacklist\"] = {}\n cl.sendText(msg.to,\"ヽ( ^ω^)ノ└ ❉Unbanned All Success❉ ┐\") \n\n elif msg.text.lower() in [\"sayang\",\"chucky\"]:\n ki.sendText(msg.to,\"Apa Sayang :*\") \n \n\n \n elif msg.text in [\"Kill ban\"]:\n\t\tif msg.from_ in admin:\n if msg.toType == 2:\n group = cl.getGroup(msg.to)\n gMembMids = [contact.mid for contact in group.members]\n matched_list = []\n for tag in wait[\"blacklist\"]:\n matched_list+=filter(lambda str: str == tag, gMembMids)\n if matched_list == []:\n ki.sendText(msg.to,\"There was no blacklist user\")\n return\n for jj in matched_list:\n random.choice(KAC).kickoutFromGroup(msg.to,[jj])\n ki.sendText(msg.to,\"Blacklist emang pantas tuk di usir\")\n\t\telse:\n\t\t cl.sendText(msg.to, \"Khusus creator\")\n \n elif msg.text in [\"Kill\"]:\n if msg.toType == 2:\n if msg.from_ in admin:\n group = ki.getGroup(msg.to)\n gMembMids = [contact.mid for contact in group.members]\n matched_list = []\n for tag in wait[\"blacklist\"]:\n matched_list+=filter(lambda str: str == tag, gMembMids)\n if matched_list == []:\n kk.sendText(msg.to,\"Fuck You\")\n kc.sendText(msg.to,\"Fuck You\")\n return\n for jj in matched_list:\n try:\n klist=[ki,kk,kc]\n kicker=random.choice(klist)\n kicker.kickoutFromGroup(msg.to,[jj])\n print (msg.to,[jj])\n except:\n pass\n\n \n elif \"Kickall\" == msg.text:\n\t\t if msg.from_ in Creator:\n if msg.toType == 2:\n print \"Kick all member\"\n _name = msg.text.replace(\"Kickall\",\"\")\n gs = ki.getGroup(msg.to)\n gs = kk.getGroup(msg.to)\n gs = kc.getGroup(msg.to)\n ki.sendText(msg.to,\"Sampai jumpaa~\")\n kc.sendText(msg.to,\"Dadaaah~\")\n targets = []\n for g in gs.members:\n if _name in g.displayName:\n targets.append(g.mid)\n if 
targets == []:\n ki.sendText(msg.to,\"Not found.\")\n else:\n for target in targets:\n\t\t\t\tif target not in admin:\n try:\n klist=[ki,kk,kc]\n kicker=random.choice(klist)\n kicker.kickoutFromGroup(msg.to,[target])\n print (msg.to,[g.mid])\n except Exception as e:\n cl.sendText(msg.to,str(e))\n\t\t\t cl.inviteIntoGroup(msg.to, targets)\n \n\n\t elif msg.text in [\"Bot restart\",\"Reboot\"]:\n\t\tif msg.from_ in Creator:\n\t\t cl.sendText(msg.to, \"Bot Has Been Restarted...\")\n\t\t restart_program()\n\t\t print \"@Restart\"\n\t\telse:\n\t\t cl.sendText(msg.to, \"No Access\")\n\t\t \n elif msg.text in [\"Turn off\"]: \n\t if msg.from_ in Creator: \n try:\n import sys\n sys.exit()\n except:\n pass \t\t \n\n\n elif 'Crash' in msg.text:\n if msg.from_ in Creator:\n msg.contentType = 13\n msg.contentMetadata = {'mid': \"NADYA,'\"}\n cl.sendMessage(msg)\n\n\n\n elif \"Kapten copy @\" in msg.text:\n print \"[COPY] Ok\"\n _name = msg.text.replace(\"Kapten copy @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = cl.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n cl.sendText(msg.to, \"Not Found...\")\n else:\n for target in targets:\n try:\n cl.CloneContactProfile(target)\n cl.sendText(msg.to, \"Copied (^_^)\")\n except Exception as e:\n print e\n\n elif \"TC1 copy @\" in msg.text:\n print \"[COPY] Ok\"\n _name = msg.text.replace(\"TC1 copy @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = ki.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n ki.sendText(msg.to, \"Not Found...\")\n else:\n for target in targets:\n try:\n ki.CloneContactProfile(target)\n ki.sendText(msg.to, \"Copied (^_^)\")\n except Exception as e:\n print e\n\n elif \"TC2 copy @\" in msg.text:\n print \"[COPY] Ok\"\n _name = msg.text.replace(\"TC2 copy @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = kk.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n kk.sendText(msg.to, \"Not Found...\")\n else:\n for target in targets:\n try:\n kk.CloneContactProfile(target)\n kk.sendText(msg.to, \"Copied (^_^)\")\n except Exception as e:\n print e\n\n \n elif \"TC3 copy @\" in msg.text:\n print \"[COPY] Ok\"\n _name = msg.text.replace(\"TC3 copy @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = kc.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n kc.sendText(msg.to, \"Not Found...\")\n else:\n for target in targets:\n try:\n kc.CloneContactProfile(target)\n kc.sendText(msg.to, \"Copied (^_^)\")\n except Exception as e:\n print e\n\n\n elif \"TC4 copy @\" in msg.text:\n print \"[COPY] Ok\"\n _name = msg.text.replace(\"TC4 copy @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = kr.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n kr.sendText(msg.to, \"Not Found...\")\n else:\n for target in targets:\n try:\n kr.CloneContactProfile(target)\n kr.sendText(msg.to, \"Copied (^_^)\")\n except Exception as e:\n print e\n\n\n elif msg.text in [\"Backup all\"]:\n try:\n ki.updateDisplayPicture(backup2.pictureStatus)\n ki.updateProfile(backup2)\n\n kk.updateDisplayPicture(backup3.pictureStatus)\n kk.updateProfile(backup3)\n\n kc.updateDisplayPicture(backup4.pictureStatus)\n kc.updateProfile(backup4)\n\n 
kr.updateDisplayPicture(backup5.pictureStatus)\n kr.updateProfile(backup5)\n \n cl.updateDisplayPicture(backup1.pictureStatus)\n cl.updateProfile(backup1)\n cl.sendText(msg.to, \"All Done (^_^)\")\n except Exception as e:\n cl.sendText(msg.to, str(e))\n \n \n\n \n\n\n \n\t elif \"/musik \" in msg.text:\n\t\t\t\t\tsongname = msg.text.replace(\"/musik \",\"\")\n\t\t\t\t\tparams = {\"songname\": songname}\n\t\t\t\t\tr = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))\n\t\t\t\t\tdata = r.text\n\t\t\t\t\tdata = json.loads(data)\n\t\t\t\t\tfor song in data:\n\t\t\t\t\t\tabc = song[3].replace('https://','http://')\n\t\t\t\t\t\tcl.sendText(msg.to, \"Title : \" + song[0] + \"\\nLength : \" + song[1] + \"\\nLink download : \" + song[4])\n\t\t\t\t\t\tcl.sendText(msg.to, \"Lagu \" + song[0] + \"\\nSedang Di Prosses... Tunggu Sebentar ^_^ \")\n\t\t\t\t\t\tcl.sendAudioWithURL(msg.to,abc)\n\t\t\t\t\t\tcl.sendText(msg.to, \"Selamat Mendengarkan Lagu \" + song[0])\n\t\n elif '/lirik ' in msg.text.lower():\n try:\n songname = msg.text.lower().replace('/lirik ','')\n params = {'songname': songname}\n r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))\n data = r.text\n data = json.loads(data)\n for song in data:\n hasil = 'Lyric Lagu ('\n hasil += song[0]\n hasil += ')\\n\\n'\n hasil += song[5]\n cl.sendText(msg.to, hasil)\n except Exception as wak:\n cl.sendText(msg.to, str(wak))\n \n\t elif \"/musrik \" in msg.text:\n\t\t\t\t\tsongname = msg.text.replace(\"/musrik \",\"\")\n\t\t\t\t\tparams = {\"songname\": songname}\n\t\t\t\t\tr = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))\n\t\t\t\t\tdata = r.text\n\t\t\t\t\tdata = json.loads(data)\n\t\t\t\t\tfor song in data:\n\t\t\t\t\t\tabc = song[3].replace('https://','http://')\n\t\t\t\t\t\thasil = 'Lyric Lagu ('\n\t\t\t\t\t\thasil += song[0]\n\t\t\t\t\t\thasil += ')\\n\\n'\n\t\t\t\t\t\thasil += song[5]\n\t\t\t\t\t\tcl.sendText(msg.to, \"Lagu \" + song[0] + \"\\nSedang Di Prosses... 
Tunggu Sebentar ^_^ \")\n\t\t\t\t\t\tcl.sendAudioWithURL(msg.to,abc)\n\t\t\t\t\t\tcl.sendText(msg.to, \"Title : \" + song[0] + \"\\nLength : \" + song[1] + \"\\nLink download : \" + song[4] +\"\\n\\n\" + hasil)\n\t\t\t\t\t\tcl.sendText(msg.to, \"Selamat Mendengarkan Lagu \" + song[0])\n \n \n \n elif \"Fancytext: \" in msg.text:\n txt = msg.text.replace(\"Fancytext: \", \"\")\n cl.kedapkedip(msg.to,txt)\n print \"[Command] Kedapkedip\"\n\n\n elif \"cover @\" in msg.text:\n if msg.toType == 2:\n cover = msg.text.replace(\"cover @\",\"\")\n _nametarget = cover.rstrip(' ')\n gs = cl.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n cl.sendText(msg.to,\"Not found\")\n else:\n for target in targets:\n try:\n h = cl.channel.getHome(target)\n objId = h[\"result\"][\"homeInfo\"][\"objectId\"]\n cl.sendImageWithURL(msg.to,\"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=\" + target + \"&oid=\" + objId)\n except Exception as error:\n print error\n cl.sendText(msg.to,\"Upload image failed.\")\n\n elif \"Cover @\" in msg.text:\n if msg.toType == 2:\n cover = msg.text.replace(\"Cover @\",\"\")\n _nametarget = cover.rstrip(' ')\n gs = cl.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n cl.sendText(msg.to,\"Not found\")\n else:\n for target in targets:\n try:\n h = cl.channel.getHome(target)\n objId = h[\"result\"][\"homeInfo\"][\"objectId\"]\n cl.sendImageWithURL(msg.to,\"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=\" + target + \"&oid=\" + objId)\n except Exception as error:\n print error\n cl.sendText(msg.to,\"Upload image failed.\")\n \n \n elif \"pp @\" in msg.text:\n if msg.toType == 2:\n cover = msg.text.replace(\"pp @\",\"\")\n _nametarget = cover.rstrip(' ')\n gs = cl.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n cl.sendText(msg.to,\"Not found\")\n else:\n for target in targets:\n try:\n h = cl.getContact(target)\n cl.sendImageWithURL(msg.to,\"http://dl.profile.line-cdn.net/\" + h.pictureStatus)\n except Exception as error:\n print error\n cl.sendText(msg.to,\"Upload image failed.\")\n\n elif \"Pp @\" in msg.text:\n if msg.toType == 2:\n cover = msg.text.replace(\"Pp @\",\"\")\n _nametarget = cover.rstrip(' ')\n gs = cl.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n cl.sendText(msg.to,\"Not found\")\n else:\n for target in targets:\n try:\n h = cl.getContact(target)\n cl.sendImageWithURL(msg.to,\"http://dl.profile.line-cdn.net/\" + h.pictureStatus)\n except Exception as error:\n print error\n cl.sendText(msg.to,\"Upload image failed.\")\n\n elif msg.text.lower() in [\"van\",\"yog\",\"wan\",\"gong\",\"tep\",\"pap creator\"]:\n link = [\"http://dl.profile.line-cdn.net/0hbPvoismJPX9LFhHY8ztCKHdTMxI8OCw3JXclGzwRYBpgci99fyV2GzwUY01icXl5J3EnHjxBakxj\"]\n pilih = random.choice(link)\n ki.sendImageWithURL(msg.to,pilih)\n elif msg.text.lower() in [\"van\",\"yog\",\"wan\",\"gong\",\"tep\",\"pap owner\",\"pap creator\"]:\n link = [\"http://dl.profile.line-cdn.net/0hbPvoismJPX9LFhHY8ztCKHdTMxI8OCw3JXclGzwRYBpgci99fyV2GzwUY01icXl5J3EnHjxBakxj\"]\n pilih = random.choice(link)\n ki.sendImageWithURL(msg.to,pilih)\n \n elif \"Spam: \" in msg.text:\n bctxt = msg.text.replace(\"Spam: \", \"\")\n t = 10\n while(t):\n 
random.choice(KAC).sendText(msg.to, (bctxt))\n t-=1\n\n elif \"Scbc \" in msg.text:\n bctxt = msg.text.replace(\"Scbc \", \"\")\n orang = cl.getAllContactIds()\n t = 20\n for manusia in orang:\n while(t):\n cl.sendText(manusia, (bctxt))\n t-=1\n\n elif \"Cbc \" in msg.text:\n broadcasttxt = msg.text.replace(\"Cbc \", \"\") \n orang = cl.getAllContactIds()\n for manusia in orang:\n cl.sendText(manusia, (broadcasttxt))\n\n \n elif '/ig ' in msg.text.lower():\n try:\n instagram = msg.text.lower().replace(\"/ig \",\"\")\n html = requests.get('https://www.instagram.com/' + instagram + '/?')\n soup = BeautifulSoup(html.text, 'html.parser')\n data = soup.find_all('meta', attrs={'property':'og:description'})\n text = data[0].get('content').split()\n data1 = soup.find_all('meta', attrs={'property':'og:image'})\n text1 = data1[0].get('content').split()\n nadya = text1[0].replace(\"s150x150/\",\"\")\n user = \"Name: \" + text[-2] + \"\\n\"\n user1 = \"Username: \" + text[-1] + \"\\n\"\n followers = \"Followers: \" + text[0] + \"\\n\"\n following = \"Following: \" + text[2] + \"\\n\"\n post = \"Post: \" + text[4] + \"\\n\"\n link = \"Link: \" + \"https://www.instagram.com/\" + instagram\n detail = \"========INSTAGRAM INFO ========\\n\"\n details = \"\\n========INSTAGRAM INFO ========\"\n cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)\n cl.sendImageWithURL(msg.to, nadya)\n except Exception as njer:\n \tcl.sendText(msg.to, str(njer))\n \t\n \t\n elif \"Checkig \" in msg.text:\n separate = msg.text.split(\" \")\n user = msg.text.replace(separate[0] + \" \",\"\")\n if user.startswith(\"@\"):\n user = user.replace(\"@\",\"\")\n profile = \"https://www.instagram.com/\" + user\n with requests.session() as x:\n x.headers['user-agent'] = 'Mozilla/5.0'\n end_cursor = ''\n for count in range(1, 999):\n print('PAGE: ', count)\n r = x.get(profile, params={'max_id': end_cursor})\n \n data = re.search(r'window._sharedData = (\\{.+?});', r.text).group(1)\n j = json.loads(data)\n \n for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']: \n if node['is_video']:\n page = 'https://www.instagram.com/p/' + node['code']\n r = x.get(page)\n url = re.search(r'\"video_url\": \"([^\"]+)\"', r.text).group(1)\n print(url)\n cl.sendVideoWithURL(msg.to,url)\n else:\n print (node['display_src'])\n cl.sendImageWithURL(msg.to,node['display_src'])\n end_cursor = re.search(r'\"end_cursor\": \"([^\"]+)\"', r.text).group(1) \t\n\n\n elif 'Youtubelink: ' in msg.text:\n try:\n textToSearch = (msg.text).replace('Youtube ', \"\").strip()\n query = urllib.quote(textToSearch)\n url = \"https://www.youtube.com/results?search_query=\" + query\n response = urllib2.urlopen(url)\n html = response.read()\n soup = BeautifulSoup(html, \"html.parser\")\n results = soup.find(attrs={'class':'yt-uix-tile-link'})\n cl.sendText(msg.to,'https://www.youtube.com' + results['href'])\n except:\n cl.sendText(msg.to,\"Could not find it\")\n \n \n elif 'Youtubevideo: ' in msg.text:\n try:\n textToSearch = (msg.text).replace('Youtubevideo: ', \"\").strip()\n query = urllib.quote(textToSearch)\n url = \"https://www.youtube.com/results?search_query=\" + query\n response = urllib2.urlopen(url)\n html = response.read()\n soup = BeautifulSoup(html, \"html.parser\")\n results = soup.find(attrs={'class': 'yt-uix-tile-link'})\n cl.sendVideoWithURL(msg.to,'https://www.youtube.com' + results['href'])\n except:\n cl.sendText(msg.to, \"Could not find it\") \n\n \n elif \"Say-id \" in msg.text:\n say = 
msg.text.replace(\"Say-id \",\"\")\n lang = 'id'\n tts = gTTS(text=say, lang=lang)\n tts.save(\"hasil.mp3\")\n cl.sendAudio(msg.to,\"hasil.mp3\")\n\n elif \"Say-en \" in msg.text:\n say = msg.text.replace(\"Say-en \",\"\")\n lang = 'en'\n tts = gTTS(text=say, lang=lang)\n tts.save(\"hasil.mp3\")\n cl.sendAudio(msg.to,\"hasil.mp3\")\n\n elif \"Say-jp \" in msg.text:\n say = msg.text.replace(\"Say-jp \",\"\")\n lang = 'ja'\n tts = gTTS(text=say, lang=lang)\n tts.save(\"hasil.mp3\")\n cl.sendAudio(msg.to,\"hasil.mp3\")\n\n elif \"Say welcome\" in msg.text:\n gs = cl.getGroup(msg.to)\n say = msg.text.replace(\"Say welcome\",\"Selamat Datang Di \"+ gs.name)\n lang = 'id'\n tts = gTTS(text=say, lang=lang)\n tts.save(\"hasil.mp3\")\n cl.sendAudio(msg.to,\"hasil.mp3\")\n \n\n elif msg.text.lower() in [\"hi\",\"hai\",\"halo\",\"hallo\"]:\n beb = \"Hi Sayang 😘 \" +cl.getContact(msg.from_).displayName + \" 􀸂􀆇starry heart􏿿\"\n kr.sendText(msg.to,beb)\n\n\n\n elif \"playstore \" in msg.text.lower():\n tob = msg.text.lower().replace(\"playstore \",\"\")\n cl.sendText(msg.to,\"Sedang Mencari...\")\n cl.sendText(msg.to,\"Title : \"+tob+\"\\nSource : Google Play\\nLink : https://play.google.com/store/search?q=\" + tob)\n cl.sendText(msg.to,\"Tuh Linknya Kak (^_^)\")\n\n\n elif \"Mid @\" in msg.text:\n _name = msg.text.replace(\"Mid @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = cl.getGroup(msg.to)\n for g in gs.members:\n if _nametarget == g.displayName:\n random.choice(KAC).sendText(msg.to, g.mid)\n else:\n pass\n\n\n elif \"/bio \" in msg.text:\n string = msg.text.replace(\"/bio \",\"\")\n if len(string.decode('utf-8')) <= 500:\n profile = cl.getProfile()\n profile.statusMessage = string\n cl.updateProfile(profile)\n ki.updateProfile(profile)\n kk.updateProfile(profile)\n kc.updateProfile(profile)\n kr.updateProfile(profile)\n cl.sendText(msg.to,\"All Done\")\n\n elif \"/cnkapten\" in msg.text:\n\t\tif msg.from_ in Creator:\n string = msg.text.replace(\"/cnkapten\",\"Mi Kapten\")\n if len(string.decode('utf-8')) <= 5000:\n profile = cl.getProfile()\n profile.displayName = string\n cl.updateProfile(profile)\n cl.sendText(msg.to,\"Done\")\n\n elif \"/cntc1\" in msg.text:\n\t\tif msg.from_ in Creator:\n string = msg.text.replace(\"/cntc1\",\"Mi TC1\")\n if len(string.decode('utf-8')) <= 5000:\n profile = ki.getProfile()\n profile.displayName = string\n ki.updateProfile(profile)\n ki.sendText(msg.to,\"Done\")\n\n elif \"/cntc2\" in msg.text:\n\t\tif msg.from_ in Creator:\n string = msg.text.replace(\"/cntc2\",\"Mi TC2\")\n if len(string.decode('utf-8')) <= 5000:\n profile = kk.getProfile()\n profile.displayName = string\n kk.updateProfile(profile)\n kk.sendText(msg.to,\"Done\")\n\n elif \"/cntc3\" in msg.text:\n\t\tif msg.from_ in Creator:\n string = msg.text.replace(\"/cntc3\",\"Mi TC3\")\n if len(string.decode('utf-8')) <= 5000:\n profile = kc.getProfile()\n profile.displayName = string\n kc.updateProfile(profile)\n kc.sendText(msg.to,\"Done\")\n\n elif \"/cntc4\" in msg.text:\n\t\tif msg.from_ in Creator:\n string = msg.text.replace(\"/cntc4\",\"Mi TC4\")\n if len(string.decode('utf-8')) <= 5000:\n profile = cl.getProfile()\n profile.displayName = string\n kr.updateProfile(profile)\n kr.sendText(msg.to,\"Done\")\n\n\n elif \"Ulti \" in msg.text:\n if msg.from_ in Creator:\n ulti0 = msg.text.replace(\"Ulti \",\"\")\n ulti1 = ulti0.rstrip()\n ulti2 = ulti1.replace(\"@\",\"\")\n ulti3 = ulti2.rstrip()\n _name = ulti3\n gs = cl.getGroup(msg.to)\n ginfo = cl.getGroup(msg.to)\n gs.preventJoinByTicket = 
False\n cl.updateGroup(gs)\n invsend = 0\n Ticket = cl.reissueGroupTicket(msg.to)\n km.acceptGroupInvitationByTicket(msg.to,Ticket)\n time.sleep(0.2)\n targets = []\n for s in gs.members:\n if _name in s.displayName:\n targets.append(s.mid)\n if targets ==[]:\n sendMessage(msg.to,\"user does not exist\")\n pass\n else:\n for target in targets:\n try:\n km.kickoutFromGroup(msg.to,[target])\n km.leaveGroup(msg.to)\n print (msg.to,[g.mid])\n except:\n km.sendText(msg.t,\"Ter ELIMINASI....\")\n km.sendText(msg.to,\"WOLES brooo....!!!\")\n km.leaveGroup(msg.to)\n gs = cl.getGroup(msg.to)\n gs.preventJoinByTicket = True\n cl.updateGroup(gs)\n gs.preventJoinByTicket(gs)\n cl.updateGroup(gs)\n\n\n elif msg.text.lower() in [\"mymid\",\"myid\"]:\n middd = \"Name : \" +cl.getContact(msg.from_).displayName + \"\\nMid : \" +msg.from_\n kr.sendText(msg.to,middd)\n\n elif msg.text.lower() in [\"me\"]:\n msg.contentType = 13\n msg.contentMetadata = {'mid': msg.from_}\n cl.sendMessage(msg)\n\n elif \"/apakah \" in msg.text:\n apk = msg.text.replace(\"/apakah \",\"\")\n rnd = [\"Ya\",\"Tidak\",\"Bisa Jadi\",\"Mungkin\"]\n p = random.choice(rnd)\n lang = 'id'\n tts = gTTS(text=p, lang=lang)\n tts.save(\"hasil.mp3\")\n cl.sendAudio(msg.to,\"hasil.mp3\")\n \n elif \"/hari \" in msg.text:\n apk = msg.text.replace(\"/hari \",\"\")\n rnd = [\"Senin\",\"Selasa\",\"Rabu\",\"Kamis\",\"Jumat\",\"Sabtu\",\"Minggu\"]\n p = random.choice(rnd)\n lang = 'id'\n tts = gTTS(text=p, lang=lang)\n tts.save(\"hasil.mp3\")\n cl.sendAudio(msg.to,\"hasil.mp3\") \n\n\n elif \"/berapa \" in msg.text:\n apk = msg.text.replace(\"/berapa \",\"\")\n rnd = ['10%','20%','30%','40%','50%','60%','70%','80%','90%','100%','0%']\n p = random.choice(rnd)\n lang = 'id'\n tts = gTTS(text=p, lang=lang)\n tts.save(\"hasil.mp3\")\n cl.sendAudio(msg.to,\"hasil.mp3\")\n \n elif \"/berapakah \" in msg.text:\n apk = msg.text.replace(\"/berapakah \",\"\")\n rnd = ['1','2','3','4','5','6','7','8','9','10','Tidak Ada']\n p = random.choice(rnd)\n lang = 'id'\n tts = gTTS(text=p, lang=lang)\n tts.save(\"hasil.mp3\")\n cl.sendAudio(msg.to,\"hasil.mp3\") \n\n elif \"/kapan \" in msg.text:\n apk = msg.text.replace(\"/kapan \",\"\")\n rnd = [\"kapan kapan\",\"besok\",\"satu abad lagi\",\"Hari ini\",\"Tahun depan\",\"Minggu depan\",\"Bulan depan\",\"Sebentar lagi\",\"Tidak Akan Pernah\"]\n p = random.choice(rnd)\n lang = 'id'\n tts = gTTS(text=p, lang=lang)\n tts.save(\"hasil.mp3\")\n cl.sendAudio(msg.to,\"hasil.mp3\")\n\n \n elif msg.text in [\"Simisimi on\",\"Simisimi:on\"]:\n settings[\"simiSimi\"][msg.to] = True\n wait[\"Simi\"] = True\n cl.sendText(msg.to,\" Simisimi Di Aktifkan\")\n \n elif msg.text in [\"Simisimi off\",\"Simisimi:off\"]:\n settings[\"simiSimi\"][msg.to] = False\n wait[\"Simi\"] = False\n cl.sendText(msg.to,\"Simisimi Di Nonaktifkan\")\n\n \n elif \"Image \" in msg.text:\n search = msg.text.replace(\"Image \",\"\")\n url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search\n raw_html = (download_page(url))\n items = []\n items = items + (_images_get_all_items(raw_html))\n path = random.choice(items)\n print path\n try:\n cl.sendImageWithURL(msg.to,path)\n except:\n pass\n \n elif \"Youtubesearch: \" in msg.text:\n query = msg.text.replace(\"Youtube \",\"\")\n with requests.session() as s:\n s.headers['user-agent'] = 'Mozilla/5.0'\n url = 'http://www.youtube.com/results'\n params = {'search_query': query}\n r = s.get(url, params=params)\n soup = BeautifulSoup(r.content, 
'html.parser')\n hasil = \"\"\n for a in soup.select('.yt-lockup-title > a[title]'):\n if '&list=' not in a['href']:\n hasil += ''.join((a['title'],'\\nUrl : http://www.youtube.com' + a['href'],'\\n\\n'))\n cl.sendText(msg.to,hasil)\n print '[Command] Youtube Search'\n\n\n \n elif \"Tr-id \" in msg.text:\n isi = msg.text.replace(\"Tr-id \",\"\")\n translator = Translator()\n hasil = translator.translate(isi, dest='id')\n A = hasil.text\n A = A.encode('utf-8')\n cl.sendText(msg.to, A)\n\n elif \"Tr-en \" in msg.text:\n isi = msg.text.replace(\"Tr-en \",\"\")\n translator = Translator()\n hasil = translator.translate(isi, dest='en')\n A = hasil.text\n A = A.encode('utf-8')\n cl.sendText(msg.to, A)\n \n elif \"Tr-th \" in msg.text:\n isi = msg.text.replace(\"Tr-th \",\"\")\n translator = Translator()\n hasil = translator.translate(isi, dest='th')\n A = hasil.text\n A = A.encode('utf-8')\n cl.sendText(msg.to, A) \n\n \n elif \"Id@en\" in msg.text:\n bahasa_awal = 'id'\n bahasa_tujuan = 'en'\n kata = msg.text.replace(\"Id@en \",\"\")\n url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(\" \", \"+\"))\n agent = {'User-Agent':'Mozilla/5.0'}\n cari_hasil = 'class=\"t0\">'\n request = urllib2.Request(url, headers=agent)\n page = urllib2.urlopen(request).read()\n result = page[page.find(cari_hasil)+len(cari_hasil):]\n result = result.split(\"<\")[0]\n cl.sendText(msg.to,\"----Dari Indonesia----\\n\" + \"\" + kata + \"\\n\\n----Ke Inggris----\\n\" + \"\" + result)\n\n\n elif \"En@id\" in msg.text:\n bahasa_awal = 'en'\n bahasa_tujuan = 'id'\n kata = msg.text.replace(\"En@id \",\"\")\n url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(\" \", \"+\"))\n agent = {'User-Agent':'Mozilla/5.0'}\n cari_hasil = 'class=\"t0\">'\n request = urllib2.Request(url, headers=agent)\n page = urllib2.urlopen(request).read()\n result = page[page.find(cari_hasil)+len(cari_hasil):]\n result = result.split(\"<\")[0]\n cl.sendText(msg.to,\"----Dari Inggris----\\n\" + \"\" + kata + \"\\n\\n----Ke Indonesia----\\n\" + \"\" + result)\n \n \n elif \"Id@th\" in msg.text:\n bahasa_awal = 'id'\n bahasa_tujuan = 'th'\n kata = msg.text.replace(\"Id@en \",\"\")\n url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(\" \", \"+\"))\n agent = {'User-Agent':'Mozilla/5.0'}\n cari_hasil = 'class=\"t0\">'\n request = urllib2.Request(url, headers=agent)\n page = urllib2.urlopen(request).read()\n result = page[page.find(cari_hasil)+len(cari_hasil):]\n result = result.split(\"<\")[0]\n cl.sendText(msg.to,\"----Dari Indonesia----\\n\" + \"\" + kata + \"\\n\\n----Ke Thailand----\\n\" + \"\" + result)\n \n \n elif \"Th@id\" in msg.text:\n bahasa_awal = 'th'\n bahasa_tujuan = 'id'\n kata = msg.text.replace(\"Id@en \",\"\")\n url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(\" \", \"+\"))\n agent = {'User-Agent':'Mozilla/5.0'}\n cari_hasil = 'class=\"t0\">'\n request = urllib2.Request(url, headers=agent)\n page = urllib2.urlopen(request).read()\n result = page[page.find(cari_hasil)+len(cari_hasil):]\n result = result.split(\"<\")[0]\n cl.sendText(msg.to,\"----Dari Thailand----\\n\" + \"\" + kata + \"\\n\\n----Ke Indonesia----\\n\" + \"\" + result) \n \n elif msg.text in [\"Friendlist\"]: \n contactlist = cl.getAllContactIds()\n kontak = cl.getContacts(contactlist)\n num=1\n 
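The Tr-id / Tr-en / Tr-th branches above each construct a googletrans Translator and differ only in the dest code. A hedged sketch of that call in isolation — it assumes the same unofficial googletrans package the handlers use, whose web endpoint is not guaranteed to stay available:

from googletrans import Translator

def translate_text(text, dest='id'):
    # Translate `text` into the target language code, as the Tr-* handlers above do.
    translator = Translator()
    result = translator.translate(text, dest=dest)
    return result.text

# Example: translate_text("good morning", dest='id')  # roughly "selamat pagi"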
msgs=\"═════════List Friend═════════\"\n for ids in kontak:\n msgs+=\"\\n[%i] %s\" % (num, ids.displayName)\n num=(num+1)\n msgs+=\"\\n═════════List Friend═════════\\n\\nTotal Friend : %i\" % len(kontak)\n cl.sendText(msg.to, msgs)\n\n elif msg.text in [\"Memlist\"]: \n kontak = cl.getGroup(msg.to)\n group = kontak.members\n num=1\n msgs=\"═════════List Member═�����═══════-\"\n for ids in group:\n msgs+=\"\\n[%i] %s\" % (num, ids.displayName)\n num=(num+1)\n msgs+=\"\\n═════════List Member═════════\\n\\nTotal Members : %i\" % len(group)\n cl.sendText(msg.to, msgs)\n\n \n\n elif msg.text in [\"Spam\"]:\n if msg.from_ in admin:\n cl.sendText(msg.to,\"Aku belum mandi\")\n ki.sendText(msg.to,\"Tak tun tuang\")\n kk.sendText(msg.to,\"Tak tun tuang\")\n cl.sendText(msg.to,\"Tapi masih cantik juga\")\n ki.sendText(msg.to,\"Tak tun tuang\")\n kk.sendText(msg.to,\"Tak tun tuang\")\n cl.sendText(msg.to,\"apalagi kalau sudah mandi\")\n ki.sendText(msg.to,\"Tak tun tuang\")\n kk.sendText(msg.to,\"Pasti cantik sekali\")\n cl.sendText(msg.to,\"yiha\")\n ki.sendText(msg.to,\"Kalau orang lain melihatku\")\n kk.sendText(msg.to,\"Tak tun tuang\")\n cl.sendText(msg.to,\"Badak aku taba bana\")\n ki.sendText(msg.to,\"Tak tun tuang\")\n kk.sendText(msg.to,\"Tak tuntuang\")\n cl.sendText(msg.to,\"Tapi kalau langsuang diidu\")\n ki.sendText(msg.to,\"Tak tun tuang\")\n kk.sendText(msg.to,\"Atagfirullah baunya\")\n cl.sendText(msg.to,\"Males lanjutin ah\")\n ki.sendText(msg.to,\"Sepi bat\")\n kk.sendText(msg.to,\"Iya sepi udah udah\")\n cl.sendText(msg.to,\"Gaada yang denger juga kita nyanyi\")\n ki.sendText(msg.to,\"Nah\")\n kk.sendText(msg.to,\"Mending gua makan dulu\")\n cl.sendText(msg.to,\"Siyap\")\n ki.sendText(msg.to,\"Okeh\")\n kk.sendText(msg.to,\"Katanya owner kita Jomblo ya\")\n cl.sendText(msg.to,\"Iya emang\")\n ki.sendText(msg.to,\"Denger denger si lagi nyari pacar doi\")\n kk.sendText(msg.to,\"Udah ah gosip mulu doain aja biar dapet\")\n \n elif \"Getvid @\" in msg.text:\n print \"[Command]dp executing\"\n _name = msg.text.replace(\"Getvid @\",\"\")\n _nametarget = _name.rstrip(' ')\n gs = cl.getGroup(msg.to)\n targets = []\n for g in gs.members:\n if _nametarget == g.displayName:\n targets.append(g.mid)\n if targets == []:\n cl.sendText(msg.to,\"Contact not found\")\n else:\n for target in targets:\n try:\n contact = cl.getContact(target)\n path = \"http://dl.profile.line-cdn.net/\" + contact.pictureStatus\n cl.sendVideoWithURL(msg.to, path)\n except Exception as e:\n raise e\n print \"[Command]dp executed\"\n\n\n elif \"Getgroup image\" in msg.text:\n group = cl.getGroup(msg.to)\n path = \"http://dl.profile.line-cdn.net/\" + group.pictureStatus\n cl.sendImageWithURL(msg.to,path)\n\n elif \"Urlgroup image\" in msg.text:\n group = cl.getGroup(msg.to)\n path = \"http://dl.profile.line-cdn.net/\" + group.pictureStatus\n cl.sendText(msg.to,path)\n \n elif \"Getname\" in msg.text:\n key = eval(msg.contentMetadata[\"MENTION\"])\n key1 = key[\"MENTIONEES\"][0][\"M\"]\n contact = cl.getContact(key1)\n cu = cl.channel.getCover(key1)\n try:\n cl.sendText(msg.to, \"===[DisplayName]===\\n\" + contact.displayName)\n except:\n cl.sendText(msg.to, \"===[DisplayName]===\\n\" + contact.displayName)\n\n\n elif \"Getprofile\" in msg.text:\n key = eval(msg.contentMetadata[\"MENTION\"])\n key1 = key[\"MENTIONEES\"][0][\"M\"]\n contact = cl.getContact(key1)\n cu = cl.channel.getCover(key1)\n path = str(cu)\n image = \"http://dl.profile.line-cdn.net/\" + contact.pictureStatus\n try:\n cl.sendText(msg.to,\"Nama :\\n\" + 
contact.displayName + \"\\n\\nBio :\\n\" + contact.statusMessage)\n cl.sendText(msg.to,\"Profile Picture \" + contact.displayName)\n cl.sendImageWithURL(msg.to,image)\n cl.sendText(msg.to,\"Cover \" + contact.displayName)\n cl.sendImageWithURL(msg.to,path)\n except:\n pass\n\n\n elif \"Getcontact\" in msg.text:\n key = eval(msg.contentMetadata[\"MENTION\"])\n key1 = key[\"MENTIONEES\"][0][\"M\"] \n mmid = cl.getContact(key1)\n msg.contentType = 13\n msg.contentMetadata = {\"mid\": key1}\n cl.sendMessage(msg)\n\n elif \"Getinfo\" in msg.text:\n key = eval(msg.contentMetadata[\"MENTION\"])\n key1 = key[\"MENTIONEES\"][0][\"M\"]\n contact = cl.getContact(key1)\n cu = cl.channel.getCover(key1)\n try:\n cl.sendText(msg.to,\"Nama :\\n\" + contact.displayName + \"\\n\\nMid :\\n\" + contact.mid + \"\\n\\nBio :\\n\" + contact.statusMessage + \"\\n\\nProfile Picture :\\nhttp://dl.profile.line-cdn.net/\" + contact.pictureStatus + \"\\n\\nHeader :\\n\" + str(cu))\n except:\n cl.sendText(msg.to,\"Nama :\\n\" + contact.displayName + \"\\n\\nMid :\\n\" + contact.mid + \"\\n\\nBio :\\n\" + contact.statusMessage + \"\\n\\nProfile Picture :\\n\" + str(cu))\n\n\n elif \"Getbio\" in msg.text:\n key = eval(msg.contentMetadata[\"MENTION\"])\n key1 = key[\"MENTIONEES\"][0][\"M\"]\n contact = cl.getContact(key1)\n cu = cl.channel.getCover(key1)\n try:\n cl.sendText(msg.to, \"===[StatusMessage]===\\n\" + contact.statusMessage)\n except:\n cl.sendText(msg.to, \"===[StatusMessage]===\\n\" + contact.statusMessage)\n\n\n elif msg.text.lower() == 'runtime':\n eltime = time.time() - mulai\n van = \"Bot Sudah Berjalan Selama :\\n\"+waktu(eltime)\n cl.sendText(msg.to,van)\n \n \n elif \"Checkdate \" in msg.text:\n tanggal = msg.text.replace(\"Checkdate \",\"\")\n r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)\n data=r.text\n data=json.loads(data)\n lahir = data[\"data\"][\"lahir\"]\n usia = data[\"data\"][\"usia\"]\n ultah = data[\"data\"][\"ultah\"]\n zodiak = data[\"data\"][\"zodiak\"]\n cl.sendText(msg.to,\"========== I N F O R M A S I ==========\\n\"+\"Date Of Birth : \"+lahir+\"\\nAge : \"+usia+\"\\nUltah : \"+ultah+\"\\nZodiak : \"+zodiak+\"\\n========== I N F O R M A S I ==========\")\n \n \n elif msg.text in [\"Kalender\",\"Time\",\"Waktu\"]:\n timeNow = datetime.now()\n timeHours = datetime.strftime(timeNow,\"(%H:%M)\")\n day = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\",\"Friday\", \"Saturday\"]\n hari = [\"Minggu\", \"Senin\", \"Selasa\", \"Rabu\", \"Kamis\", \"Jumat\", \"Sabtu\"]\n bulan = [\"Januari\", \"Februari\", \"Maret\", \"April\", \"Mei\", \"Juni\", \"Juli\", \"Agustus\", \"September\", \"Oktober\", \"November\", \"Desember\"]\n inihari = datetime.today()\n hr = inihari.strftime('%A')\n bln = inihari.strftime('%m')\n for i in range(len(day)):\n if hr == day[i]: hasil = hari[i]\n for k in range(0, len(bulan)):\n if bln == str(k): bln = bulan[k-1]\n rst = hasil + \", \" + inihari.strftime('%d') + \" - \" + bln + \" - \" + inihari.strftime('%Y') + \"\\nJam : [ \" + inihari.strftime('%H:%M:%S') + \" ]\"\n cl.sendText(msg.to, rst) \n \n \n elif \"SearchID: \" in msg.text:\n userid = msg.text.replace(\"SearchID: \",\"\")\n contact = cl.findContactsByUserid(userid)\n msg.contentType = 13\n msg.contentMetadata = {'mid': contact.mid}\n cl.sendMessage(msg)\n \n elif \"Searchid: \" in msg.text:\n userid = msg.text.replace(\"Searchid: \",\"\")\n contact = cl.findContactsByUserid(userid)\n 
msg.contentType = 13\n msg.contentMetadata = {'mid': contact.mid}\n cl.sendMessage(msg) \n \n \n elif \"removechat\" in msg.text.lower():\n if msg.from_ in admin:\n try:\n cl.removeAllMessages(op.param2)\n ki.removeAllMessages(op.param2)\n kk.removeAllMessages(op.param2)\n kc.removeAllMessages(op.param2)\n kr.removeAllMessages(op.param2)\n print \"[Command] Remove Chat\"\n cl.sendText(msg.to,\"Done\")\n except Exception as error:\n print error\n cl.sendText(msg.to,\"Error\") \n \n \n elif \"Invitemeto: \" in msg.text:\n if msg.from_ in admin:\n gid = msg.text.replace(\"Invitemeto: \",\"\")\n if gid == \"\":\n cl.sendText(msg.to,\"Invalid group id\")\n else:\n try:\n cl.findAndAddContactsByMid(msg.from_)\n ki.findAndAddContactsByMid(msg.from_)\n kk.findAndAddContactsByMid(msg.from_)\n kc.findAndAddContactsByMid(msg.from_)\n kr.findAndAddContactsByMid(msg.from_)\n random.choice(KAC).inviteIntoGroup(gid,[msg.from_])\n except:\n cl.sendText(msg.to,\"Mungkin Saya Tidak Di Dalaam Grup Itu\")\n\n\n elif msg.text in [\"Glist\"]:\n cl.sendText(msg.to, \"Tunggu Sebentar. . .\") \n gid = cl.getGroupIdsJoined()\n h = \"\"\n for i in gid:\n h += \"╠➩\" + \"%s\\n\" % (cl.getGroup(i).name +\" ~> [\"+str(len(cl.getGroup(i).members))+\"]\")\n cl.sendText(msg.to,\"╔═════════════════════════\\n║ ☆☞ LIST GROUPS☜☆\\n╠═════════════════════════\\n\" + h + \"╠═════════════════════════\" + \"\\n║ Total Groups =\" +\" [\"+str(len(gid))+\"]\\n╚═════════════════════════\")\n\n elif msg.text in [\"Glistmid\"]: \n gruplist = kr.getGroupIdsJoined()\n kontak = kr.getGroups(gruplist)\n num=1\n msgs=\"═════════List GrupMid═════════\"\n for ids in kontak:\n msgs+=\"\\n[%i] %s\" % (num, ids.id)\n num=(num+1)\n msgs+=\"\\n═════════List GrupMid═════════\\n\\nTotal Grup : %i\" % len(kontak)\n kr.sendText(msg.to, msgs)\n\n\n\n elif \"Google: \" in msg.text:\n a = msg.text.replace(\"Google: \",\"\")\n b = urllib.quote(a)\n cl.sendText(msg.to,\"Sedang Mencari...\")\n cl.sendText(msg.to, \"https://www.google.com/\" + b)\n cl.sendText(msg.to,\"Itu Dia Linknya. . 
.\") \n\n\n elif \"Details group: \" in msg.text:\n if msg.from_ in admin:\n gid = msg.text.replace(\"Details group: \",\"\")\n if gid in [\"\",\" \"]:\n cl.sendText(msg.to,\"Grup id tidak valid\")\n else:\n try:\n groups = cl.getGroup(gid)\n if groups.members is not None:\n members = str(len(groups.members))\n else:\n members = \"0\"\n if groups.invitee is not None:\n pendings = str(len(groups.invitee))\n else:\n pendings = \"0\"\n h = \"[\" + groups.name + \"]\\n -+GroupID : \" + gid + \"\\n -+Members : \" + members + \"\\n -+MembersPending : \" + pendings + \"\\n -+Creator : \" + groups.creator.displayName + \"\\n -+GroupPicture : http://dl.profile.line.naver.jp/\" + groups.pictureStatus\n cl.sendText(msg.to,h)\n except Exception as error:\n cl.sendText(msg.to,(error))\n \n elif \"Cancel invite: \" in msg.text:\n if msg.from_ in admin:\n gids = msg.text.replace(\"Cancel invite: \",\"\")\n gid = cl.getGroup(gids)\n for i in gid:\n if i is not None:\n try:\n cl.rejectGroupInvitation(i)\n except:\n cl.sendText(msg.to,\"Error!\")\n break\n else:\n break\n if gid is not None:\n cl.sendText(msg.to,\"Berhasil tolak undangan dari grup \" + gid.name)\n else:\n cl.sendText(msg.to,\"Grup tidak ditemukan\")\n \n elif msg.text in [\"Kapten acc invite\"]:\n if msg.from_ in admin:\n gid = cl.getGroupIdsInvited()\n _list = \"\"\n for i in gid:\n if i is not None:\n gids = cl.getGroup(i)\n _list += gids.name\n cl.acceptGroupInvitation(i)\n else:\n break\n if gid is not None:\n cl.sendText(msg.to,\"Berhasil terima semua undangan dari grup :\\n\" + _list)\n else:\n cl.sendText(msg.to,\"Tidak ada grup yang tertunda saat ini\") \n \n elif msg.text in [\"TC1 acc invite\"]:\n if msg.from_ in admin:\n gid = ki.getGroupIdsInvited()\n _list = \"\"\n for i in gid:\n if i is not None:\n gids = ki.getGroup(i)\n _list += gids.name\n ki.acceptGroupInvitation(i)\n else:\n break\n if gid is not None:\n ki.sendText(msg.to,\"Berhasil terima semua undangan dari grup :\\n\" + _list)\n else:\n ki.sendText(msg.to,\"Tidak ada grup yang tertunda saat ini\") \n \n elif msg.text in [\"TC2 acc invite\"]:\n if msg.from_ in admin:\n gid = kk.getGroupIdsInvited()\n _list = \"\"\n for i in gid:\n if i is not None:\n gids = kk.getGroup(i)\n _list += gids.name\n kk.acceptGroupInvitation(i)\n else:\n break\n if gid is not None:\n kk.sendText(msg.to,\"Berhasil terima semua undangan dari grup :\\n\" + _list)\n else:\n kk.sendText(msg.to,\"Tidak ada grup yang tertunda saat ini\") \n \n elif msg.text in [\"TC3 acc invite\"]:\n if msg.from_ in admin:\n gid = kc.getGroupIdsInvited()\n _list = \"\"\n for i in gid:\n if i is not None:\n gids = kc.getGroup(i)\n _list += gids.name\n kc.acceptGroupInvitation(i)\n else:\n break\n if gid is not None:\n kc.sendText(msg.to,\"Berhasil terima semua undangan dari grup :\\n\" + _list)\n else:\n kc.sendText(msg.to,\"Tidak ada grup yang tertunda saat ini\") \n \n elif msg.text in [\"TC4 acc invite\"]:\n if msg.from_ in admin:\n gid = kr.getGroupIdsInvited()\n _list = \"\"\n for i in gid:\n if i is not None:\n gids = kr.getGroup(i)\n _list += gids.name\n kr.acceptGroupInvitation(i)\n else:\n break\n if gid is not None:\n kr.sendText(msg.to,\"Berhasil terima semua undangan dari grup :\\n\" + _list)\n else:\n kr.sendText(msg.to,\"Tidak ada grup yang tertunda saat ini\") \n\n\n elif \"Gif gore\" in msg.text:\n \tgif = (\"https://media.giphy.com/media/l2JHVsQiOZrNMGzYs/giphy.gif\",\"https://media.giphy.com/media/OgltQ2hbilzJS/200w.gif\")\n gore = random.choice(gif)\n cl.sendGifWithURL(msg.to,gore)\n\n\n\n\n if 
op.type == 59:\n print op\n\n\n except Exception as error:\n print error\n\n\nwhile True:\n try:\n Ops = cl.fetchOps(cl.Poll.rev, 5)\n except EOFError:\n raise Exception(\"It might be wrong revision\\n\" + str(cl.Poll.rev))\n\n for Op in Ops:\n if (Op.type != OpType.END_OF_OPERATION):\n cl.Poll.rev = max(cl.Poll.rev, Op.revision)\n bot(Op)\n\n"},"path":{"kind":"string","value":"ma.py"},"size":{"kind":"number","value":177124,"string":"177,124"},"nl_text":{"kind":"string","value":"-*- coding: utf-8 -*-Chucky_Botcl.login(qr=True)ki.login(qr=True)kk.login(qr=True)kc = LINETCR.LINE()kc.login(qr=True)kc.login(token='TOKEN_KAMU_DISINI_BEIB')kc.loginResult()print \"Kc-Login Success\\n\"kr = LINETCR.LINE()kr.login(qr=True)kr.login(token='TOKEN_KAMU_DISINI_BEIB')kr.loginResult()print \"Kr-Login Success\\n\"km = LINETCR.LINE()km.login(qr=True)km.login(token='TOKEN_KAMU_DISINI_BEIB')km.loginResult() /XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...else:"},"nl_size":{"kind":"number","value":453,"string":"453"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.06846616417169571,"string":"0.068466"}}},{"rowIdx":581,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom PIL import Image\nimport random\nimport os\nfrom sample import sample_conf\nfrom tensorflow.python.framework.errors_impl import NotFoundError\n\n# 设置以下环境变量可开启CPU识别\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\nclass TrainError(Exception):\n pass\n\n\nclass TrainModel(object):\n def __init__(self, img_path, char_set, model_save_dir, verify=False):\n # 模型路径\n self.model_save_dir = model_save_dir\n\n # 打乱文件顺序+校验图片格式\n self.img_path = img_path\n self.img_list = os.listdir(img_path)\n # 校验格式\n if verify:\n self.confirm_image_suffix()\n # 打乱文件顺序\n random.seed(time.time())\n random.shuffle(self.img_list)\n\n # 获得图片宽高和字符长度基本信息\n label, captcha_array = self.gen_captcha_text_image(self.img_list[0])\n\n captcha_shape = captcha_array.shape\n captcha_shape_len = len(captcha_shape)\n if captcha_shape_len == 3:\n image_height, image_width, channel = captcha_shape\n self.channel = channel\n elif captcha_shape_len == 2:\n image_height, image_width = captcha_shape\n else:\n raise TrainError(\"图片转换为矩阵时出错,请检查图片格式\")\n\n # 初始化变量\n # 图片尺寸\n self.image_height = image_height\n self.image_width = image_width\n # 验证码长度(位数)\n self.max_captcha = len(label)\n # 验证码字符类别\n self.char_set = char_set\n self.char_set_len = len(char_set)\n\n # 相关信息打印\n print(\"-->图片尺寸: {} X {}\".format(image_height, image_width))\n print(\"-->验证码长度: {}\".format(self.max_captcha))\n print(\"-->验证码共{}类 {}\".format(self.char_set_len, char_set))\n print(\"-->使用测试集为 {}\".format(img_path))\n\n # tf初始化占位符\n self.X = tf.placeholder(tf.float32, [None, image_height * image_width]) # 特征向量\n self.Y = tf.placeholder(tf.float32, [None, self.max_captcha * self.char_set_len]) # 标签\n self.keep_prob = tf.placeholder(tf.float32) # dropout值\n self.w_alpha = 0.01\n self.b_alpha = 0.1\n\n # test model input and output\n print(\">>> Start model test\")\n batch_x, batch_y = self.get_batch(0, size=100)\n print(\">>> input batch images shape: {}\".format(batch_x.shape))\n print(\">>> input batch labels shape: {}\".format(batch_y.shape))\n\n def gen_captcha_text_image(self, img_name):\n \"\"\"\n 返回一个验证码的array形式和对应的字符串标签\n :return:tuple (str, numpy.array)\n \"\"\"\n # 标签\n label = img_name.split(\"_\")[0]\n # 文件\n 
img_file = os.path.join(self.img_path, img_name)\n captcha_image = Image.open(img_file)\n captcha_array = np.array(captcha_image) # 向量化\n return label, captcha_array\n\n @staticmethod\n def convert2gray(img):\n \"\"\"\n 图片转为灰度图,如果是3通道图则计算,单通道图则直接返回\n :param img:\n :return:\n \"\"\"\n if len(img.shape) > 2:\n r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\n else:\n return img\n\n def text2vec(self, text):\n \"\"\"\n 转标签为oneHot编码\n :param text: str\n :return: numpy.array\n \"\"\"\n text_len = len(text)\n if text_len > self.max_captcha:\n raise ValueError('验证码最长{}个字符'.format(self.max_captcha))\n\n vector = np.zeros(self.max_captcha * self.char_set_len)\n\n for i, ch in enumerate(text):\n idx = i * self.char_set_len + self.char_set.index(ch)\n vector[idx] = 1\n return vector\n\n def get_batch(self, n, size=128):\n batch_x = np.zeros([size, self.image_height * self.image_width]) # 初始化\n batch_y = np.zeros([size, self.max_captcha * self.char_set_len]) # 初始化\n\n max_batch = int(len(self.img_list) / size)\n # print(max_batch)\n if max_batch - 1 < 0:\n raise TrainError(\"训练集图片数量需要大于每批次训练的图片数量\")\n if n > max_batch - 1:\n n = n % max_batch\n s = n * size\n e = (n + 1) * size\n this_batch = self.img_list[s:e]\n # print(\"{}:{}\".format(s, e))\n\n for i, img_name in enumerate(this_batch):\n label, image_array = self.gen_captcha_text_image(img_name)\n image_array = self.convert2gray(image_array) # 灰度化图片\n batch_x[i, :] = image_array.flatten() / 255 # flatten 转为一维\n batch_y[i, :] = self.text2vec(label) # 生成 oneHot\n return batch_x, batch_y\n\n def confirm_image_suffix(self):\n # 在训练前校验所有文件格式\n print(\"开始校验所有图片后缀\")\n for index, img_name in enumerate(self.img_list):\n print(\"{} image pass\".format(index), end='\\r')\n if not img_name.endswith(sample_conf['image_suffix']):\n raise TrainError('confirm images suffix:you request [.{}] file but get file [{}]'\n .format(sample_conf['image_suffix'], img_name))\n print(\"所有图片格式校验通过\")\n\n def model(self):\n x = tf.reshape(self.X, shape=[-1, self.image_height, self.image_width, 1])\n print(\">>> input x: {}\".format(x))\n\n # 卷积层1\n wc1 = tf.get_variable(name='wc1', shape=[3, 3, 1, 32], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n bc1 = tf.Variable(self.b_alpha * tf.random_normal([32]))\n conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='SAME'), bc1))\n conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n conv1 = tf.nn.dropout(conv1, self.keep_prob)\n\n # 卷积层2\n wc2 = tf.get_variable(name='wc2', shape=[3, 3, 32, 64], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n bc2 = tf.Variable(self.b_alpha * tf.random_normal([64]))\n conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, wc2, strides=[1, 1, 1, 1], padding='SAME'), bc2))\n conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n conv2 = tf.nn.dropout(conv2, self.keep_prob)\n\n # 卷积层3\n wc3 = tf.get_variable(name='wc3', shape=[3, 3, 64, 128], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n bc3 = tf.Variable(self.b_alpha * tf.random_normal([128]))\n conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, wc3, strides=[1, 1, 1, 1], padding='SAME'), bc3))\n conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n conv3 = tf.nn.dropout(conv3, self.keep_prob)\n print(\">>> convolution 3: \", conv3.shape)\n next_shape = conv3.shape[1] * 
conv3.shape[2] * conv3.shape[3]\n\n # 全连接层1\n wd1 = tf.get_variable(name='wd1', shape=[next_shape, 1024], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n bd1 = tf.Variable(self.b_alpha * tf.random_normal([1024]))\n dense = tf.reshape(conv3, [-1, wd1.get_shape().as_list()[0]])\n dense = tf.nn.relu(tf.add(tf.matmul(dense, wd1), bd1))\n dense = tf.nn.dropout(dense, self.keep_prob)\n\n # 全连接层2\n wout = tf.get_variable('name', shape=[1024, self.max_captcha * self.char_set_len], dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n bout = tf.Variable(self.b_alpha * tf.random_normal([self.max_captcha * self.char_set_len]))\n y_predict = tf.add(tf.matmul(dense, wout), bout)\n return y_predict\n\n def train_cnn(self):\n y_predict = self.model()\n print(\">>> input batch predict shape: {}\".format(y_predict.shape))\n print(\">>> End model test\")\n # 计算概率 损失\n cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_predict, labels=self.Y))\n # 梯度下降\n optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)\n # 计算准确率\n predict = tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len]) # 预测结果\n max_idx_p = tf.argmax(predict, 2) # 预测结果\n max_idx_l = tf.argmax(tf.reshape(self.Y, [-1, self.max_captcha, self.char_set_len]), 2) # 标签\n # 计算准确率\n correct_pred = tf.equal(max_idx_p, max_idx_l)\n accuracy = tf.reduce_mean(tf.reduce_min(tf.cast(correct_pred, tf.float32), axis=1))\n # 模型保存对象\n saver = tf.train.Saver()\n with tf.Session() as sess:\n init = tf.global_variables_initializer()\n sess.run(init)\n # 恢复模型\n if os.path.exists(self.model_save_dir):\n try:\n saver.restore(sess, self.model_save_dir)\n # 判断捕获model文件夹中没有模型文件的错误\n except NotFoundError:\n print(\"model文件夹为空,将创建新模型\")\n else:\n pass\n step = 1\n for i in range(3000):\n batch_x, batch_y = self.get_batch(i, size=128)\n _, cost_ = sess.run([optimizer, cost], feed_dict={self.X: batch_x, self.Y: batch_y, self.keep_prob: 0.75})\n if step % 10 == 0:\n batch_x_test, batch_y_test = self.get_batch(i, size=100)\n acc = sess.run(accuracy, feed_dict={self.X: batch_x_test, self.Y: batch_y_test, self.keep_prob: 1.})\n print(\"第{}次训练 >>> 准确率为 {} >>> loss {}\".format(step, acc, cost_))\n # 准确率达到99%后保存并停止\n if acc > 0.99:\n saver.save(sess, self.model_save_dir)\n break\n # 每训练500轮就保存一次\n if i % 500 == 0:\n saver.save(sess, self.model_save_dir)\n step += 1\n saver.save(sess, self.model_save_dir)\n\n def recognize_captcha(self):\n label, captcha_array = self.gen_captcha_text_image(random.choice(self.img_list))\n\n f = plt.figure()\n ax = f.add_subplot(111)\n ax.text(0.1, 0.9, \"origin:\" + label, ha='center', va='center', transform=ax.transAxes)\n plt.imshow(captcha_array)\n # 预测图片\n image = self.convert2gray(captcha_array)\n image = image.flatten() / 255\n\n y_predict = self.model()\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, self.model_save_dir)\n predict = tf.argmax(tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len]), 2)\n text_list = sess.run(predict, feed_dict={self.X: [image], self.keep_prob: 1.})\n predict_text = text_list[0].tolist()\n\n print(\"正确: {} 预测: {}\".format(label, predict_text))\n # 显示图片和预测结果\n p_text = \"\"\n for p in predict_text:\n p_text += str(self.char_set[p])\n print(p_text)\n plt.text(20, 1, 'predict:{}'.format(p_text))\n plt.show()\n\n\ndef main():\n train_image_dir = sample_conf[\"train_image_dir\"]\n char_set = sample_conf[\"char_set\"]\n model_save_dir = sample_conf[\"model_save_dir\"]\n tm = 
TrainModel(train_image_dir, char_set, model_save_dir, verify=False)\n tm.train_cnn() # 开始训练模型\n # tm.recognize_captcha() # 识别图片示例\n\n\nif __name__ == '__main__':\n main()\n"},"path":{"kind":"string","value":"train_model.py"},"size":{"kind":"number","value":12141,"string":"12,141"},"nl_text":{"kind":"string","value":"图片转为灰度图,如果是3通道图则计算,单通道图则直接返回\n:param img:\n:return:\n返回一个验证码的array形式和对应的字符串标签\n:return:tuple (str, numpy.array)\n转标签为oneHot编码\n:param text: str\n:return: numpy.array\n\n -*- coding: utf-8 -*- 设置以下环境变量可开启CPU识别 os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\" os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\" 模型路径 打乱文件顺序+校验图片格式 校验格式 打乱文件顺序 获得图片宽高和字符长度基本信息 初始化变量 图片尺寸 验证码长度(位数) 验证码字符类别 相关信息打印 tf初始化占位符 特征向量 标签 dropout值 test model input and output 标签 文件 向量化 初始化 初始化 print(max_batch) print(\"{}:{}\".format(s, e)) 灰度化图片 flatten 转为一维 生成 oneHot 在训练前校验所有文件格式 卷积层1 卷积层2 卷积层3 全连接层1 全连接层2 计算概率 损失 梯度下降 计算准确率 预测结果 预测结果 标签 计算准确率 模型保存对象 恢复模型 判断捕获model文件夹中没有模型文件的错误 准确率达到99%后保存并停止 每训练500轮就保存一次 预测图片 显示图片和预测结果 开始训练模型 tm.recognize_captcha() 识别图片示例"},"nl_size":{"kind":"number","value":712,"string":"712"},"nl_language":{"kind":"string","value":"zh"},"nl_language_score":{"kind":"number","value":0.9033854007720947,"string":"0.903385"}}},{"rowIdx":582,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# The idea for this module (but no code) was borrowed from the\n# quantities (http://pythonhosted.org/quantities/) package.\n\"\"\"Helper functions for Quantity.\n\nIn particular, this implements the logic that determines scaling and result\nunits for a given ufunc, given input units.\n\"\"\"\n\nfrom fractions import Fraction\n\nimport numpy as np\n\nfrom . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS\nfrom ..core import (UnitsError, UnitConversionError, UnitTypeError,\n dimensionless_unscaled, get_current_unit_registry)\n\n\ndef _d(unit):\n if unit is None:\n return dimensionless_unscaled\n else:\n return unit\n\n\ndef get_converter(from_unit, to_unit):\n \"\"\"Like Unit._get_converter, except returns None if no scaling is needed,\n i.e., if the inferred scale is unity.\"\"\"\n try:\n scale = from_unit._to(to_unit)\n except UnitsError:\n return from_unit._apply_equivalencies(\n from_unit, to_unit, get_current_unit_registry().equivalencies)\n except AttributeError:\n raise UnitTypeError(\"Unit '{0}' cannot be converted to '{1}'\"\n .format(from_unit, to_unit))\n if scale == 1.:\n return None\n else:\n return lambda val: scale * val\n\n\ndef get_converters_and_unit(f, unit1, unit2):\n converters = [None, None]\n # By default, we try adjusting unit2 to unit1, so that the result will\n # be unit1 as well. 
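The train_model.py row above encodes each captcha label with text2vec, but decoding only happens implicitly through argmax inside the TensorFlow graph. A self-contained sketch of the same one-hot layout plus its inverse in plain NumPy — the character set and label length here are assumptions for illustration; the real values come from sample_conf:

import numpy as np

CHAR_SET = list("0123456789")  # assumed; the project reads sample_conf["char_set"]
MAX_CAPTCHA = 4                # assumed label length

def text2vec(text, char_set=CHAR_SET, max_captcha=MAX_CAPTCHA):
    # One block of len(char_set) positions per character, same layout as TrainModel.text2vec.
    vector = np.zeros(max_captcha * len(char_set))
    for i, ch in enumerate(text):
        vector[i * len(char_set) + char_set.index(ch)] = 1
    return vector

def vec2text(vector, char_set=CHAR_SET, max_captcha=MAX_CAPTCHA):
    # Inverse mapping: argmax inside each block recovers one character.
    blocks = vector.reshape(max_captcha, len(char_set))
    return "".join(char_set[int(i)] for i in blocks.argmax(axis=1))

assert vec2text(text2vec("3074")) == "3074"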
But if there is no second unit, we have to try\n # adjusting unit1 (to dimensionless, see below).\n if unit2 is None:\n if unit1 is None:\n # No units for any input -- e.g., np.add(a1, a2, out=q)\n return converters, dimensionless_unscaled\n changeable = 0\n # swap units.\n unit2 = unit1\n unit1 = None\n elif unit2 is unit1:\n # ensure identical units is fast (\"==\" is slow, so avoid that).\n return converters, unit1\n else:\n changeable = 1\n\n # Try to get a converter from unit2 to unit1.\n if unit1 is None:\n try:\n converters[changeable] = get_converter(unit2,\n dimensionless_unscaled)\n except UnitsError:\n # special case: would be OK if unitless number is zero, inf, nan\n converters[1-changeable] = False\n return converters, unit2\n else:\n return converters, dimensionless_unscaled\n else:\n try:\n converters[changeable] = get_converter(unit2, unit1)\n except UnitsError:\n raise UnitConversionError(\n \"Can only apply '{0}' function to quantities \"\n \"with compatible dimensions\"\n .format(f.__name__))\n\n return converters, unit1\n\n\n# SINGLE ARGUMENT UFUNC HELPERS\n#\n# The functions below take a single argument, which is the quantity upon which\n# the ufunc is being used. The output of the helper function should be two\n# values: a list with a single converter to be used to scale the input before\n# it is being passed to the ufunc (or None if no conversion is needed), and\n# the unit the output will be in.\n\ndef helper_onearg_test(f, unit):\n return ([None], None)\n\n\ndef helper_invariant(f, unit):\n return ([None], _d(unit))\n\n\ndef helper_square(f, unit):\n return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)\n\n\ndef helper_reciprocal(f, unit):\n return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)\n\n\none_half = 0.5 # faster than Fraction(1, 2)\none_third = Fraction(1, 3)\n\n\ndef helper_sqrt(f, unit):\n return ([None], unit ** one_half if unit is not None\n else dimensionless_unscaled)\n\n\ndef helper_cbrt(f, unit):\n return ([None], (unit ** one_third if unit is not None\n else dimensionless_unscaled))\n\n\ndef helper_modf(f, unit):\n if unit is None:\n return [None], (dimensionless_unscaled, dimensionless_unscaled)\n\n try:\n return ([get_converter(unit, dimensionless_unscaled)],\n (dimensionless_unscaled, dimensionless_unscaled))\n except UnitsError:\n raise UnitTypeError(\"Can only apply '{0}' function to \"\n \"dimensionless quantities\"\n .format(f.__name__))\n\n\ndef helper__ones_like(f, unit):\n return [None], dimensionless_unscaled\n\n\ndef helper_dimensionless_to_dimensionless(f, unit):\n if unit is None:\n return [None], dimensionless_unscaled\n\n try:\n return ([get_converter(unit, dimensionless_unscaled)],\n dimensionless_unscaled)\n except UnitsError:\n raise UnitTypeError(\"Can only apply '{0}' function to \"\n \"dimensionless quantities\"\n .format(f.__name__))\n\n\ndef helper_dimensionless_to_radian(f, unit):\n from ..si import radian\n if unit is None:\n return [None], radian\n\n try:\n return [get_converter(unit, dimensionless_unscaled)], radian\n except UnitsError:\n raise UnitTypeError(\"Can only apply '{0}' function to \"\n \"dimensionless quantities\"\n .format(f.__name__))\n\n\ndef helper_degree_to_radian(f, unit):\n from ..si import degree, radian\n try:\n return [get_converter(unit, degree)], radian\n except UnitsError:\n raise UnitTypeError(\"Can only apply '{0}' function to \"\n \"quantities with angle units\"\n .format(f.__name__))\n\n\ndef helper_radian_to_degree(f, unit):\n from ..si import 
degree, radian
    try:
        return [get_converter(unit, radian)], degree
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "quantities with angle units"
                            .format(f.__name__))


def helper_radian_to_dimensionless(f, unit):
    from ..si import radian
    try:
        return [get_converter(unit, radian)], dimensionless_unscaled
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "quantities with angle units"
                            .format(f.__name__))


def helper_frexp(f, unit):
    if not unit.is_unity():
        raise UnitTypeError("Can only apply '{0}' function to "
                            "unscaled dimensionless quantities"
                            .format(f.__name__))
    return [None], (None, None)


# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.

def helper_multiplication(f, unit1, unit2):
    return [None, None], _d(unit1) * _d(unit2)


def helper_division(f, unit1, unit2):
    return [None, None], _d(unit1) / _d(unit2)


def helper_power(f, unit1, unit2):
    # TODO: find a better way to do this, currently need to signal that one
    # still needs to raise power of unit1 in main code
    if unit2 is None:
        return [None, None], False

    try:
        return [None, get_converter(unit2, dimensionless_unscaled)], False
    except UnitsError:
        raise UnitTypeError("Can only raise something to a "
                            "dimensionless quantity")


def helper_ldexp(f, unit1, unit2):
    if unit2 is not None:
        raise TypeError("Cannot use ldexp with a quantity "
                        "as second argument.")
    else:
        return [None, None], _d(unit1)


def helper_copysign(f, unit1, unit2):
    # if first arg is not a quantity, just return plain array
    if unit1 is None:
        return [None, None], None
    else:
        return [None, None], unit1


def helper_heaviside(f, unit1, unit2):
    try:
        converter2 = (get_converter(unit2, dimensionless_unscaled)
                      if unit2 is not None else None)
    except UnitsError:
        raise UnitTypeError("Can only apply 'heaviside' function with a "
                            "dimensionless second argument.")
    return ([None, converter2], dimensionless_unscaled)


def helper_two_arg_dimensionless(f, unit1, unit2):
    try:
        converter1 = (get_converter(unit1, dimensionless_unscaled)
                      if unit1 is not None else None)
        converter2 = (get_converter(unit2, dimensionless_unscaled)
                      if unit2 is not None else None)
    except UnitsError:
        raise UnitTypeError("Can only apply '{0}' function to "
                            "dimensionless quantities"
                            .format(f.__name__))
    return ([converter1, converter2], dimensionless_unscaled)


# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few us; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit


def helper_twoarg_comparison(f, unit1, unit2):
    converters, _ = get_converters_and_unit(f, unit1, unit2)
    return converters, None


def helper_twoarg_invtrig(f, unit1, unit2):
    from ..si import radian
    converters, _ = get_converters_and_unit(f, unit1, unit2)
    return converters, radian


def helper_twoarg_floor_divide(f, unit1, unit2):
    converters, _ = get_converters_and_unit(f, unit1, unit2)
    return converters, dimensionless_unscaled


def helper_divmod(f, unit1, unit2):
    converters, result_unit = get_converters_and_unit(f, unit1, unit2)
    return converters, (dimensionless_unscaled, result_unit)


# list of ufuncs:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs

UNSUPPORTED_UFUNCS |= {
    np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
    np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
    np.logical_not}
for name in 'isnat', 'gcd', 'lcm':
    # isnat was introduced in numpy 1.14, gcd+lcm in 1.15
    ufunc = getattr(np, name, None)
    if isinstance(ufunc, np.ufunc):
        UNSUPPORTED_UFUNCS |= {ufunc}

# SINGLE ARGUMENT UFUNCS

# ufuncs that return a boolean and do not care about the unit
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_onearg_test

# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
                    np.spacing, np.rint, np.floor, np.ceil, np.trunc,
                    np.positive)
for ufunc in invariant_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_invariant

# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
                                         np.log10, np.log2, np.log1p)
# As found out in gh-7058, some numpy 1.13 conda installations also provide
# np.erf, even though upstream doesn't have it. We include it if present.
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
    dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless

# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
                                  np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian

# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_degree_to_radian

# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_radian_to_degree

# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
                                  np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless

# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp


# TWO ARGUMENT UFUNCS

# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless

# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
                           np.minimum, np.fmin, np.fmax, np.nextafter,
                           np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_twoarg_invariant

# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
                            np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_twoarg_comparison

# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
    twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
    UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig

# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod

Path: astropy/units/quantity_helper/helpers.py (14,433 bytes)
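The registrations above are easiest to understand from the caller's side. The snippet below is an illustrative sketch and is not part of helpers.py: it assumes numpy and astropy are installed and shows how these helpers make numpy ufuncs unit-aware when applied to Quantity objects.

import numpy as np
import astropy.units as u

# helper_radian_to_dimensionless: the degree input is converted to radians first
print(np.sin(30 * u.deg))            # ~0.5 (dimensionless Quantity)

# helper_twoarg_invariant: the second operand is converted to the unit of the first
print(np.add(1 * u.m, 200 * u.cm))   # 3.0 m

# Incompatible units raise, as signalled by the helpers:
# np.add(1 * u.m, 1 * u.s)  -> UnitConversionError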
# Uses python3
import sys

def get_change(money, coins):
    t = [j+1 for j in range(money+1)]

    # boundary condition
    t[0] = 0
    for j in range(1, money+1):
        for c in coins:
            if c <= j:
                t[j] = min(t[j], 1+t[j-c])

    return t[money]

if __name__ == '__main__':
    coins = [1, 3, 4]
    money = int(input())

    print(get_change(money, coins))

Path: 1. Algorithmic Toolbox/week5_dynamic_programming1/1_money_change_again.py (393 bytes)
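A quick sanity check of the recurrence used above (t[j] = min over coins c <= j of 1 + t[j-c]); this snippet is illustrative only and assumes the get_change function defined above is in scope.

# With coins {1, 3, 4}:
assert get_change(2, [1, 3, 4]) == 2    # 1 + 1
assert get_change(6, [1, 3, 4]) == 2    # 3 + 3
# 34 = 7*4 + 2*3 -> 9 coins; 8 coins can cover at most 8*4 = 32 < 34
assert get_change(34, [1, 3, 4]) == 9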
from sepal_ui import sepalwidgets as sw
from ipywidgets import dlink

from component import parameter as cp


class ParamTile(sw.Card):
    def __init__(self, model):

        # read the model
        self.model = model

        # add the base widgets
        self.close = sw.Icon(children=["mdi-close"], small=True)
        self.title = sw.CardTitle(
            class_="pa-0 ma-0", children=[sw.Spacer(), self.close]
        )

        # create the widgets
        self.w_target = sw.Select(
            small=True,
            items=[{"text": f"{i+1}0%", "value": i + 1} for i in range(cp.nb_target)],
            v_model=model.target,
            label="target",
            dense=True,
        )
        self.w_weight = sw.Select(
            small=True,
            items=[i + 1 for i in range(cp.nb_weight)],
            v_model=model.weight,
            label="weight",
            dense=True,
        )

        # link the widgets to the model
        self.model.bind(self.w_target, "target").bind(self.w_weight, "weight")

        # create the object
        super().__init__(
            max_width="500px",
            class_="pa-1",
            children=[self.title, self.w_target, self.w_weight],
            viz=False,
            disabled=False,
        )

        # add javascript events
        self.close.on_event("click", lambda *args: self.hide())
        dlink((self, "disabled"), (self, "loading"))

    def reset(self):

        self.w_target.v_model = None
        self.w_weight.v_model = None

        self.hide()

        return

Path: component/tile/param_tile.py (1,555 bytes)
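A minimal usage sketch, assuming a sepal_ui-style model object that exposes `target` and `weight` traits and the `bind` method used above; the model class name below is hypothetical and not part of the original code.

model = SomeParamModel()   # hypothetical model exposing `target` and `weight` traits
tile = ParamTile(model)    # the card starts hidden (viz=False)

# selecting values in the two dropdowns updates model.target / model.weight via bind()
tile.reset()               # clears both selects and hides the card again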
#!/usr/bin/env python
"""Functions for working with the DLRN API"""

import csv
import os.path
import requests

from toolchest import yaml

from atkinson.config.manager import ConfigManager
from atkinson.logging.logger import getLogger


def _raw_fetch(url, logger):
    """
    Fetch remote data and return the text output.

    :param url: The URL to fetch the data from
    :param logger: A logger instance to use.
    :return: Raw text data, None otherwise
    """
    ret_data = None
    try:
        req = requests.get(url)
        if req.status_code == requests.codes.ok:
            ret_data = req.text
    except requests.exceptions.ConnectionError as error:
        logger.warning(error.request)

    return ret_data


def _fetch_yaml(url, logger):
    """
    Fetch remote data and process the text as yaml.

    :param url: The URL to fetch the data from
    :param logger: A logger instance to use.
    :return: Parsed yaml data in the form of a dictionary
    """
    ret_data = None
    raw_data = _raw_fetch(url, logger)
    if raw_data is not None:
        ret_data = yaml.parse(raw_data)

    return ret_data


def dlrn_http_factory(host, config_file=None, link_name=None,
                      logger=getLogger()):
    """
    Create a DlrnHttpData instance based on a host.

    :param host: A host name string to build instances
    :param config_file: A dlrn config file(s) to use in addition to
                        the default.
    :param link_name: A dlrn symlink to use. This overrides the config files
                      link parameter.
    :param logger: An atkinson logger to use. Default is the base logger.
    :return: A DlrnHttpData instance
    """
    manager = None
    files = ['dlrn.yml']
    if config_file is not None:
        if isinstance(config_file, list):
            files.extend(config_file)
        else:
            files.append(config_file)

    local_path = os.path.realpath(os.path.dirname(__file__))
    manager = ConfigManager(filenames=files, paths=local_path)

    if manager is None:
        return None

    config = manager.config
    if host not in config:
        return None

    link = config[host]['link']
    if link_name is not None:
        link = link_name

    return DlrnHttpData(config[host]['url'],
                        config[host]['release'],
                        link_name=link,
                        logger=logger)


class DlrnHttpData():
    """A class used to interact with the dlrn API"""
    def __init__(self, url, release, link_name='current', logger=getLogger()):
        """
        Class constructor

        :param url: The URL to the host to obtain data.
        :param release: The release name to use for lookup.
        :param link_name: The name of the dlrn symlink to fetch data from.
        :param logger: An atkinson logger to use. Default is the base logger.
        """
        self.url = os.path.join(url, release)
        self.release = release
        self._logger = logger
        self._link_name = link_name
        self._commit_data = {}
        self._fetch_commit()

    def _fetch_commit(self):
        """
        Fetch the commit data from dlrn
        """
        full_url = os.path.join(self.url,
                                self._link_name,
                                'commit.yaml')
        data = _fetch_yaml(full_url, self._logger)
        if data is not None and 'commits' in data:
            pkg = data['commits'][0]
            if pkg['status'] == 'SUCCESS':
                self._commit_data = {'name': pkg['project_name'],
                                     'dist_hash': pkg['distro_hash'],
                                     'commit_hash': pkg['commit_hash'],
                                     'extended_hash': pkg.get('extended_hash')}
            else:
                msg = '{0} has a status of error'.format(str(pkg))
                self._logger.warning(msg)

    def _build_url(self):
        """
        Generate a url given a commit hash and distgit hash to match the format
        base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987
        is a portion of the distgit hash.

        :return: A string with the full URL.
        """
        first = self._commit_data['commit_hash'][0:2]
        second = self._commit_data['commit_hash'][2:4]
        third = self._commit_data['commit_hash']
        for key in ['dist_hash', 'extended_hash']:
            if self._commit_data.get(key, 'None') != 'None':
                third += '_' + self._commit_data[key][0:8]
        return os.path.join(self.url,
                            first,
                            second,
                            third)

    @property
    def commit(self):
        """
        Get the dlrn commit information

        :return: A dictionary of name, dist-git hash, commit hash and
                 extended hash.
                 An empty dictionary is returned otherwise.
        """
        return self._commit_data

    @property
    def versions(self):
        """
        Get the version data from the versions.csv file and return the
        data in a dictionary

        :return: A dictionary of packages with commit and dist-git hashes
        """
        ret_dict = {}
        full_url = os.path.join(self._build_url(), 'versions.csv')
        data = _raw_fetch(full_url, self._logger)
        if data is not None:
            data = data.replace(' ', '_')
            split_data = data.split()
            reader = csv.DictReader(split_data)
            for row in reader:
                ret_dict[row['Project']] = {'source': row['Source_Sha'],
                                            'state': row['Status'],
                                            'distgit': row['Dist_Sha'],
                                            'nvr': row['Pkg_NVR']}
        else:
            msg = 'Could not fetch {0}'.format(full_url)
            self._logger.error(msg)

        return ret_dict

Path: atkinson/dlrn/http_data.py (5,950 bytes)
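A short usage sketch for the API above; it is illustrative only, and the host key is a placeholder that must match an entry in dlrn.yml (or in an extra file passed via config_file).

from atkinson.dlrn.http_data import dlrn_http_factory

dlrn = dlrn_http_factory('example-host')   # placeholder host key
if dlrn is not None:
    # commit metadata for the symlinked build (name, commit/dist/extended hashes)
    print(dlrn.commit)
    # per-project rows parsed from versions.csv
    for project, info in dlrn.versions.items():
        print(project, info['nvr'], info['state'])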
"""
Module for the selection of machine learning models.

There are several different functions which can perform the model selection: all of them have an intuitive interface,
but they are also powerful and flexible.
In addition, almost all these functions can optionally make plots, which sum up the performed selection in a visual way.

These functions perform the model selection in different contexts, i.e. each function is meant for a specific scenario.
Certain contexts are more specific, and others are more general.
On the whole, there are six different model selection functions, divided into two main groups:
    1. functions that perform the model selection with respect to a **single dataset**;
    2. functions that perform the model selection with respect to **multiple datasets**.

The six functions, sorted from the most specific context to the most general one, are:
    - *hyperparameter_validation*, *hyperparameters_validation*, *models_validation* (single dataset);
    - *datasets_hyperparameter_validation*, *datasets_hyperparameters_validation*, *datasets_models_validation*
      (multiple datasets).

This module is built on top of the **numpy** library: the datasets are represented as np.array.
The plots are made using the **matplotlib** library. It is also built on top of the **sklearn** module:
- the machine learning models are represented as sklearn models (i.e. sklearn estimators);
- under the hood, the selection is performed using the grid search cross validation provided by sklearn
  (i.e. GridSearchCV);
- several other operations are done using the functionalities provided by sklearn.

Besides the model selection functions, this module also contains some utilities:
- the PolynomialRegression class;
- some utility functions.
"""


import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import resample
from sklearn.model_selection import train_test_split, cross_val_score, TimeSeriesSplit, GridSearchCV
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
from sklearn.base import BaseEstimator
from sklearn.linear_model import LinearRegression




#----------------------------------------------------------------------------------------------------------------------------
# POLYNOMIAL REGRESSOR MODEL

class PolynomialRegression(BaseEstimator):
    """
    Polynomial regression model.

    It's a sklearn model: it's compliant with the sklearn estimators interface.

    Parameters
    ----------
    degree: int
        Degree to apply for the polynomial transformation.

    Notes
    ----------
    The polynomial transformation is performed using the sklearn PolynomialFeatures.
    """

    def __init__(self, degree=1):
        self.degree = degree

    def fit(self, X, y):
        self.poly_transformer = PolynomialFeatures(self.degree, include_bias=False)
        self.poly_transformer.fit(X)
        X = self.poly_transformer.transform(X)
        self.model = LinearRegression(fit_intercept=True)
        self.model.fit(X, y)
        return self

    def predict(self, X):
        X = self.poly_transformer.transform(X)
        return self.model.predict(X)

    def get_params(self, deep=True):
        return {"degree": self.degree}

    def set_params(self, **parameters):
        for parameter, value in parameters.items():
            setattr(self, parameter, value)
        return self




#----------------------------------------------------------------------------------------------------------------------------
# UTILITY FUNCTIONS


def compute_train_val_test(X, y, model, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5,
                           regr=True):
    """
    Compute the training-validation-test scores for the given model on the given dataset.

    The training and test scores are computed by splitting the dataset into the training and test sets. The validation
    score is computed by applying the cross validation on the training set.

    Parameters
    ----------
    X: np.array
        Two-dimensional np.array, containing the explanatory features of the dataset.
    y: np.array
        Mono dimensional np.array, containing the response feature of the dataset.
    model: sklearn.base.BaseEstimator
        Model to evaluate.
    scale: bool
        Indicates whether to scale or not the features in `X`.
        (The scaling is performed using the sklearn MinMaxScaler).
    test_size: float
        Decimal number between 0 and 1, which indicates the proportion of the test set.
    time_series: bool
        Indicates if the given dataset is a time series dataset (i.e. a dataset indexed by days).
        (This affects the computing of the scores).
    random_state: int
        Used in the training-test splitting of the dataset.
    n_folds: int
        Indicates how many folds are made in order to compute the k-fold cross validation.
        (It's used only if `time_series` is False).
    regr: bool
        Indicates if it's either a regression or a classification problem.

    Returns
    ----------
    train_score: float
    val_score: float
    test_score: float

    Notes
    ----------
    - If `regr` is True, the returned scores are errors, computed using the MSE formula (i.e. Mean Squared Error).
      Otherwise, the returned scores are accuracy measures.
    - If `time_series` is False, the training-test splitting of the dataset is made randomly, and the cross validation
      strategy is the classic k-fold cross validation with `n_folds` folds.
      Otherwise, if `time_series` is True, the training-test sets are obtained by splitting the dataset into two
      contiguous parts, and the cross validation strategy is the sklearn TimeSeriesSplit.
    """

    if regr:
        scoring = "neg_mean_squared_error"
    else:
        scoring = "accuracy"

    # Split into training and test.
    if not time_series:  # Random splitting (not time series)
        X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
    else:  # time series splitting
        train_len = int(X.shape[0]*(1-test_size))
        X_train_80 = X[:train_len]
        y_train_80 = y[:train_len]
        X_test = X[train_len:]
        y_test = y[train_len:]

    if(scale):  # Scale the features in X
        scaler = MinMaxScaler()
        scaler.fit(X_train_80)
        X_train_80 = scaler.transform(X_train_80)
        X_test = scaler.transform(X_test)

    # Cross validation
    if not time_series:  # k-fold cross validation
        cv = n_folds
    else:  # cross validation for time series
        cv = TimeSeriesSplit(n_splits=n_folds)
    scores = cross_val_score(model, X_train_80, y_train_80, cv=cv, scoring=scoring)
    val_score = scores.mean()  # validation score
    if regr:
        val_score = -val_score

    model.fit(X_train_80, y_train_80)  # Fit the model using all the training set

    # Compute training and test scores
    train_score = 0
    test_score = 0
    if regr:
        train_score = mean_squared_error(y_true=y_train_80, y_pred=model.predict(X_train_80))
        test_score = mean_squared_error(y_true=y_test, y_pred=model.predict(X_test))
    else:
        train_score = accuracy_score(y_true=y_train_80, y_pred=model.predict(X_train_80))
        test_score = accuracy_score(y_true=y_test, y_pred=model.predict(X_test))

    return train_score, val_score, test_score  # Return a triple


def compute_bias_variance_error(X, y, model, scale=False, N_TESTS=20, sample_size=0.67):
    """
    Compute the bias^2-variance-error scores for the given model on the given dataset.

    These measures are computed in an approximate way, using `N_TESTS` random samples of size `sample_size` from the
    dataset.

    Parameters
    ----------
    X: np.array
        Two-dimensional np.array, containing the explanatory features of the dataset.
    y: np.array
        Mono dimensional np.array, containing the response feature of the dataset.
    model: sklearn.base.BaseEstimator
        Model to evaluate.
    scale: bool
        Indicates whether to scale or not the features in `X`.
        (The scaling is performed using the sklearn MinMaxScaler).
    N_TESTS: int
        Number of samples that are made in order to compute the measures.
    sample_size: float
        Decimal number between 0 and 1, which indicates the proportion of the sample.

    Returns
    ----------
    bias: float
    variance: float
    error: float
    """

    # Scale the features in `X`
    if(scale):
        scaler = MinMaxScaler()
        scaler.fit(X)
        X = scaler.transform(X)

    # Vector 'vector_ypred': at the beginning it is a list of lists (i.e. a two-dimensional list).
    # In the end it will be a matrix which has as many rows as `N_TESTS` (each row corresponds to a sample) and as many
    # columns as the number of instances in `X` (each column is a point of the dataset).
    # Row 'i' --> the predictions made by the model fitted on sample 'i', for all the dataset points.
    # Column 'j' --> the predictions made on point 'j' across all the `N_TESTS` samples.
    vector_ypred = []

    # Iterate through N_TESTS. At each iteration extract a new sample and fit the model on it.
    for i in range(N_TESTS):
        # Extract a new sample (sample 'i')
        Xs, ys = resample(X, y, n_samples=int(sample_size*len(y)))

        # Fit the model on this sample 'i'
        model.fit(Xs, ys)

        # Add the predictions made by the model on all the dataset points
        vector_ypred.append(list(model.predict(X)))

    vector_ypred = np.array(vector_ypred)  # Transform into numpy array

    # Per-point bias^2, computed on the `N_TESTS` samples
    vector_bias = (y - np.mean(vector_ypred, axis=0))**2

    # Per-point variance, computed on the `N_TESTS` samples
    vector_variance = np.var(vector_ypred, axis=0)

    # Per-point error, computed on the `N_TESTS` samples
    vector_error = np.sum((vector_ypred - y)**2, axis=0)/N_TESTS

    bias = np.mean(vector_bias)          # Total bias^2 of the model
    variance = np.mean(vector_variance)  # Total variance of the model
    error = np.mean(vector_error)        # Total error of the model

    return bias, variance, error  # Return a triple


def plot_predictions(X, y, model, scale=False, test_size=0.2, plot_type=0, xvalues=None, xlabel="Index",
                     title="Actual vs Predicted values", figsize=(6,6)):
    """
    Plot the predictions made by the given model on the given dataset, versus its actual values.

    The dataset is split into training-test sets: the former is used to train the `model`, on the latter the predictions
    are made.

    Parameters
    ----------
    X: np.array
        Two-dimensional np.array, containing the explanatory features of the dataset.
    y: np.array
        Mono dimensional np.array, containing the response feature of the dataset.
    model: sklearn.base.BaseEstimator
        Model used to make the predictions.
    scale: bool
        Indicates whether to scale or not the features in `X`.
        (The scaling is performed using the sklearn MinMaxScaler).
    test_size: float
        Decimal number between 0 and 1, which indicates the proportion of the test set.
    plot_type: int
        Indicates the type of the plot.
            - 0 -> In the same plot two different curves are drawn: the first has on the x axis `xvalues` and on the y
                   axis the actual values (i.e. `y`); the second has on the x axis `xvalues` and on the y axis the
                   computed predicted values.
            - 1 -> On the x axis the actual values are put, on the y axis the predicted ones.
    xvalues: list (in general, iterable)
        Values that have to be put in the x axis of the plot.
        (It's used only if `plot_type` is 0).
    xlabel: str
        Label of the x axis of the plot.
        (It's used only if `plot_type` is 0).
    title: str
        Title of the plot.
    figsize: tuple
        Two dimensions of the plot.

    Returns
    ----------
    matplotlib.axes.Axes
        The matplotlib Axes where the plot has been made.

    Notes
    ----------
    The splitting of the dataset into the training-test sets is made by dividing the dataset into two contiguous
    sequences, i.e. the same technique usually used for time series datasets. (This is done in order to simplify the
    visualization). For this reason, this function is typically applied on time series datasets.
    """

    train_len = int(X.shape[0]*(1-test_size))
    X_train_80 = X[:train_len]
    y_train_80 = y[:train_len]
    X_test = X[train_len:]
    y_test = y[train_len:]

    if(scale):  # Scale the features in X
        scaler = MinMaxScaler()
        scaler.fit(X_train_80)
        X_train_80 = scaler.transform(X_train_80)
        X_test = scaler.transform(X_test)

    model.fit(X_train_80, y_train_80)  # Fit using all the training set

    predictions = model.predict(X_test)

    fig, ax = plt.subplots(figsize=figsize)

    if plot_type == 0:
        if xvalues is None:
            xvalues = range(len(X))
        ax.plot(xvalues, y, 'o:', label='actual values')
        ax.plot(xvalues[train_len:], predictions, 'o:', label='predicted values')
        ax.legend()
    elif plot_type == 1:
        ax.plot(y[train_len:], predictions, 'o')
        ax.plot([0, 1], [0, 1], 'r-', transform=ax.transAxes)
        xlabel = "Actual values"
        ax.set_ylabel("Predicted values")

    ax.set_xlabel(xlabel)
    ax.set_title(title)
    ax.grid()

    return ax


def _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize=(6,6), bar=False):
    """
    Plot the given list of training-validation scores.

    This is an auxiliary function for the model selection functions: it's meant to be private in the module.

    Parameters
    ----------
    xvalues: list (in general, iterable)
        Values to put in the x axis of the plot.
    train_val_scores: np.array
        Two-dimensional np.array, containing two columns: the first contains the training scores, the second the
        validation scores. Basically, it is a list of training-validation scores.
    plot_train: bool
        Indicates whether to plot also the training scores or to plot only the validation ones.
    xlabel: str
        Label of the x axis.
    title: str
        Title of the plot.
    figsize: tuple
        Two dimensions of the plot.
    bar: bool
        Indicates whether to plot the scores using bars or using points.
        If `bar` is True, `xvalues` must contain strings (i.e. labels).

    Returns
    ----------
    matplotlib.axes.Axes
        The matplotlib Axes where the plot has been made.
    """

    fig, ax = plt.subplots(figsize=figsize)

    if not bar:  # Points
        if plot_train:  # Plot also the training scores
            ax.plot(xvalues, train_val_scores[:,0], 'o:', label='Train')
        ax.plot(xvalues, train_val_scores[:,1], 'o:', label='Validation')  # Validation scores
    else:  # Bars
        if plot_train:  # Plot also the training scores
            x = np.arange(len(xvalues))  # The label locations
            width = 0.35  # The width of the bars
            ax.bar(x-width/2, train_val_scores[:,0], width=width, label='Train')
            ax.bar(x+width/2, train_val_scores[:,1], width=width, label='Validation')  # Validation scores
            ax.set_xticks(x)
            ax.set_xticklabels(xvalues)
        else:
            ax.bar(xvalues, train_val_scores[:,1], label='Validation')

    ax.set_xlabel(xlabel)
    ax.set_title(title)
    ax.grid()
    ax.legend()

    return ax




#----------------------------------------------------------------------------------------------------------------------------
# FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO A SINGLE DATASET


def hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2,
                              time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False,
                              xvalues=None, xlabel=None, title="Hyperparameter validation", figsize=(6,6)):
    """
    Select the best value for the specified hyperparameter of the specified model on the given dataset.

    In other words, perform the tuning of the `hyperparameter` among the values in `hyperparameter_values`.

    This selection is made using the validation score (i.e. the best hyperparameter value is the one with the best
    validation score).
    The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross
    validation on the training set. Additionally, the training and test scores are also computed.

    Optionally, the validation scores of the `hyperparameter_values` can be plotted, making a graphical visualization of
    the selection.

    Parameters
    ----------
    X: np.array
        Two-dimensional np.array, containing the explanatory features of the dataset.
    y: np.array
        Mono dimensional np.array, containing the response feature of the dataset.
    model: sklearn.base.BaseEstimator
        Model which has the specified `hyperparameter`.
    hyperparameter: str
        The name of the hyperparameter that has to be validated.
    hyperparameter_values: list
        List of values for `hyperparameter` that have to be taken into account in the selection.
    scale: bool
        Indicates whether to scale or not the features in `X`.
        (The scaling is performed using the sklearn MinMaxScaler).
    test_size: float
        Decimal number between 0 and 1, which indicates the proportion of the test set.
    time_series: bool
        Indicates if the given dataset is a time series dataset (i.e. a dataset indexed by days).
        (This affects the computing of the validation score).
    random_state: int
        Used in the training-test splitting of the dataset.
    n_folds: int
        Indicates how many folds are made in order to compute the k-fold cross validation.
        (It's used only if `time_series` is False).
    regr: bool
        Indicates if it's either a regression or a classification problem.
    plot: bool
        Indicates whether to plot or not the validation score values.
    plot_train: bool
        Indicates whether to plot also the training scores.
        (It's considered only if `plot` is True).
    xvalues: list (in general, iterable)
        Values that have to be put in the x axis of the plot.
    xlabel: str
        Label of the x axis of the plot.
    title: str
        Title of the plot.
    figsize: tuple
        Two dimensions of the plot.

    Returns
    ----------
    train_val_scores: np.array
        Two-dimensional np.array, containing two columns: the first contains the training scores, the second the
        validation scores.
        It has as many rows as the number of values in `hyperparameter_values` (i.e. number of values to be tested).
    best_index: int
        Index of `hyperparameter_values` that indicates which is the best hyperparameter value.
    test_score: float
        Test score associated with the best hyperparameter value.
    ax: matplotlib.axes.Axes
        The matplotlib Axes where the plot has been made.
        If `plot` is False, then it is None.

    Notes
    ----------
    - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): the best hyperparameter value
      is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: the best
      value is the one associated with the maximum validation score.
    - If `time_series` is False, the training-test splitting is made randomly and the cross validation strategy is the
      classic k-fold cross validation with `n_folds` folds. Otherwise, the training-test sets are obtained by splitting
      the dataset into two contiguous parts and the cross validation strategy is the sklearn TimeSeriesSplit.
    """

    param_grid = {hyperparameter: hyperparameter_values}  # Create the hyperparameter grid
    # Call the function for the validation of an arbitrary number of hyperparameters
    params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale,
                                                                                  test_size=test_size,
                                                                                  time_series=time_series,
                                                                                  random_state=random_state,
                                                                                  n_folds=n_folds, regr=regr)

    ax = None

    if(plot):  # Make the plot
        if not xvalues:  # Default values on the x axis
            xvalues = hyperparameter_values
        if not xlabel:  # Default label on the x axis
            xlabel = hyperparameter
        ax = _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize)

    return train_val_scores, best_index, test_score, ax


def hyperparameters_validation(X, y, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123,
                               n_folds=5, regr=True):
    """
    Select the best combination of values for the specified hyperparameters of the specified model on the given dataset.

    In other words, perform the tuning of multiple hyperparameters.
    The parameter `param_grid` is a dictionary that indicates which are the specified hyperparameters and what are the
    associated values to test. All the possible combinations of values are tested, in an exhaustive way (i.e. grid
    search).

    This selection is made using the validation score (i.e. the best combination of hyperparameters values is the one
    with the best validation score).
    The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross
    validation on the training set. Additionally, the training and test scores are also computed.

    Parameters
    ----------
    X: np.array
        Two-dimensional np.array, containing the explanatory features of the dataset.
    y: np.array
        Mono dimensional np.array, containing the response feature of the dataset.
    model: sklearn.base.BaseEstimator
        Model which has the specified hyperparameters.
    param_grid: dict
        Dictionary which has as keys the names of the specified hyperparameters and as values the associated lists of
        values to test.
    scale: bool
        Indicates whether to scale or not the features in `X`.
        (The scaling is performed using the sklearn MinMaxScaler).
    test_size: float
        Decimal number between 0 and 1, which indicates the proportion of the test set.
    time_series: bool
        Indicates if the given dataset is a time series dataset (i.e. a dataset indexed by days).
        (This affects the computing of the validation score).
    random_state: int
        Used in the training-test splitting of the dataset.
    n_folds: int
        Indicates how many folds are made in order to compute the k-fold cross validation.
        (It's used only if `time_series` is False).
    regr: bool
        Indicates if it's either a regression or a classification problem.

    Returns
    ----------
    params: list
        List which enumerates all the possible combinations of hyperparameters values.
        It's a list of dictionaries: each dictionary represents a specific combination of hyperparameters values (it has
        as keys the hyperparameters names and as values the specific associated values of that combination).
    train_val_scores: np.array
        Two-dimensional np.array, containing two columns: the first contains the training scores, the second the
        validation scores.
        It has as many rows as the number of possible combinations of the hyperparameters values (i.e. as the elements
        of `params`).
    best_index: int
        Index of `params` that indicates which is the best combination of hyperparameters values.
    test_score: float
        Test score associated with the best combination of hyperparameters values.

    Notes
    ----------
    The meaning of the scores (errors vs accuracies) and the splitting/cross-validation strategies are the same as
    described in `hyperparameter_validation`.
    """

    if regr:
        scoring = "neg_mean_squared_error"
    else:
        scoring = "accuracy"

    # Split into training-test sets
    if not time_series:  # Random splitting
        X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
    else:  # Time series splitting
        train_len = int(X.shape[0]*(1-test_size))
        X_train_80 = X[:train_len]
        y_train_80 = y[:train_len]
        X_test = X[train_len:]
        y_test = y[train_len:]

    if(scale):  # Scale the features in `X`
        scaler = MinMaxScaler()
        scaler.fit(X_train_80)
        X_train_80 = scaler.transform(X_train_80)
        X_test = scaler.transform(X_test)

    # Cross validation strategy
    if not time_series:  # The strategy is the classic k-fold cross validation
        cv = n_folds
    else:  # Time series cross validation strategy
        cv = TimeSeriesSplit(n_splits=n_folds)

    # Grid search
    grid_search = GridSearchCV(model, param_grid, scoring=scoring, cv=cv, return_train_score=True)
    grid_search.fit(X_train_80, y_train_80)

    params = grid_search.cv_results_["params"]  # List of all the possible combinations of hyperparameters values
    # For each combination of hyperparameters values, the associated training score
    train_scores = grid_search.cv_results_["mean_train_score"]
    # For each combination of hyperparameters values, the associated validation score
    val_scores = grid_search.cv_results_["mean_test_score"]
    # Index of `params`, corresponding to the best combination of hyperparameters values
    best_index = grid_search.best_index_
    # Model with the best combination of hyperparameters values
    best_model = grid_search.best_estimator_

    if regr:  # The scores are negative: multiply by -1
        train_scores = train_scores*(-1)
        val_scores = val_scores*(-1)
    train_val_scores = np.concatenate((train_scores.reshape(-1,1), val_scores.reshape(-1,1)), axis=1)

    # Fit the best model on all the training set
    best_model.fit(X_train_80, y_train_80)

    # Compute the test score of the best model
    test_score = 0
    if regr:
        test_score = mean_squared_error(y_true=y_test, y_pred=best_model.predict(X_test))
    else:
        test_score = accuracy_score(y_true=y_test, y_pred=best_model.predict(X_test))

    return params, train_val_scores, best_index, test_score


def models_validation(X, y, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123,
                      n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel="Models",
                      title="Models validation", figsize=(6,6)):
    """
    Select the best model on the given dataset.

    The parameter `model_paramGrid_list` is the list of the models to test. It also contains, for each model, the grid
    of hyperparameters that have to be tested on that model (i.e. the values to test for each specified hyperparameter
    of the model). That grid has the same structure as the `param_grid` parameter of `hyperparameters_validation`.

    For each specified model, the best combination of hyperparameters values is selected in an exhaustive way (i.e. grid
    search); the function `hyperparameters_validation` is used.

    The selection of the best model is made using the validation score (i.e. the best model is the one with the best
    validation score).
    The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross
    validation on the training set. Additionally, the training and test scores are also computed.

    Optionally, the validation scores of the different models can be plotted, making a graphical visualization of the
    selection.

    Parameters
    ----------
    X: np.array
        Two-dimensional np.array, containing the explanatory features of the dataset.
    y: np.array
        Mono dimensional np.array, containing the response feature of the dataset.
    model_paramGrid_list: list
        List that specifies the models and the relative grids of hyperparameters to be tested.
        It's a list of triples (i.e. tuples), where each triple represents a model:
            - the first element is a string, which is a mnemonic name of that model;
            - the second element is the sklearn model;
            - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same
              structure as the parameter `param_grid` of `hyperparameters_validation`.
    scale_list: list or bool
        List of booleans, which has as many elements as the models to test (i.e. as the elements of
        `model_paramGrid_list`). It indicates, for each model, if the features in `X` have to be scaled or not.
        `scale_list` can be None or False: in this case the `X` features aren't scaled for any model. `scale_list` can
        be True: in this case the `X` features are scaled for all the models.
    test_size: float
        Decimal number between 0 and 1, which indicates the proportion of the test set.
    time_series: bool
        Indicates if the given dataset is a time series dataset (i.e. a dataset indexed by days).
        (This affects the computing of the validation score).
    random_state: int
        Used in the training-test splitting of the dataset.
    n_folds: int
        Indicates how many folds are made in order to compute the k-fold cross validation.
        (It's used only if `time_series` is False).
    regr: bool
        Indicates if it's either a regression or a classification problem.
    plot: bool
        Indicates whether to plot or not the validation score values.
    plot_train: bool
        Indicates whether to plot also the training scores.
        (It's considered only if `plot` is True).
    xvalues: list (in general, iterable)
        Values that have to be put in the x axis of the plot.
    xlabel: str
        Label of the x axis of the plot.
    title: str
        Title of the plot.
    figsize: tuple
        Two dimensions of the plot.

    Returns
    ----------
    models_train_val_score: np.array
        Two-dimensional np.array, containing two columns: the first contains the training scores, the second the
        validation scores.
        It has as many rows as the number of models to test (i.e. number of elements in `model_paramGrid_list`).
    models_best_params: list
        List which indicates, for each model, the best combination of the hyperparameters values for that model.
        It has as many elements as the models to test, and it contains dictionaries: each dictionary represents the best
        combination of the hyperparameters values for the associated model.
    best_index: int
        Index of `model_paramGrid_list` that indicates which is the best model.
    test_score: float
        Test score associated with the best model.
    ax: matplotlib.axes.Axes
        The matplotlib Axes where the plot has been made.
        If `plot` is False, then it is None.

    See also
    ----------
    hyperparameters_validation:
        select the best combination of values for the specified hyperparameters of the specified model on the given
        dataset.

    Notes
    ----------
    The meaning of the scores (errors vs accuracies) and the splitting/cross-validation strategies are the same as
    described in `hyperparameter_validation`.
    """

    if not scale_list:  # `scale_list` is either None or False
        scale_list = [False]*len(model_paramGrid_list)
    elif scale_list is True:  # `scale_list` is True
        scale_list = [True]*len(model_paramGrid_list)

    # Numpy matrix (np.array) which has as many rows as the models and two columns, one for the training scores and the
    # other for the validation scores. At the beginning it is a list of tuples.
    models_train_val_score = []
    # List which has as many elements as the models: for each model there is the dictionary of the best combination of
    # hyperparameters values.
    models_best_params = []
    # List which has as many elements as the models: for each model there is the test score (associated with the best
    # combination of hyperparameters values).
    models_test_score = []

    for i, triple in enumerate(model_paramGrid_list):  # Iterate through all the couples model-param_grid
        model, param_grid = triple[1:]

        # Apply the grid search on model-param_grid
        params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid,
                                                                                      scale=scale_list[i],
                                                                                      test_size=test_size,
                                                                                      time_series=time_series,
                                                                                      random_state=random_state,
                                                                                      n_folds=n_folds, regr=regr)

        models_train_val_score.append(tuple(train_val_scores[best_index]))  # Add the row for that model
        models_best_params.append(params[best_index])  # Add the element for that model
        models_test_score.append(test_score)  # Add the element for that model

    models_train_val_score = np.array(models_train_val_score)  # Transform into numpy matrix (i.e. np.array)

    # Find the best index (i.e. the best model)
    if regr:
        best_index = np.argmin(models_train_val_score, axis=0)[1]
    else:
        best_index = np.argmax(models_train_val_score, axis=0)[1]

    # Test score of the best model
    test_score = models_test_score[best_index]

    ax = None
    if(plot):  # Make the plot
        if not xvalues:  # Default values for the x axis
            xvalues = [model_paramGrid_list[i][0] for i in range(len(model_paramGrid_list))]
        ax = _plot_TrainVal_values(xvalues, models_train_val_score, plot_train, xlabel, title, figsize, bar=True)

    return models_train_val_score, models_best_params, best_index, test_score, ax




#----------------------------------------------------------------------------------------------------------------------------
# FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO MULTIPLE DATASETS


def datasets_hyperparameter_validation(dataset_list, model, hyperparameter, hyperparameter_values, scale=False,
                                       test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True,
                                       plot=False, plot_train=False, xvalues=None, xlabel="Datasets",
                                       title="Datasets validation", figsize=(6,6), verbose=False, figsize_verbose=(6,6)):
    """
    Select the best dataset and the best value for the specified hyperparameter of the specified model (i.e. select the
    best couple dataset-hyperparameter value).

    For each dataset in `dataset_list`, all the specified values `hyperparameter_values` are tested for the specified
    `hyperparameter` of `model`. In other words, on each dataset the tuning of `hyperparameter` is performed: on each
    dataset, the function `hyperparameter_validation` is applied. In the end, the best couple dataset-hyperparameter
    value is selected.

    Despite the fact that a couple dataset-hyperparameter value is selected, the main viewpoint is focused on the
    datasets: it's a validation focused on the datasets.
    First, for each dataset the hyperparameter tuning is performed: in this way the best value is selected and its
    relative score is associated with the dataset (i.e. it's the score of the dataset). Then, the best dataset is
    selected. It's a two-level selection.

    This selection is made using the validation score (i.e. the best couple dataset-hyperparameter value is the one with
    the best validation score).
    The validation score is computed by splitting each dataset into the training-test sets and then by applying the
    cross validation on the training set. Additionally, the training and test scores are also computed.

    Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset
    selection: this is the 'main' plot. Moreover, still optionally, the 'secondary' plots can be made: for each dataset,
    the validation scores of the `hyperparameter_values` are plotted, as in the plot made by `hyperparameter_validation`.

    Parameters
    ----------
    dataset_list: list
        List of couples, where each couple is a dataset.
            - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.
            - The second element is y, the mono dimensional np.array containing the response feature of the dataset.
    model: sklearn.base.BaseEstimator
        Model which has the specified `hyperparameter`.
    hyperparameter: str
        The name of the hyperparameter that has to be validated.
    hyperparameter_values: list
        List of values for `hyperparameter` that have to be taken into account in the selection.
    scale: bool
        Indicates whether to scale or not the features in `X` (for all the datasets).
        (The scaling is performed using the sklearn MinMaxScaler).
    test_size: float
        Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).
    time_series: bool
        Indicates if the given datasets are time series datasets (i.e. datasets indexed by days).
        (This affects the computing of the validation scores).
    random_state: int
        Used in the training-test splitting of the datasets.
    n_folds: int
        Indicates how many folds are made in order to compute the k-fold cross validation.
        (It's used only if `time_series` is False).
    regr: bool
        Indicates if it's either a regression or a classification problem.
    plot: bool
        Indicates whether to plot or not the validation score values of the datasets (i.e. the 'main' plot).
    plot_train: bool
        Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).
    xvalues: list (in general, iterable)
        Values that have to be put in the x axis of the 'main' plot.
    xlabel: str
        Label of the x axis of the 'main' plot.
    title: str
        Title of the 'main' plot.
    figsize: tuple
        Two dimensions of the 'main' plot.
    verbose: bool
        If True, for each dataset the validation scores of the hyperparameter tuning are plotted (the 'secondary'
        plots). (See `hyperparameter_validation`).
    figsize_verbose: tuple
        Two dimensions of the 'secondary' plots.

    Returns
    ----------
    datasets_train_val_score: np.array
        Two-dimensional np.array, containing two columns: the first contains the training scores, the second the
        validation scores.
        It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.
    datasets_best_hyperparameter_value: list
        List which has as many elements as the number of the datasets (i.e. as the number of elements in
        `dataset_list`). For each dataset, it contains the best `hyperparameter` value on that dataset.
    best_index: int
        Index of `dataset_list` that indicates which is the best dataset.
    test_score: float
        Test score associated with the best couple dataset-hyperparameter value.
    axes: list
        List of the matplotlib Axes where the plots have been made.
        Firstly the 'secondary' plots are put (if any), and as last the 'main' plot (if any).
        If no plot has been made, `axes` is an empty list.

    See also
    ----------
    hyperparameter_validation:
        select the best value for the specified hyperparameter of the specified model on the given dataset.

    Notes
    ----------
    The meaning of the scores (errors vs accuracies) and the splitting/cross-validation strategies are the same as
    described in `hyperparameter_validation`, applied to each dataset.
    """

    # numpy matrix (i.e. np.array) which has as many rows as the datasets, with the training and validation scores as
    # columns. At the beginning it is a list.
    datasets_train_val_score = []
    # List which contains, for each dataset, the best hyperparameter value
    datasets_best_hyperparameter_value = []
    # List which contains, for each dataset, its test score (associated with the best hyperparameter value)
    datasets_test_score = []
    # List of axes
    axes = []

    for i, dataset in enumerate(dataset_list):  # Iterate through all the datasets

        X, y = dataset

        # Perform the hyperparameter tuning on the current dataset
        train_val_scores, best_index, test_score, ax = hyperparameter_validation(X, y, model, hyperparameter,
                    hyperparameter_values, scale=scale, test_size=test_size, time_series=time_series,
                    random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train,
                    xvalues=hyperparameter_values, xlabel=hyperparameter,
                    title="Dataset "+str(i)+" : hyperparameter validation", figsize=figsize_verbose)

        datasets_train_val_score.append(tuple(train_val_scores[best_index,:]))  # Add the row related to that dataset
        datasets_best_hyperparameter_value.append(hyperparameter_values[best_index])  # Add the element for that dataset
        datasets_test_score.append(test_score)  # Add the element for that dataset
        if ax:
            axes.append(ax)

    datasets_train_val_score = np.array(datasets_train_val_score)  # Transform into numpy

    # Find the best index, i.e. the best dataset (more precisely, the best couple dataset-hyperparameter value)
    if regr:
        best_index = np.argmin(datasets_train_val_score, axis=0)[1]
    else:
        best_index = np.argmax(datasets_train_val_score, axis=0)[1]

    # Test score of the best couple dataset-hyperparameter value
    test_score = datasets_test_score[best_index]

    if(plot):  # Make the plot
        if not xvalues:  # Default values on the x axis
            xvalues = range(len(dataset_list))
        ax = _plot_TrainVal_values(xvalues, datasets_train_val_score, plot_train, xlabel, title, figsize, bar=True)
        axes.append(ax)

    return datasets_train_val_score, datasets_best_hyperparameter_value, best_index, test_score, axes


def datasets_hyperparameters_validation(dataset_list, model, param_grid, scale=False, test_size=0.2, time_series=False,
                                        random_state=123, n_folds=5, regr=True, plot=False, plot_train=False,
                                        xvalues=None, xlabel="Datasets", title="Datasets validation", figsize=(6,6)):
    """
    Select the best dataset and the best combination of values for the specified hyperparameters of the specified model
    (i.e. select the best couple dataset-combination of hyperparameters values).

    For each dataset in `dataset_list`, all the possible combinations of the hyperparameters values for `model`
    (specified with `param_grid`) are tested. In other words, on each dataset the tuning of the specified
    hyperparameters is performed in an exhaustive way: on each dataset, the function `hyperparameters_validation` is
    applied. In the end, the best couple dataset-combination of hyperparameters values is selected.

    Despite the fact that a couple dataset-combination of hyperparameters values is selected, the main viewpoint is
    focused on the datasets: it's a validation focused on the datasets.
    First, for each dataset the hyperparameters tuning is performed: in this way the best combination of values is
    selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). Then, the best
    dataset is selected. It's a two-level selection.

    This selection is made using the validation score (i.e. the best couple dataset-combination of hyperparameters
    values is the one with the best validation score).
    The validation score is computed by splitting each dataset into the training-test sets and then by applying the
    cross validation on the training set. Additionally, the training and test scores are also computed.

    Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset
    selection.

    Parameters
    ----------
    dataset_list: list
        List of couples, where each couple is a dataset.
            - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.
            - The second element is y, the mono dimensional np.array containing the response feature of the dataset.
    model: sklearn.base.BaseEstimator
        Model which has the specified hyperparameters.
    param_grid: dict
        Dictionary which has as keys the names of the specified hyperparameters and as values the associated lists of
        values to test.
    scale: bool
        Indicates whether to scale or not the features in `X` (for all the datasets).
        (The scaling is performed using the sklearn MinMaxScaler).
    test_size: float
        Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).
    time_series: bool
        Indicates if the given datasets are time series datasets (i.e. datasets indexed by days).
        (This affects the computing of the validation score).
    random_state: int
        Used in the training-test splitting of the datasets.
    n_folds: int
        Indicates how many folds are made in order to compute the k-fold cross validation.
        (It's used only if `time_series` is False).
    regr: bool
        Indicates if it's either a regression or a classification problem.
    plot: bool
        Indicates whether to plot or not the validation score values of the datasets.
    plot_train: bool
        Indicates whether to plot also the training scores.
        (It's considered only if `plot` is True).
    xvalues: list (in general, iterable)
        Values that have to be put in the x axis of the plot.
    xlabel: str
        Label of the x axis of the plot.
    title: str
        Title of the plot.
    figsize: tuple
        Two dimensions of the plot.

    Returns
    ----------
    datasets_train_val_score: np.array
        Two-dimensional np.array, containing two columns: the first contains the training scores, the second the
        validation scores.
        It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.
    datasets_best_params: list
        List which has as many elements as the number of the datasets (i.e. as the number of elements in
        `dataset_list`). For each dataset, it contains the best combination of hyperparameters values on that dataset.
        Each combination is represented as a dictionary, with keys the hyperparameters names and values the associated
        values.
    best_index: int
        Index of `dataset_list` that indicates which is the best dataset.
    test_score: float
        Test score associated with the best couple dataset-combination of hyperparameters values.
    ax: matplotlib.axes.Axes
        The matplotlib Axes where the plot has been made.

    See also
    ----------
    hyperparameters_validation:
        select the best combination of values for the specified hyperparameters of the specified model on the given
        dataset.

    Notes
    ----------
    - If `regr` is True, the validation scores are errors (MSE, i.e.
Mean Squared Errors): this means that the best\r\n couple dataset-combination of hyperparameters values is the one associated with the minimum validation score.\r\n Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the\r\n maximum validation score.\r\n - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross\r\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\r\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two\r\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\r\n \"\"\"\r\n\r\n # numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as\r\n # columns . At the beginning it is a list.\r\n datasets_train_val_score = []\r\n # List which contains, for each dataset, the best combination of hyperparameters values (i.e. a dictionary)\r\n datasets_best_params = []\r\n # List which contains, for each dataset, its test score (associated to the best combination of hyperparameters values)\r\n datasets_test_score = []\r\n\r\n for X,y in dataset_list: # Iterate through all the datasets\r\n\r\n # Perform the exaustive hyperparameters tuning on the current dataset\r\n params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale,\r\n test_size=test_size,\r\n time_series=time_series,\r\n random_state=random_state,\r\n n_folds=n_folds, regr=regr)\r\n\r\n datasets_train_val_score.append(tuple(train_val_scores[best_index,:])) # Add the row related to that dataset\r\n datasets_best_params.append(params[best_index]) # Add the element related to that dataset\r\n datasets_test_score.append(test_score) # Add the row related to that dataset\r\n\r\n datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy\r\n\r\n # Find the best index, i.e. the best dataset (more precisely, the best couple dataset-combination of hyperparameters\r\n # values)\r\n if regr:\r\n best_index = np.argmin(datasets_train_val_score,axis=0)[1]\r\n else:\r\n best_index = np.argmax(datasets_train_val_score,axis=0)[1]\r\n\r\n # Test score of the best couple dataset-combination of hyperparameters values\r\n test_score = datasets_test_score[best_index]\r\n\r\n ax = None\r\n if(plot): # Make the plot\r\n if not xvalues: # Default values on the x axis\r\n xvalues = range(len(dataset_list))\r\n ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)\r\n\r\n return datasets_train_val_score, datasets_best_params, best_index, test_score, ax\r\n\r\n\r\ndef datasets_models_validation(dataset_list, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False,\r\n random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None,\r\n xlabel=\"Datasets\", title=\"Datasets validation\", figsize=(6,6) ,verbose=False,\r\n figsize_verbose=(6,6)):\r\n \"\"\"\r\n Select the best dataset and the best model (i.e. select the best couple dataset-model).\r\n\r\n For each dataset in `dataset_list`, all the models in `model_paramGrid_list` are tested: each model is tested performing\r\n an exhaustive tuning of the specified hyperparameters. 
In fact, `model_paramGrid_list` also contains, for each model, the\r\n grid of the hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for\r\n each specified hyperparameter of the model).\r\n In other words, on each dataset the selection of the best model is performed: in fact, on each dataset, the function\r\n `models_validation` is applied. (See `models_validation`).\r\n In the end, the best couple dataset-model is selected.\r\n\r\n Despite the fact that a couple dataset-model is selected, the main viewpoint is focused with respect to the datasets.\r\n It's a validation focused on the datasets.\r\n In fact, first of all, for each dataset the model selection is performed: in this way the best model is selected\r\n and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each\r\n dataset the function `models_validation` is applied). Finally, after that, the best dataset is selected.\r\n It's a two-levels selection.\r\n\r\n This selection is made using the validation score (i.e. the best couple dataset-model is the one with best validation\r\n score).\r\n The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross\r\n validation on the training set.\r\n Additionally, the training and test scores are also computed.\r\n\r\n Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset\r\n selection. This is the 'main' plot.\r\n Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the models are\r\n plotted, making a graphical visualization of the models selection on that dataset. (As the plot made by the\r\n `models_validation` function).\r\n\r\n Parameters\r\n ----------\r\n dataset_list: list\r\n List of couples, where each couple is a dataset.\r\n - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.\r\n - The second element is y, the mono dimensional np.array containing the response feature of the dataset.\r\n model_paramGrid_list: list\r\n List that specifies the models and the relative grid of hyperparameters to be tested.\r\n It's a list of triples (i.e. tuples), where each triple represents a model:\r\n - the first element is a string, which is a mnemonic name of that model;\r\n - the second element is the sklearn model;\r\n - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same\r\n structure of parameter `param_grid` of the function `hyperparameters_validation`.\r\n scale_list: list or bool\r\n List of booleans, which has as many elements as the number of models to test (i.e. number of elements in the\r\n `model_paramGrid_list` list).\r\n This list indicates, for each different model, if the features in 'X' have to be scaled or not (for all the datasets).\r\n `scale_list` can be None or False: in this case the 'X' features aren't scaled for any model. `scale_list` can be\r\n True: in this case the 'X' features are scaled for all the models.\r\n test_size: float\r\n Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).\r\n time_series: bool\r\n Indicates if the given datasets are time series dataset (i.e. 
datasets indexed by days).\r\n (This affects the computing of the validation score).\r\n random_state: int\r\n Used in the training-test splitting of the datasets.\r\n n_folds: int\r\n Indicates how many folds are made in order to compute the k-fold cross validation.\r\n (It's used only if `time_series` is False).\r\n regr: bool\r\n Indicates if it's either a regression or a classification problem.\r\n plot: bool\r\n Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot).\r\n plot_train: bool\r\n Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).\r\n xvalues: list (in general, iterable)\r\n Values that have to be put in the x axis of the 'main' plot.\r\n xlabel: str\r\n Label of the x axis of the 'main' plot.\r\n title: str\r\n Title of the 'main' plot.\r\n figsize: tuple\r\n Two dimensions of the 'main' plot.\r\n verbose: bool\r\n If True, for each dataset the validation scores of the models are plotted (i.e. these are the 'secondary' plots).\r\n (See 'models_validation').\r\n figsize_verbose: tuple\r\n Two dimensions of the 'secondary' plots.\r\n\r\n Returns\r\n ----------\r\n datasets_train_val_score: np.array\r\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\r\n scores.\r\n It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.\r\n datasets_best_model: list\r\n List which has as many elements as the number of the datasets (i.e. number of elements in `dataset_list`). For\r\n each dataset, it contains the best model for that dataset.\r\n More precisely, it is a list of triple:\r\n - the first element is the index of `model_paramGrid_list` which indicates the best model;\r\n - the second element is the mnemonic name of the best model;\r\n - the third element is the best combination of hyperparameters values on that best model (i.e. it's a dictionary\r\n which has as keys the hyperparameters names and as values their associated values).\r\n best_index: int\r\n Index of `dataset_list` that indicates which is the best dataset.\r\n test_score: float\r\n Test score associated with the best couple dataset-model.\r\n axes: list\r\n List of the matplotlib Axes where the plots have been made.\r\n Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any).\r\n If no plot has been made, `axes` is an empty list.\r\n\r\n See also\r\n ----------\r\n models_validation: select the best model on the given dataset.\r\n\r\n Notes\r\n ----------\r\n - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\r\n couple dataset-model is the one associated with the minimum validation score.\r\n Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the\r\n maximum validation score.\r\n - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross\r\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\r\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two\r\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\r\n \"\"\"\r\n\r\n # numpy matrix (i.e. 
np.array) which has as many rows as the datasets, and it has the training and validation scores as
    # columns. At the beginning it is a list.
    datasets_train_val_score = []
    # List which contains, for each dataset, the best model, i.e. the triple (index, model name, best combination of
    # hyperparameters values)
    datasets_best_model = []
    # List which contains, for each dataset, its test score (associated with the best model)
    datasets_test_score = []
    # List of axes
    axes = []

    for i, dataset in enumerate(dataset_list):  # Iterate through all the datasets

        X, y = dataset

        # Perform the models validation on the current dataset
        models_train_val_score, models_best_params, best_index, test_score, ax = models_validation(
            X, y, model_paramGrid_list, scale_list=scale_list, test_size=test_size, time_series=time_series,
            random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train,
            xlabel="Models", title="Dataset " + str(i) + " : models validation", figsize=figsize_verbose)

        datasets_train_val_score.append(tuple(models_train_val_score[best_index, :]))  # Add the row related to that dataset
        # Add the element related to that dataset
        datasets_best_model.append((best_index, model_paramGrid_list[best_index][0], models_best_params[best_index]))
        datasets_test_score.append(test_score)  # Add the element related to that dataset
        if ax:
            axes.append(ax)

    datasets_train_val_score = np.array(datasets_train_val_score)  # Transform into numpy

    # Find the best index, i.e. the best dataset (more precisely, the best couple dataset-model)
    if regr:
        best_index = np.argmin(datasets_train_val_score, axis=0)[1]
    else:
        best_index = np.argmax(datasets_train_val_score, axis=0)[1]

    # Test score of the best couple dataset-model
    test_score = datasets_test_score[best_index]

    if plot:  # Make the plot
        if not xvalues:  # Default values on the x axis
            xvalues = range(len(dataset_list))
        ax = _plot_TrainVal_values(xvalues, datasets_train_val_score, plot_train, xlabel, title, figsize, bar=True)
        axes.append(ax)

    return datasets_train_val_score, datasets_best_model, best_index, test_score, axes
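# ---------------------------------------------------------------------------------------------------------------------------
# A minimal usage sketch of `datasets_models_validation` from this module (model_selection.py), showing how the
# dataset-level model selection documented above can be called. The synthetic datasets, the candidate models and the
# hyperparameter grids below are illustrative assumptions only, not values prescribed by the module; numpy is re-imported
# locally so the sketch stays self-contained.
if __name__ == "__main__":
    import numpy as np
    from sklearn.linear_model import Ridge
    from sklearn.tree import DecisionTreeRegressor

    rng = np.random.RandomState(0)

    # Two small synthetic regression datasets, each a couple (X, y), as expected by `dataset_list`
    X1 = rng.rand(80, 3)
    y1 = X1 @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(80)
    X2 = rng.rand(80, 5)
    y2 = np.sin(X2[:, 0]) + 0.1 * rng.randn(80)
    dataset_list = [(X1, y1), (X2, y2)]

    # One triple (mnemonic name, sklearn model, hyperparameter grid) per candidate model
    model_paramGrid_list = [
        ("ridge", Ridge(), {"alpha": [0.1, 1.0, 10.0]}),
        ("tree", DecisionTreeRegressor(random_state=0), {"max_depth": [2, 4, 6]}),
    ]

    scores, best_models, best_dataset, test_score, axes = datasets_models_validation(
        dataset_list,
        model_paramGrid_list,
        scale_list=[True, False],  # scale the features for the ridge model only
        regr=True,                 # regression: validation scores are MSE, so the minimum wins
        plot=False,                # set plot=True / verbose=True to get the 'main' and 'secondary' plots
    )
    print("Training/validation scores per dataset:\n", scores)
    print("Best model per dataset:", best_models)
    print("Best dataset index:", best_dataset, "- test score:", test_score)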
It's meant to be private in the\nmodule.\n\nParameters\n----------\nxvalues: list (in general iterable)\n Values to put in the x axis of the plot.\ntrain_val_scores: np.array\n Two dimensional np.array, containing two columns: the first contains the trainining scores, the second the validation\n scores.\n Basically, it is a list of training-validation scores.\nplot_train: bool\n Indicates whether to plot also the training scores or to plot only the validation ones.\nxlabel: str\n Label of the x axis.\ntitle: str\n Title of the plot.\nfigsize: tuple\n Two dimensions of the plot.\nbar: bool\n Indicates whether to plot the scores using bars or using points.\n If `bar` it's True, `xvalues` must contain string (i.e. labels).\nReturns\n----------\nmatplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\nCompute the bias^2-variance-error scores for the given model on the given dataset.\n\nThese measures are computed in an approximate way, using `N_TESTS` random samples of size `sample_size` from the\ndataset.\n\nParameters\n----------\nX: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\ny: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\nmodel: sklearn.base.BaseEstimator\n Model to evaluate.\nscale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\nN_TESTS: int\n Number of samples that are made in order to compute the measures.\nsample_size: float\n Decimal number between 0 and 1, which indicates the proportion of the sample.\n\nReturns\n----------\nbias: float\nvariance: float\nerror: float\nCompute the training-validation-test scores for the given model on the given dataset.\n\nThe training and test scores are simply computed by splitting the dataset into the training and test sets. The validation\nscore is performed applying the cross validation on the training set.\n\nParameters\n----------\nX: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\ny: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\nmodel: sklearn.base.BaseEstimator\n Model to evaluate.\nscale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\ntest_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\ntime_series: bool\n Indicates if the given dataset is a time series dataset (i.e. datasets indexed by days).\n (This affects the computing of the scores).\nrandom_state: int\n Used in the training-test splitting of the dataset.\nn_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\nregr: bool\n Indicates if it's either a regression or a classification problem.\n\nReturns\n----------\ntrain_score: float\nval_score: float\ntest_score: float\n\nNotes\n----------\n- If `regr` is True, the returned scores are errors, computed using the MSE formula (i.e. Mean Squared Error).\n Otherwise, the returned scores are accuracy measures.\n- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are obtained simply by splitting the dataset into two\n contiguous parts. 
In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\nSelect the best dataset and the best value for the specified hyperparameter of the specified model (i.e. select the best\ncouple dataset-hyperparameter value).\n\nFor each dataset in `dataset_list`, all the specified values `hyperparameter_values` are tested for the specified\n`hyperparameter` of `model`.\nIn other words, on each dataset the tuning of `hyperparameter` is performed: in fact, on each dataset, the function\n`hyperparameter_validation` is applied. (See `hyperparameter_validation`).\nIn the end, the best couple dataset-hyperparameter value is selected.\n\nDespite the fact that a couple dataset-hyperparameter value is selected, the main viewpoint is focused with respect to\nthe datasets. It's a validation focused on the datasets.\nIn fact, first of all, for each dataset the hyperparameter tuning is performed: in this way the best value is selected\nand its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each\ndataset the function `hyperparameter_validation` is applied). Finally, after that, the best dataset is selected.\nIt's a two-levels selection.\n\nThis selection is made using the validation score (i.e. the best couple dataset-hyperparameter value is the one with the\nbest validation score).\nThe validation score is computed by splitting each dataset into the training-test sets and then by applying the cross\nvalidation on the training set.\nAdditionally, the training and test scores are also computed.\n\nOptionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset\nselection. This is the 'main' plot.\nMoreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the\n`hyperparameter_values` are plotted, making a graphical visualization of the hyperparameter tuning on that dataset.\n(As the plot made by the `hyperparameter_validation` function).\n\nParameters\n----------\ndataset_list: list\n List of couples, where each couple is a dataset.\n - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.\n - The second element is y, the mono dimensional np.array containing the response feature of the dataset.\nmodel: sklearn.base.BaseEstimator\n Model which has the specified `hyperparameter`.\nhyperparameter: str\n The name of the hyperparameter that has to be validated.\nhyperparameter_values: list\n List of values for `hyperparameter` that have to be taken into account in the selection.\nscale: bool\n Indicates whether to scale or not the features in 'X' (for all the datasets).\n (The scaling is performed using the sklearn MinMaxScaler).\ntest_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).\ntime_series: bool\n Indicates if the given datasets are time series dataset (i.e. datasets indexed by days).\n (This affects the computing of the validation scores).\nrandom_state: int\n Used in the training-test splitting of the datasets.\nn_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\nregr: bool\n Indicates if it's either a regression or a classification problem.\nplot: bool\n Indicates whether to plot or not the validation score values of the datasets (i.e. 
this is the 'main' plot).\nplot_train: bool\n Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).\nxvalues: list (in general, iterable)\n Values that have to be put in the x axis of the 'main' plot.\nxlabel: str\n Label of the x axis of the 'main' plot.\ntitle: str\n Title of the 'main' plot.\nfigsize: tuple\n Two dimensions of the 'main' plot.\nverbose: bool\n If True, for each dataset are plotted the validation scores of the hyperparameter tuning (these are the 'secondary'\n plots).\n (See 'hyperparameter_validation').\nfigsize_verbose: tuple\n Two dimensions of the 'secondary' plots.\n\nReturns\n----------\ndatasets_train_val_score: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.\ndatasets_best_hyperparameter_value: list\n List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For\n each dataset, it contains the best `hyperparameter` value on that dataset.\nbest_index: int\n Index of `dataset_list` that indicates which is the best dataset.\ntest_score: float\n Test score associated with the best couple dataset-hyperparameter value.\naxes: list\n List of the matplotlib Axes where the plots have been made.\n Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any).\n If no plot has been made, `axes` is an empty list.\n\nSee also\n----------\nhyperparameter_validation:\n select the best value for the specified hyperparameter of the specified model on the given dataset.\n\nNotes\n----------\n- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n couple dataset-hyperparameter value is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the\n maximum validation score.\n- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\nSelect the best dataset and the best combination of values for the specified hyperparameters of the specified model (i.e.\nselect the best couple dataset-combination of hyperparameters values).\n\nFor each dataset in `dataset_list`, all the possible combinations of the hyperparameters values for `model` (specified\nwith `param_grid`) are tested.\nIn other words, on each dataset the tuning of the specified hyperparameters is performed in an exhaustive way: in fact,\non each dataset, the function `hyperparameters_validation` is applied. (See `hyperparameters_validation`).\nIn the end, the best couple dataset-combination of hyperparameters values is selected.\n\nDespite the fact that a couple dataset-combination of hyperparameters values is selected, the main viewpoint is focused\nwith respect to the datasets. 
It's a validation focused on the datasets.\nIn fact, first of all, for each dataset the hyperparameters tuning is performed: in this way the best combination of\nvalues is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other\nwords, on each dataset the function `hyperparameters_validation` is applied). Finally, after that, the best dataset is\nselected. It's a two-levels selection.\n\nThis selection is made using the validation score (i.e. the best couple dataset-combination of hyperparameters values, is\nthe one with best validation score).\nThe validation score is computed by splitting each dataset into the training-test sets and then by applying the cross\nvalidation on the training set.\nAdditionally, the training and test scores are also computed.\n\nOptionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset\nselection.\n\nParameters\n----------\ndataset_list: list\n List of couple, where each couple is a dataset.\n - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.\n - The second element is y, the mono dimensional np.array containing the response feature of the dataset.\nmodel: sklearn.base.BaseEstimator\n Model which has the specified hyperparameters.\nparam_grid: dict\n Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of\n values to test.\nscale: bool\n Indicates whether to scale or not the features in 'X' (for all the datasets).\n (The scaling is performed using the sklearn MinMaxScaler).\ntest_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).\ntime_series: bool\n Indicates if the given datasets are time series datasets (i.e. datasets indexed by days).\n (This affects the computing of the validation score).\nrandom_state: int\n Used in the training-test splitting of the datasets.\nn_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\nregr: bool\n Indicates if it's either a regression or a classification problem.\nplot: bool\n Indicates whether to plot or not the validation score values of the datasets.\nplot_train: bool\n Indicates whether to plot also the training scores.\n (It's considered only if `plot` is True).\nxvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\nxlabel: str\n Label of the x axis of the plot.\ntitle: str\n Title of the plot.\nfigsize: tuple\n Two dimensions of the plot.\n\nReturns\n----------\ndatasets_train_val_score: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.\ndatasets_best_params: list\n List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). 
For\n each dataset, it contains the best combination of hyperparameters values on that dataset.\n Each combination is represented as a dictionary, with keys the hyperparameters names and values the associated\n values.\nbest_index: int\n Index of `dataset_list` that indicates which is the best dataset.\ntest_score: float\n Test score associated with the best couple dataset-combination of hyperparameters values.\nax: matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n\nSee also\n----------\nhyperparameters_validation:\n select the best combination of values for the specified hyperparameters of the specified model on the given dataset.\n\nNotes\n----------\n- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n couple dataset-combination of hyperparameters values is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the\n maximum validation score.\n- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\nSelect the best dataset and the best model (i.e. select the best couple dataset-model).\n\nFor each dataset in `dataset_list`, all the models in `model_paramGrid_list` are tested: each model is tested performing\nan exhaustive tuning of the specified hyperparameters. In fact, `model_paramGrid_list` also contains, for each model, the\ngrid of the hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for\neach specified hyperparameter of the model).\nIn other words, on each dataset the selection of the best model is performed: in fact, on each dataset, the function\n`models_validation` is applied. (See `models_validation`).\nIn the end, the best couple dataset-model is selected.\n\nDespite the fact that a couple dataset-model is selected, the main viewpoint is focused with respect to the datasets.\nIt's a validation focused on the datasets.\nIn fact, first of all, for each dataset the model selection is performed: in this way the best model is selected\nand its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each\ndataset the function `models_validation` is applied). Finally, after that, the best dataset is selected.\nIt's a two-levels selection.\n\nThis selection is made using the validation score (i.e. the best couple dataset-model is the one with best validation\nscore).\nThe validation score is computed by splitting each dataset into the training-test sets and then by applying the cross\nvalidation on the training set.\nAdditionally, the training and test scores are also computed.\n\nOptionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset\nselection. This is the 'main' plot.\nMoreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the models are\nplotted, making a graphical visualization of the models selection on that dataset. 
(As the plot made by the\n`models_validation` function).\n\nParameters\n----------\ndataset_list: list\n List of couples, where each couple is a dataset.\n - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.\n - The second element is y, the mono dimensional np.array containing the response feature of the dataset.\nmodel_paramGrid_list: list\n List that specifies the models and the relative grid of hyperparameters to be tested.\n It's a list of triples (i.e. tuples), where each triple represents a model:\n - the first element is a string, which is a mnemonic name of that model;\n - the second element is the sklearn model;\n - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same\n structure of parameter `param_grid` of the function `hyperparameters_validation`.\nscale_list: list or bool\n List of booleans, which has as many elements as the number of models to test (i.e. number of elements in the\n `model_paramGrid_list` list).\n This list indicates, for each different model, if the features in 'X' have to be scaled or not (for all the datasets).\n `scale_list` can be None or False: in this case the 'X' features aren't scaled for any model. `scale_list` can be\n True: in this case the 'X' features are scaled for all the models.\ntest_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).\ntime_series: bool\n Indicates if the given datasets are time series dataset (i.e. datasets indexed by days).\n (This affects the computing of the validation score).\nrandom_state: int\n Used in the training-test splitting of the datasets.\nn_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\nregr: bool\n Indicates if it's either a regression or a classification problem.\nplot: bool\n Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot).\nplot_train: bool\n Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).\nxvalues: list (in general, iterable)\n Values that have to be put in the x axis of the 'main' plot.\nxlabel: str\n Label of the x axis of the 'main' plot.\ntitle: str\n Title of the 'main' plot.\nfigsize: tuple\n Two dimensions of the 'main' plot.\nverbose: bool\n If True, for each dataset the validation scores of the models are plotted (i.e. these are the 'secondary' plots).\n (See 'models_validation').\nfigsize_verbose: tuple\n Two dimensions of the 'secondary' plots.\n\nReturns\n----------\ndatasets_train_val_score: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.\ndatasets_best_model: list\n List which has as many elements as the number of the datasets (i.e. number of elements in `dataset_list`). For\n each dataset, it contains the best model for that dataset.\n More precisely, it is a list of triple:\n - the first element is the index of `model_paramGrid_list` which indicates the best model;\n - the second element is the mnemonic name of the best model;\n - the third element is the best combination of hyperparameters values on that best model (i.e. 
it's a dictionary\n which has as keys the hyperparameters names and as values their associated values).\nbest_index: int\n Index of `dataset_list` that indicates which is the best dataset.\ntest_score: float\n Test score associated with the best couple dataset-model.\naxes: list\n List of the matplotlib Axes where the plots have been made.\n Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any).\n If no plot has been made, `axes` is an empty list.\n\nSee also\n----------\nmodels_validation: select the best model on the given dataset.\n\nNotes\n----------\n- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n couple dataset-model is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the\n maximum validation score.\n- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\nSelect the best value for the specified hyperparameter of the specified model on the given dataset.\n\nIn other words, perform the tuning of the `hyperparameter` among the values in `hyperparameter_values`.\n\nThis selection is made using the validation score (i.e. the best hyperparameter value is the one with the best validation\nscore).\nThe validation score is computed by splitting the dataset into the training-test sets and then by applying the cross\nvalidation on the training set.\nAdditionally, the training and test scores are also computed.\n\nOptionally, the validation scores of the `hyperparameter_values` can be plotted, making a graphical visualization of the\nselection.\n\nParameters\n----------\nX: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\ny: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\nmodel: sklearn.base.BaseEstimator\n Model which has the specified `hyperparameter`.\nhyperparameter: str\n The name of the hyperparameter that has to be validated.\nhyperparameter_values: list\n List of values for `hyperparameter` that have to be taken into account in the selection.\nscale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\ntest_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\ntime_series: bool\n Indicates if the given dataset is a time series dataset (i.e. 
dataset indexed by days).\n (This affects the computing of the validation score).\nrandom_state: int\n Used in the training-test splitting of the dataset.\nn_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\nregr: bool\n Indicates if it's either a regression or a classification problem.\nplot: bool\n Indicates whether to plot or not the validation score values.\nplot_train: bool\n Indicates whether to plot also the training scores.\n (It's considered only if `plot` is True).\nxvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\nxlabel: str\n Label of the x axis of the plot.\ntitle: str\n Title of the plot.\nfigsize: tuple\n Two dimensions of the plot.\n\nReturns\n----------\ntrain_val_scores: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of values in `hyperparameter_values` (i.e. number of values to be tested).\nbest_index: int\n Index of `hyperparameter_values` that indicates which is the best hyperparameter value.\ntest_score: float\n Test score associated with the best hyperparameter value.\nax: matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n If `plot` is False, then it is None.\n\nNotes\n----------\n- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n hyperparameter value is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best hyperparameter value is the one associated\n with the maximum validation score.\n- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\nSelect the best combination of values for the specified hyperparameters of the specified model on the given dataset.\n\nIn other words, perform the tuning of multiple hyperparameters.\nThe parameter `param_grid` is a dictionary that indicates which are the specified hyperparameters and what are the\nassociated values to test.\n\nAll the possible combinations of values are tested, in an exhaustive way (i.e. grid search).\n\nThis selection is made using the validation score (i.e. 
the best combination of hyperparameters values is the one with\nthe best validation score).\nThe validation score is computed by splitting the dataset into the training-test sets and then by applying the cross\nvalidation on the training set.\nAdditionally, the training and test scores are also computed.\n\nParameters\n----------\nX: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\ny: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\nmodel: sklearn.base.BaseEstimator\n Model which has the specified hyperparameters.\nparam_grid: dict\n Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of\n values to test.\nscale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\ntest_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\ntime_series: bool\n Indicates if the given dataset is a time series dataset (i.e. dataframe indexed by days).\n (This affects the computing of the validation score).\nrandom_state: int\n Used in the training-test splitting of the dataset.\nn_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\nregr: bool\n Indicates if it's either a regression or a classification problem.\n\nReturns\n----------\nparams: list\n List which enumerates all the possible combinations of hyperparameters values.\n It's a list of dictionaries: each dictionary represents a specific combination of hyperparameters values. (It's a\n dictionary which has as keys the hyperparameters names and as values the specific associated values of that combination).\ntrain_val_scores: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of possible combinations of the hyperparameters values.\n (It has as many rows as the elements of `params`).\nbest_index: int\n Index of `params` that indicates which is the best combination of hyperparameters values.\ntest_score: float\n Test score associated with the best combination of hyperparameters values.\n\nNotes\n----------\n- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n combination of hyperparameters values is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best combination of hyperparameters values is the\n one associated with the maximum validation score.\n- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\nSelect the best model on the given dataset.\n\nThe parameter `model_paramGrid_list` is the list of the models to test. It also contains, for each model, the grid of\nhyperparameters that have to be tested on that model (i.e. 
the grid which contains the values to test for each\nspecified hyperparameter of the model).\n(That grid has the same structure as the `param_grid` parameter of the function `hyperparameters_validation`. See\n`hyperparameters_validation`).\n\nFor each specified model, the best combination of hyperparameters values is selected in an exhaustive way (i.e. grid\nsearch).\nActually, the function `hyperparameters_validation` is used.\n(See `hyperparameters_validation`).\n\nThe selection of the best model is made using the validation score (i.e. the best model is the one with the best\nvalidation score).\nThe validation score is computed by splitting the dataset into the training-test sets and then by applying the cross\nvalidation on the training set.\nAdditionally, the training and test scores are also computed.\n\nOptionally, the validation scores of the different models can be plotted, making a graphical visualization of the\nselection.\n\nParameters\n----------\nX: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\ny: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\nmodel_paramGrid_list: list\n List that specifies the models and the relative grids of hyperparameters to be tested.\n It's a list of triples (i.e. tuples), where each triple represents a model:\n - the first element is a string, which is a mnemonic name of that model;\n - the second element is the sklearn model;\n - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same\n structure of the parameter `param_grid` of the function `hyperparameters_validation`.\nscale_list: list or bool\n List of booleans, which has as many elements as the models to test (i.e. as the elements of the\n `model_paramGrid_list` list).\n This list indicates, for each different model, if the features in `X` have to be scaled or not.\n `scale_list` can be None or False: in this case the `X` features aren't scaled for any model. `scale_list` can be\n True: in this case the `X` features are scaled for all the models.\ntest_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\ntime_series: bool\n Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).\n (This affects the computing of the validation score).\nrandom_state: int\n Used in the training-test splitting of the dataset.\nn_folds: int\n Indicates how many folds are made in order to compute the k-fold cross validation.\n (It's used only if `time_series` is False).\nregr: bool\n Indicates if it's either a regression or a classification problem.\nplot: bool\n Indicates whether to plot or not the validation score values.\nplot_train: bool\n Indicates whether to plot also the training scores.\n (It's considered only if `plot` is True).\nxvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\nxlabel: str\n Label of the x axis of the plot.\ntitle: str\n Title of the plot.\nfigsize: tuple\n Two dimensions of the plot.\n\nReturns\n----------\nmodels_train_val_score: np.array\n Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation\n scores.\n It has as many rows as the number of models to test (i.e. 
number of elements in the `model_paramGrid_list` list).\nmodels_best_params: list\n List which indicates, for each model, the best combination of the hyperparameters values for that model.\n It has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list), and it\n contains dictionaries: each dictionary represents the best combination of the hyperparameters values for the\n associated model.\nbest_index: int\n Index of `model_paramGrid_list` that indicates which is the best model.\ntest_score: float\n Test score associated with the best model.\nax: matplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n If `plot` is False, then it is None.\n\nSee also\n----------\nhyperparameters_validation:\n select the best combination of values for the specified hyperparameters of the specified model on the given dataset.\n\nNotes\n----------\n- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best\n model is the one associated with the minimum validation score.\n Otherwise, the validation scores are accuracies: this means that the best model is the one associated with the\n maximum validation score.\n- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross\n validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.\n Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two\n contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.\nPlot the predictions made by the given model on the given dataset, versus its actual values.\n\nThe dataset is split into training-test sets: the former is used to train the `model`, on the latter the predictions are\nmade.\n\nParameters\n----------\nX: np.array\n Two-dimensional np.array, containing the explanatory features of the dataset.\ny: np.array\n Mono dimensional np.array, containing the response feature of the dataset.\nmodel: sklearn.base.BaseEstimator\n Model used to make the predictions.\nscale: bool\n Indicates whether to scale or not the features in `X`.\n (The scaling is performed using the sklearn MinMaxScaler).\ntest_size: float\n Decimal number between 0 and 1, which indicates the proportion of the test set.\nplot_type: int\n Indicates the type of the plot.\n - 0 -> In the same plot two different curves are drawn: the first has on the x axis `xvalues` and on the y axis\n the actual values (i.e. `y`); the second has on the x axis `xvalues` and on the y axis the computed\n predicted values.\n - 1 -> On the x axis the actual values are put, on the y axis the predicted ones.\nxvalues: list (in general, iterable)\n Values that have to be put in the x axis of the plot.\n (It's used only if `plot_type` is 0).\nxlabel: str\n Label of the x axis of the plot.\n (It's used only if `plot_type` is 0).\ntitle: str\n Title of the plot.\nfigsize: tuple\n Two dimensions of the plot.\n\nReturns\n----------\nmatplotlib.axes.Axes\n The matplotlib Axes where the plot has been made.\n\nNotes\n----------\nThe splitting of the datasets into the training-test sets is simply made by dividing the dataset into two contiguous\nsequences.\nI.e. it is the same technique used usually when the dataset is a time series dataset. 
(This is done in order to simplify\nthe visualization).\nFor this reason, typically this function is applied on time series datasets.\nModule for the selection of machine learning models.\n\nThere are several different functions which can perform the model selection: all of them have an intuitive interface, but\nare also powerful and flexible.\nIn addition, almost all these functions can optionally make plots, which sum up the performed selection in a visual way.\n\nThese different functions perform the model selection in different contexts, i.e. each function is specifically meant for a\nspecific scenario. Certain contexts are more specific, and other are more general.\nOn the whole, there are six different model selection functions, divided into two main groups:\n 1. functions that perform the model selection with respect to a **single dataset**;\n 2. functions that perform the model selection with respect to **multiple datasets**.\n\nThe six functions, sorted from the most specific context to the most general one, are:\n - *hyperparameter_validation*, *hyperparameters_validation*, *models_validation* (single dataset);\n - *datasets_hyperparameter_validation*, *datasets_hyperparameters_validation*, *datasets_models_validation* (multiple\n datasets).\n\nThis module deeply uses the **numpy** library. It is built on the top of it. In fact, the datasets are represented as np.array.\nMoreover, the plots are made using the **matplotlib** library. In addition, it is built on the top of the **sklearn** module:\n- the machine learning models are represented as sklearn models (i.e. sklearn estimators);\n- under the hood, the selection is performed using the grid search cross validation provided by sklearn (i.e.\nGridSearchCV);\n- several other operations are done using the functionalities provided by sklearn.\n\nThis module, besides the model selection functions, contains also some utilities:\n- the PolynomialRegression class;\n- some utility functions.\n\n---------------------------------------------------------------------------------------------------------------------------- POLYNOMIAL REGRESSOR MODEL---------------------------------------------------------------------------------------------------------------------------- UTILITY FUNCTIONS Split into training e test. Random splitting (not time series) time series splitting Scale the features in X Cross validation k-fold cross validation cross validation for time series validation score Fit the model using all the training Compute training and test scores Return a triple Scale the features in `X` Vector 'vector_ypred': at the beginning is a list of lists (i.e. two dimensional list). In the end it will be a matrix which has as many rows as `N_TESTS` (each row corresponds to a sample) and as many columns as the number of instances in `X` (each column is a point of the dataset). Row 'i' --> there are the predictions made by the model on the sample 'i' using all the dataset points. Column 'j' --> there are the predictions made by the model on the point 'j' using all the `N_TESTS` samples. Iterate through N_TESTS. At each iteration extract a new sample and fit the model on it. Extract a new sample (sample 'i') Fit the model on this sample 'i' Add the predictions made by the model on all the dataset points Transform into numpy array Vector that has as many elements as the dataset points, and for each of them it has the associated bias^2 computed on the `N_TEST` samples. 
Vector that has as many elements as the dataset points, and for each of them it has the associated variance computed on the `N_TEST` samples. Vector that has as many elements as the dataset points, and for each of them it has the associated error computed on the `N_TEST` samples. Total bias^2 of the model Total variance of the model Total error of the model Return a triple Scale the features in X Fit using all the training set Points Plot also the training scores Validation scores Bars Plot also the training scores The label locations The width of the bars Validation scores---------------------------------------------------------------------------------------------------------------------------- FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO A SINGLE DATASET Create the hyperparameter grid Call the function for the validation of an arbitrary number of hyperparameters Make the plot Default values on the x axis Default label on the x axis Split into training-test sets Random splitting Time series splitting Scale the features in `X` Cross validation strategy The strategy is the classic k-fold cross validation Time series cross validation strategy Grid search List of all the possible combinations of hyperparameters values List where for all the possible combinations of hyperparameters values there is the associated training score List where for all the possible combinations of hyperparameters values there is the associated validation score Index of `params`, corresponding to the best combination of hyperparameters values Model with the best combination of hyperparameters values The scores are negative: multiply by -1 Fit the best model on all the training set Compute the test score of the best model `scale_list` is either None or False `scale_list` is True Numpy matrix (np.array) which has as many rows as the models and which has two columns, one for the training scores and the other for the validation scores. At the beginning it is a list of tuples. List which has as many elements as the models: for each model there is the dictionary of the best combination of hyperparameters values. List which has as many elements as the models: for each model there is the test score (associated with the best combination of hyperparameters values). Iterate through all the couples model-param_grid Apply the grid search on model-param_grid Add the row for that model Add the element for that model Add the element for that model Transform into numpy matrix (i.e. np.array) Find the best index (i.e. the best model) Test score of the best model Make the plot Default values for the x axis---------------------------------------------------------------------------------------------------------------------------- FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO MULTIPLE DATASETS numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as columns. At the beginning it is a list. List which contains, for each dataset, the best hyperparameter value List which contains, for each dataset, its test score (associated with the best hyperparameter value) List of axes Iterate through all the datasets Perform the hyperparameter tuning on the current dataset Add the row related to that dataset Add the element related to that dataset Add the row related to that dataset Transform into numpy Find the best index, i.e. 
the best dataset (more precisely, the best couple dataset-hyperparameter value) Test score of the best couple dataset-hyperparameter value Make the plot Default values on the x axis numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as columns . At the beginning it is a list. List which contains, for each dataset, the best combination of hyperparameters values (i.e. a dictionary) List which contains, for each dataset, its test score (associated to the best combination of hyperparameters values) Iterate through all the datasets Perform the exaustive hyperparameters tuning on the current dataset Add the row related to that dataset Add the element related to that dataset Add the row related to that dataset Transform into numpy Find the best index, i.e. the best dataset (more precisely, the best couple dataset-combination of hyperparameters values) Test score of the best couple dataset-combination of hyperparameters values Make the plot Default values on the x axis numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as columns. At the beginning it is a list. List which contains, for each dataset, the best model. I.e. there is the triple index-model name-best combination of hyperparameters values List which contains, for each dataset, its test score (associated to the best model) List of axes Iterate through all the datasets Perform the models validation on the current dataset Add the row related to that dataset Add the element related to that dataset Add the element related to that dataset Transform into numpy Find the best index, i.e. the best dataset (more precisely, the best couple dataset-model) Test score of the best couple dataset-model Make the plot Default values on the x axis"},"nl_size":{"kind":"number","value":45537,"string":"45,537"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8381555676460266,"string":"0.838156"}}},{"rowIdx":587,"cells":{"content":{"kind":"string","value":"#!/usr/bin/env python3\nimport importlib.machinery as imm\nimport logging\nimport pathlib\nimport re\n\nimport configargparse\n\n\nclass ModuleInfo:\n def __init__(self, path):\n self.path = pathlib.Path(path)\n name = str(self.path.parent / self.path.stem)\n name = name.replace(\"/\", \".\")\n self.name = re.sub(r\"^[\\.]+\", \"\", name)\n self.module = imm.SourceFileLoader(self.name, path).load_module()\n if not hasattr(self.module, \"get_parser\"):\n raise ValueError(f\"{path} does not have get_parser()\")\n\n\ndef get_parser():\n parser = configargparse.ArgumentParser(\n description='generate RST from argparse options',\n config_file_parser_class=configargparse.YAMLConfigFileParser,\n formatter_class=configargparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('src', type=str, nargs='+',\n help='source python files that contain get_parser() func')\n return parser\n\n\n# parser\nargs = get_parser().parse_args()\n\n\nmodinfo = []\n\nfor p in args.src:\n if \"__init__.py\" in p:\n continue\n modinfo.append(ModuleInfo(p))\n\n\n# print refs\nfor m in modinfo:\n logging.info(f\"processing: {m.path.name}\")\n d = m.module.get_parser().description\n assert d is not None\n print(f\"- :ref:`{m.path.name}`: {d}\")\n\nprint()\n\n# print argparse\nfor m in modinfo:\n cmd = m.path.name\n sep = \"~\" * len(cmd)\n print(f\"\"\"\n\n.. _{cmd}:\n\n{cmd}\n{sep}\n\n.. 
argparse::\n :module: {m.name}\n :func: get_parser\n :prog: {cmd}\n\n\"\"\")\n"},"path":{"kind":"string","value":"doc/argparse2rst.py"},"size":{"kind":"number","value":1513,"string":"1,513"},"nl_text":{"kind":"string","value":"!/usr/bin/env python3 parser print refs print argparse"},"nl_size":{"kind":"number","value":54,"string":"54"},"nl_language":{"kind":"string","value":"de"},"nl_language_score":{"kind":"number","value":0.0805329754948616,"string":"0.080533"}}},{"rowIdx":588,"cells":{"content":{"kind":"string","value":"\"\"\"\nutil_list module. Contains the mflist class.\n This classes encapsulates modflow-style list inputs away\n from the individual packages. The end-user should not need to\n instantiate this class directly.\n\n some more info\n\n\"\"\"\nfrom __future__ import division, print_function\n\nimport os\nimport warnings\nimport numpy as np\nfrom ..datbase import DataInterface, DataListInterface, DataType\nfrom ..utils.recarray_utils import create_empty_recarray\n\ntry:\n from numpy.lib import NumpyVersion\n\n numpy114 = NumpyVersion(np.__version__) >= \"1.14.0\"\nexcept ImportError:\n numpy114 = False\n\n\nclass MfList(DataInterface, DataListInterface):\n \"\"\"\n a generic object for handling transient boundary condition lists\n\n Parameters\n ----------\n package : package object\n The package object (of type :class:`flopy.pakbase.Package`) to which\n this MfList will be added.\n data : varies\n the data of the transient list (optional). (the default is None)\n\n Attributes\n ----------\n mxact : int\n the max number of active bc for any stress period\n\n Methods\n -------\n add_record(kper,index,value) : None\n add a record to stress period kper at index location\n write_transient(f) : None\n write the transient sequence to the model input file f\n check_kij() : None\n checks for boundaries outside of model domain - issues warnings only\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n\n \"\"\"\n\n def __init__(\n self,\n package,\n data=None,\n dtype=None,\n model=None,\n list_free_format=None,\n binary=False,\n ):\n\n if isinstance(data, MfList):\n for attr in data.__dict__.items():\n setattr(self, attr[0], attr[1])\n if model is None:\n self._model = package.parent\n else:\n self._model = model\n self._package = package\n return\n\n self._package = package\n if model is None:\n self._model = package.parent\n else:\n self._model = model\n if dtype is None:\n assert isinstance(self.package.dtype, np.dtype)\n self.__dtype = self.package.dtype\n else:\n self.__dtype = dtype\n self.__binary = binary\n self.__vtype = {}\n self.__data = {}\n if data is not None:\n self.__cast_data(data)\n self.__df = None\n if list_free_format is None:\n if package.parent.version == \"mf2k\":\n list_free_format = False\n self.list_free_format = list_free_format\n return\n\n @property\n def name(self):\n return self.package.name\n\n @property\n def mg(self):\n return self._model.modelgrid\n\n @property\n def sr(self):\n return self.mg.sr\n\n @property\n def model(self):\n return self._model\n\n @property\n def package(self):\n return self._package\n\n @property\n def data_type(self):\n return DataType.transientlist\n\n @property\n def plotable(self):\n return True\n\n def get_empty(self, ncell=0):\n d = create_empty_recarray(ncell, self.dtype, default_value=-1.0e10)\n return d\n\n def export(self, f, **kwargs):\n from flopy import export\n\n return export.utils.mflist_export(f, self, **kwargs)\n\n def append(self, other):\n \"\"\" append the recarrays from one MfList to 
another\n Parameters\n ----------\n other: variable: an item that can be cast in to an MfList\n that corresponds with self\n Returns\n -------\n dict of {kper:recarray}\n \"\"\"\n if not isinstance(other, MfList):\n other = MfList(\n self.package,\n data=other,\n dtype=self.dtype,\n model=self._model,\n list_free_format=self.list_free_format,\n )\n msg = (\n \"MfList.append(): other arg must be \"\n + \"MfList or dict, not {0}\".format(type(other))\n )\n assert isinstance(other, MfList), msg\n\n other_kpers = list(other.data.keys())\n other_kpers.sort()\n\n self_kpers = list(self.data.keys())\n self_kpers.sort()\n\n new_dict = {}\n for kper in range(self._model.nper):\n other_data = other[kper].copy()\n self_data = self[kper].copy()\n\n other_len = other_data.shape[0]\n self_len = self_data.shape[0]\n\n if (other_len == 0 and self_len == 0) or (\n kper not in self_kpers and kper not in other_kpers\n ):\n continue\n elif self_len == 0:\n new_dict[kper] = other_data\n elif other_len == 0:\n new_dict[kper] = self_data\n else:\n new_len = other_data.shape[0] + self_data.shape[0]\n new_data = np.recarray(new_len, dtype=self.dtype)\n new_data[:self_len] = self_data\n new_data[self_len : self_len + other_len] = other_data\n new_dict[kper] = new_data\n\n return new_dict\n\n def drop(self, fields):\n \"\"\"drop fields from an MfList\n\n Parameters\n ----------\n fields : list or set of field names to drop\n\n Returns\n -------\n dropped : MfList without the dropped fields\n \"\"\"\n if not isinstance(fields, list):\n fields = [fields]\n names = [n for n in self.dtype.names if n not in fields]\n dtype = np.dtype(\n [(k, d) for k, d in self.dtype.descr if k not in fields]\n )\n spd = {}\n for k, v in self.data.items():\n # because np 1.9 doesn't support indexing by list of columns\n newarr = np.array([self.data[k][n] for n in names]).transpose()\n newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(\n np.recarray\n )\n for n in dtype.names:\n newarr[n] = self.data[k][n]\n spd[k] = newarr\n return MfList(self.package, spd, dtype=dtype)\n\n @property\n def data(self):\n return self.__data\n\n @property\n def df(self):\n if self.__df is None:\n self.__df = self.get_dataframe()\n return self.__df\n\n @property\n def vtype(self):\n return self.__vtype\n\n @property\n def dtype(self):\n return self.__dtype\n\n # Get the itmp for a given kper\n def get_itmp(self, kper):\n if kper not in list(self.__data.keys()):\n return None\n if self.__vtype[kper] is None:\n return -1\n # If an external file, have to load it\n if self.__vtype[kper] == str:\n return self.__fromfile(self.__data[kper]).shape[0]\n if self.__vtype[kper] == np.recarray:\n return self.__data[kper].shape[0]\n # If not any of the above, it must be an int\n return self.__data[kper]\n\n @property\n def mxact(self):\n mxact = 0\n for kper in list(self.__data.keys()):\n mxact = max(mxact, self.get_itmp(kper))\n return mxact\n\n @property\n def fmt_string(self):\n \"\"\"Returns a C-style fmt string for numpy savetxt that corresponds to\n the dtype\"\"\"\n if self.list_free_format is not None:\n use_free = self.list_free_format\n else:\n use_free = True\n if self.package.parent.has_package(\"bas6\"):\n use_free = self.package.parent.bas6.ifrefm\n # mt3d list data is fixed format\n if \"mt3d\" in self.package.parent.version.lower():\n use_free = False\n fmts = []\n for field in self.dtype.descr:\n vtype = field[1][1].lower()\n if vtype in (\"i\", \"b\"):\n if use_free:\n fmts.append(\"%9d\")\n else:\n fmts.append(\"%10d\")\n elif vtype == 
\"f\":\n if use_free:\n if numpy114:\n # Use numpy's floating-point formatter (Dragon4)\n fmts.append(\"%15s\")\n else:\n fmts.append(\"%15.7E\")\n else:\n fmts.append(\"%10G\")\n elif vtype == \"o\":\n if use_free:\n fmts.append(\"%9s\")\n else:\n fmts.append(\"%10s\")\n elif vtype == \"s\":\n msg = (\n \"MfList.fmt_string error: 'str' type found in dtype. \"\n \"This gives unpredictable results when \"\n \"recarray to file - change to 'object' type\"\n )\n raise TypeError(msg)\n else:\n raise TypeError(\n \"MfList.fmt_string error: unknown vtype in \"\n \"field: {}\".format(field)\n )\n if use_free:\n fmt_string = \" \" + \" \".join(fmts)\n else:\n fmt_string = \"\".join(fmts)\n return fmt_string\n\n # Private method to cast the data argument\n # Should only be called by the constructor\n def __cast_data(self, data):\n # If data is a list, then all we can do is try to cast it to\n # an ndarray, then cast again to a recarray\n if isinstance(data, list):\n # warnings.warn(\"MfList casting list to array\")\n try:\n data = np.array(data)\n except Exception as e:\n raise Exception(\n \"MfList error: casting list to ndarray: \" + str(e)\n )\n\n # If data is a dict, the we have to assume it is keyed on kper\n if isinstance(data, dict):\n if not list(data.keys()):\n raise Exception(\"MfList error: data dict is empty\")\n for kper, d in data.items():\n try:\n kper = int(kper)\n except Exception as e:\n raise Exception(\n \"MfList error: data dict key \"\n + \"{0:s} not integer: \".format(kper)\n + str(type(kper))\n + \"\\n\"\n + str(e)\n )\n # Same as before, just try...\n if isinstance(d, list):\n # warnings.warn(\"MfList: casting list to array at \" +\\\n # \"kper {0:d}\".format(kper))\n try:\n d = np.array(d)\n except Exception as e:\n raise Exception(\n \"MfList error: casting list \"\n + \"to ndarray: \"\n + str(e)\n )\n\n # super hack - sick of recarrays already\n # if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1):\n # d = d.view(np.recarray)\n\n if isinstance(d, np.recarray):\n self.__cast_recarray(kper, d)\n elif isinstance(d, np.ndarray):\n self.__cast_ndarray(kper, d)\n elif isinstance(d, int):\n self.__cast_int(kper, d)\n elif isinstance(d, str):\n self.__cast_str(kper, d)\n elif d is None:\n self.__data[kper] = -1\n self.__vtype[kper] = None\n else:\n raise Exception(\n \"MfList error: unsupported data type: \"\n + str(type(d))\n + \" at kper \"\n + \"{0:d}\".format(kper)\n )\n\n # A single recarray - same MfList for all stress periods\n elif isinstance(data, np.recarray):\n self.__cast_recarray(0, data)\n # A single ndarray\n elif isinstance(data, np.ndarray):\n self.__cast_ndarray(0, data)\n # A single filename\n elif isinstance(data, str):\n self.__cast_str(0, data)\n else:\n raise Exception(\n \"MfList error: unsupported data type: \" + str(type(data))\n )\n\n def __cast_str(self, kper, d):\n # If d is a string, assume it is a filename and check that it exists\n assert os.path.exists(d), (\n \"MfList error: dict filename (string) '\"\n + d\n + \"' value for \"\n + \"kper {0:d} not found\".format(kper)\n )\n self.__data[kper] = d\n self.__vtype[kper] = str\n\n def __cast_int(self, kper, d):\n # If d is an integer, then it must be 0 or -1\n if d > 0:\n raise Exception(\n \"MfList error: dict integer value for \"\n \"kper {0:10d} must be 0 or -1, \"\n \"not {1:10d}\".format(kper, d)\n )\n if d == 0:\n self.__data[kper] = 0\n self.__vtype[kper] = None\n else:\n self.__data[kper] = -1\n self.__vtype[kper] = None\n\n def __cast_recarray(self, kper, d):\n assert d.dtype == 
self.__dtype, (\n \"MfList error: recarray dtype: \"\n + str(d.dtype)\n + \" doesn't match \"\n + \"self dtype: \"\n + str(self.dtype)\n )\n self.__data[kper] = d\n self.__vtype[kper] = np.recarray\n\n def __cast_ndarray(self, kper, d):\n d = np.atleast_2d(d)\n if d.dtype != self.__dtype:\n assert d.shape[1] == len(self.dtype), (\n \"MfList error: ndarray \"\n + \"shape \"\n + str(d.shape)\n + \" doesn't match dtype \"\n + \"len: \"\n + str(len(self.dtype))\n )\n # warnings.warn(\"MfList: ndarray dtype does not match self \" +\\\n # \"dtype, trying to cast\")\n try:\n self.__data[kper] = np.core.records.fromarrays(\n d.transpose(), dtype=self.dtype\n )\n except Exception as e:\n raise Exception(\n \"MfList error: casting ndarray to recarray: \" + str(e)\n )\n self.__vtype[kper] = np.recarray\n\n def get_dataframe(self, squeeze=True):\n \"\"\"\n Cast recarrays for stress periods into single\n dataframe containing all stress periods.\n\n Parameters\n ----------\n squeeze : bool\n Reduce number of columns in dataframe to only include\n stress periods where a variable changes.\n\n Returns\n -------\n df : dataframe\n Dataframe of shape nrow = ncells, ncol = nvar x nper. If\n the squeeze option is chosen, nper is the number of\n stress periods where at least one cells is different,\n otherwise it is equal to the number of keys in MfList.data.\n\n Notes\n -----\n Requires pandas.\n\n \"\"\"\n try:\n import pandas as pd\n except Exception as e:\n msg = \"MfList.get_dataframe() requires pandas\"\n raise ImportError(msg)\n\n # make a dataframe of all data for all stress periods\n names = [\"k\", \"i\", \"j\"]\n if \"MNW2\" in self.package.name:\n names += [\"wellid\"]\n\n # find relevant variable names\n # may have to iterate over the first stress period\n for per in range(self._model.nper):\n if hasattr(self.data[per], \"dtype\"):\n varnames = list(\n [n for n in self.data[per].dtype.names if n not in names]\n )\n break\n\n # create list of dataframes for each stress period\n # each with index of k, i, j\n dfs = []\n for per in self.data.keys():\n recs = self.data[per]\n if recs is None or len(recs) == 0:\n # add an empty dataframe if a stress period is\n # empty (e.g. 
no pumping during a predevelopment\n # period)\n columns = names + list(\n [\"{}{}\".format(c, per) for c in varnames]\n )\n dfi = pd.DataFrame(data=None, columns=columns)\n dfi = dfi.set_index(names)\n else:\n dfi = pd.DataFrame.from_records(recs)\n dfg = dfi.groupby(names)\n count = dfg[varnames[0]].count().rename(\"n\")\n if (count > 1).values.any():\n print(\n \"Duplicated list entry locations aggregated \"\n \"for kper {}\".format(per)\n )\n for kij in count[count > 1].index.values:\n print(\" (k,i,j) {}\".format(kij))\n dfi = dfg.sum() # aggregate\n dfi.columns = list([\"{}{}\".format(c, per) for c in varnames])\n dfs.append(dfi)\n df = pd.concat(dfs, axis=1)\n if squeeze:\n keep = []\n for var in varnames:\n diffcols = list([n for n in df.columns if var in n])\n diff = df[diffcols].fillna(0).diff(axis=1)\n diff[\n \"{}0\".format(var)\n ] = 1 # always return the first stress period\n changed = diff.sum(axis=0) != 0\n keep.append(df.loc[:, changed.index[changed]])\n df = pd.concat(keep, axis=1)\n df = df.reset_index()\n df.insert(len(names), \"node\", df.i * self._model.ncol + df.j)\n return df\n\n def add_record(self, kper, index, values):\n # Add a record to possible already set list for a given kper\n # index is a list of k,i,j or nodes.\n # values is a list of floats.\n # The length of index + values must be equal to the number of names\n # in dtype\n assert len(index) + len(values) == len(self.dtype), (\n \"MfList.add_record() error: length of index arg +\"\n + \"length of value arg != length of self dtype\"\n )\n # If we already have something for this kper, then add to it\n if kper in list(self.__data.keys()):\n if self.vtype[kper] == int:\n # If a 0 or -1, reset\n self.__data[kper] = self.get_empty(1)\n self.__vtype[kper] = np.recarray\n elif self.vtype[kper] == str:\n # If filename, load into recarray\n d = self.__fromfile(self.data[kper])\n d.resize(d.shape[0], d.shape[1])\n self.__data[kper] = d\n self.__vtype[kper] = np.recarray\n elif self.vtype[kper] == np.recarray:\n # Extend the recarray\n self.__data[kper] = np.append(\n self.__data[kper], self.get_empty(1)\n )\n else:\n self.__data[kper] = self.get_empty(1)\n self.__vtype[kper] = np.recarray\n rec = list(index)\n rec.extend(list(values))\n try:\n self.__data[kper][-1] = tuple(rec)\n except Exception as e:\n raise Exception(\n \"MfList.add_record() error: adding record to \"\n + \"recarray: \"\n + str(e)\n )\n\n def __getitem__(self, kper):\n # Get the recarray for a given kper\n # If the data entry for kper is a string,\n # return the corresponding recarray,\n # but don't reset the value in the data dict\n # assert kper in list(self.data.keys()), \"MfList.__getitem__() kper \" + \\\n # str(kper) + \" not in data.keys()\"\n try:\n kper = int(kper)\n except Exception as e:\n raise Exception(\n \"MfList error: _getitem__() passed invalid kper index:\"\n + str(kper)\n )\n if kper not in list(self.data.keys()):\n if kper == 0:\n return self.get_empty()\n else:\n return self.data[self.__find_last_kper(kper)]\n if self.vtype[kper] == int:\n if self.data[kper] == 0:\n return self.get_empty()\n else:\n return self.data[self.__find_last_kper(kper)]\n if self.vtype[kper] == str:\n return self.__fromfile(self.data[kper])\n if self.vtype[kper] == np.recarray:\n return self.data[kper]\n\n def __setitem__(self, kper, data):\n if kper in list(self.__data.keys()):\n if self._model.verbose:\n print(\"removing existing data for kper={}\".format(kper))\n self.data.pop(kper)\n # If data is a list, then all we can do is try to cast it 
to\n # an ndarray, then cast again to a recarray\n if isinstance(data, list):\n # warnings.warn(\"MfList casting list to array\")\n try:\n data = np.array(data)\n except Exception as e:\n raise Exception(\n \"MfList error: casting list to ndarray: \" + str(e)\n )\n # cast data\n if isinstance(data, int):\n self.__cast_int(kper, data)\n elif isinstance(data, np.recarray):\n self.__cast_recarray(kper, data)\n # A single ndarray\n elif isinstance(data, np.ndarray):\n self.__cast_ndarray(kper, data)\n # A single filename\n elif isinstance(data, str):\n self.__cast_str(kper, data)\n else:\n raise Exception(\n \"MfList error: unsupported data type: \" + str(type(data))\n )\n\n # raise NotImplementedError(\"MfList.__setitem__() not implemented\")\n\n def __fromfile(self, f):\n # d = np.fromfile(f,dtype=self.dtype,count=count)\n try:\n d = np.genfromtxt(f, dtype=self.dtype)\n except Exception as e:\n raise Exception(\n \"MfList.__fromfile() error reading recarray \"\n + \"from file \"\n + str(e)\n )\n return d\n\n def get_filenames(self):\n kpers = list(self.data.keys())\n kpers.sort()\n filenames = []\n first = kpers[0]\n for kper in list(range(0, max(self._model.nper, max(kpers) + 1))):\n # Fill missing early kpers with 0\n if kper < first:\n itmp = 0\n kper_vtype = int\n elif kper in kpers:\n kper_vtype = self.__vtype[kper]\n\n if (\n self._model.array_free_format\n and self._model.external_path is not None\n ):\n # py_filepath = ''\n # py_filepath = os.path.join(py_filepath,\n # self._model.external_path)\n filename = self.package.name[0] + \"_{0:04d}.dat\".format(kper)\n filenames.append(filename)\n return filenames\n\n def get_filename(self, kper):\n ext = \"dat\"\n if self.binary:\n ext = \"bin\"\n return self.package.name[0] + \"_{0:04d}.{1}\".format(kper, ext)\n\n @property\n def binary(self):\n return bool(self.__binary)\n\n def write_transient(self, f, single_per=None, forceInternal=False):\n # forceInternal overrides isExternal (set below) for cases where\n # external arrays are not supported (oh hello MNW1!)\n # write the transient sequence described by the data dict\n nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()\n assert hasattr(f, \"read\"), (\n \"MfList.write() error: \" + \"f argument must be a file handle\"\n )\n kpers = list(self.data.keys())\n kpers.sort()\n first = kpers[0]\n if single_per is None:\n loop_over_kpers = list(range(0, max(nper, max(kpers) + 1)))\n else:\n if not isinstance(single_per, list):\n single_per = [single_per]\n loop_over_kpers = single_per\n\n for kper in loop_over_kpers:\n # Fill missing early kpers with 0\n if kper < first:\n itmp = 0\n kper_vtype = int\n elif kper in kpers:\n kper_data = self.__data[kper]\n kper_vtype = self.__vtype[kper]\n if kper_vtype == str:\n if not self._model.array_free_format:\n kper_data = self.__fromfile(kper_data)\n kper_vtype = np.recarray\n itmp = self.get_itmp(kper)\n if kper_vtype == np.recarray:\n itmp = kper_data.shape[0]\n elif (kper_vtype == int) or (kper_vtype is None):\n itmp = kper_data\n # Fill late missing kpers with -1\n else:\n itmp = -1\n kper_vtype = int\n\n f.write(\n \" {0:9d} {1:9d} # stress period {2:d}\\n\".format(\n itmp, 0, kper + 1\n )\n )\n\n isExternal = False\n if (\n self._model.array_free_format\n and self._model.external_path is not None\n and forceInternal is False\n ):\n isExternal = True\n if self.__binary:\n isExternal = True\n if isExternal:\n if kper_vtype == np.recarray:\n py_filepath = \"\"\n if self._model.model_ws is not None:\n py_filepath = self._model.model_ws\n if 
self._model.external_path is not None:\n py_filepath = os.path.join(\n py_filepath, self._model.external_path\n )\n filename = self.get_filename(kper)\n py_filepath = os.path.join(py_filepath, filename)\n model_filepath = filename\n if self._model.external_path is not None:\n model_filepath = os.path.join(\n self._model.external_path, filename\n )\n self.__tofile(py_filepath, kper_data)\n kper_vtype = str\n kper_data = model_filepath\n\n if kper_vtype == np.recarray:\n name = f.name\n if self.__binary or not numpy114:\n f.close()\n # switch file append mode to binary\n with open(name, \"ab+\") as f:\n self.__tofile(f, kper_data)\n # continue back to non-binary\n f = open(name, \"a\")\n else:\n self.__tofile(f, kper_data)\n elif kper_vtype == str:\n f.write(\" open/close \" + kper_data)\n if self.__binary:\n f.write(\" (BINARY)\")\n f.write(\"\\n\")\n\n def __tofile(self, f, data):\n # Write the recarray (data) to the file (or file handle) f\n assert isinstance(data, np.recarray), (\n \"MfList.__tofile() data arg \" + \"not a recarray\"\n )\n\n # Add one to the kij indices\n lnames = [name.lower() for name in self.dtype.names]\n # --make copy of data for multiple calls\n d = data.copy()\n for idx in [\"k\", \"i\", \"j\", \"node\"]:\n if idx in lnames:\n d[idx] += 1\n if self.__binary:\n dtype2 = []\n for name in self.dtype.names:\n dtype2.append((name, np.float32))\n dtype2 = np.dtype(dtype2)\n d = np.array(d, dtype=dtype2)\n d.tofile(f)\n else:\n np.savetxt(f, d, fmt=self.fmt_string, delimiter=\"\")\n\n def check_kij(self):\n names = self.dtype.names\n if (\"k\" not in names) or (\"i\" not in names) or (\"j\" not in names):\n warnings.warn(\n \"MfList.check_kij(): index fieldnames 'k,i,j' \"\n + \"not found in self.dtype names: \"\n + str(names)\n )\n return\n nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()\n if nl == 0:\n warnings.warn(\n \"MfList.check_kij(): unable to get dis info from \" + \"model\"\n )\n return\n for kper in list(self.data.keys()):\n out_idx = []\n data = self[kper]\n if data is not None:\n k = data[\"k\"]\n k_idx = np.where(np.logical_or(k < 0, k >= nl))\n if k_idx[0].shape[0] > 0:\n out_idx.extend(list(k_idx[0]))\n i = data[\"i\"]\n i_idx = np.where(np.logical_or(i < 0, i >= nr))\n if i_idx[0].shape[0] > 0:\n out_idx.extend(list(i_idx[0]))\n j = data[\"j\"]\n j_idx = np.where(np.logical_or(j < 0, j >= nc))\n if j_idx[0].shape[0]:\n out_idx.extend(list(j_idx[0]))\n\n if len(out_idx) > 0:\n warn_str = (\n \"MfList.check_kij(): warning the following \"\n + \"indices are out of bounds in kper \"\n + str(kper)\n + \":\\n\"\n )\n for idx in out_idx:\n d = data[idx]\n warn_str += \" {0:9d} {1:9d} {2:9d}\\n\".format(\n d[\"k\"] + 1, d[\"i\"] + 1, d[\"j\"] + 1\n )\n warnings.warn(warn_str)\n\n def __find_last_kper(self, kper):\n kpers = list(self.data.keys())\n kpers.sort()\n last = 0\n for kkper in kpers[::-1]:\n # if this entry is valid\n if self.vtype[kkper] != int or self.data[kkper] != -1:\n last = kkper\n if kkper <= kper:\n break\n return kkper\n\n def get_indices(self):\n \"\"\"\n a helper function for plotting - get all unique indices\n \"\"\"\n names = self.dtype.names\n lnames = []\n [lnames.append(name.lower()) for name in names]\n if \"k\" not in lnames or \"j\" not in lnames:\n raise NotImplementedError(\"MfList.get_indices requires kij\")\n kpers = list(self.data.keys())\n kpers.sort()\n indices = []\n for i, kper in enumerate(kpers):\n kper_vtype = self.__vtype[kper]\n if (kper_vtype != int) or (kper_vtype is not None):\n d = self.data[kper]\n if not 
indices:\n indices = list(zip(d[\"k\"], d[\"i\"], d[\"j\"]))\n else:\n new_indices = list(zip(d[\"k\"], d[\"i\"], d[\"j\"]))\n for ni in new_indices:\n if ni not in indices:\n indices.append(ni)\n return indices\n\n def attribute_by_kper(self, attr, function=np.mean, idx_val=None):\n assert attr in self.dtype.names\n if idx_val is not None:\n assert idx_val[0] in self.dtype.names\n kpers = list(self.data.keys())\n kpers.sort()\n values = []\n for kper in range(0, max(self._model.nper, max(kpers))):\n\n if kper < min(kpers):\n values.append(0)\n elif kper > max(kpers) or kper not in kpers:\n values.append(values[-1])\n else:\n kper_data = self.__data[kper]\n if idx_val is not None:\n kper_data = kper_data[\n np.where(kper_data[idx_val[0]] == idx_val[1])\n ]\n # kper_vtype = self.__vtype[kper]\n v = function(kper_data[attr])\n values.append(v)\n return values\n\n def plot(\n self,\n key=None,\n names=None,\n kper=0,\n filename_base=None,\n file_extension=None,\n mflay=None,\n **kwargs\n ):\n \"\"\"\n Plot stress period boundary condition (MfList) data for a specified\n stress period\n\n Parameters\n ----------\n key : str\n MfList dictionary key. (default is None)\n names : list\n List of names for figure titles. (default is None)\n kper : int\n MODFLOW zero-based stress period number to return. (default is zero)\n filename_base : str\n Base file name that will be used to automatically generate file\n names for output image files. Plots will be exported as image\n files if file_name_base is not None. (default is None)\n file_extension : str\n Valid matplotlib.pyplot file extension for savefig(). Only used\n if filename_base is not None. (default is 'png')\n mflay : int\n MODFLOW zero-based layer number to return. If None, then all\n all layers will be included. (default is None)\n **kwargs : dict\n axes : list of matplotlib.pyplot.axis\n List of matplotlib.pyplot.axis that will be used to plot\n data for each layer. If axes=None axes will be generated.\n (default is None)\n pcolor : bool\n Boolean used to determine if matplotlib.pyplot.pcolormesh\n plot will be plotted. (default is True)\n colorbar : bool\n Boolean used to determine if a color bar will be added to\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\n (default is False)\n inactive : bool\n Boolean used to determine if a black overlay in inactive\n cells in a layer will be displayed. (default is True)\n contour : bool\n Boolean used to determine if matplotlib.pyplot.contour\n plot will be plotted. (default is False)\n clabel : bool\n Boolean used to determine if matplotlib.pyplot.clabel\n will be plotted. Only used if contour=True. (default is False)\n grid : bool\n Boolean used to determine if the model grid will be plotted\n on the figure. (default is False)\n masked_values : list\n List of unique values to be excluded from the plot.\n\n Returns\n ----------\n out : list\n Empty list is returned if filename_base is not None. 
Otherwise\n a list of matplotlib.pyplot.axis is returned.\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> ml.wel.stress_period_data.plot(ml.wel, kper=1)\n\n \"\"\"\n\n from flopy.plot import PlotUtilities\n\n axes = PlotUtilities._plot_mflist_helper(\n self,\n key=key,\n names=names,\n kper=kper,\n filename_base=filename_base,\n file_extension=file_extension,\n mflay=mflay,\n **kwargs\n )\n\n return axes\n\n def to_shapefile(self, filename, kper=None):\n \"\"\"\n Export stress period boundary condition (MfList) data for a specified\n stress period\n\n Parameters\n ----------\n filename : str\n Shapefile name to write\n kper : int\n MODFLOW zero-based stress period number to return. (default is None)\n\n Returns\n ----------\n None\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> ml.wel.to_shapefile('test_hk.shp', kper=1)\n \"\"\"\n import warnings\n\n warnings.warn(\n \"Deprecation warning: to_shapefile() is deprecated. use .export()\"\n )\n\n # if self.sr is None:\n # raise Exception(\"MfList.to_shapefile: SpatialReference not set\")\n # import flopy.utils.flopy_io as fio\n # if kper is None:\n # keys = self.data.keys()\n # keys.sort()\n # else:\n # keys = [kper]\n # array_dict = {}\n # for kk in keys:\n # arrays = self.to_array(kk)\n # for name, array in arrays.items():\n # for k in range(array.shape[0]):\n # #aname = name+\"{0:03d}_{1:02d}\".format(kk, k)\n # n = fio.shape_attr_name(name, length=4)\n # aname = \"{}{:03d}{:03d}\".format(n, k+1, int(kk)+1)\n # array_dict[aname] = array[k]\n # fio.write_grid_shapefile(filename, self.sr, array_dict)\n self.export(filename, kper=kper)\n\n def to_array(self, kper=0, mask=False):\n \"\"\"\n Convert stress period boundary condition (MfList) data for a\n specified stress period to a 3-D numpy array\n\n Parameters\n ----------\n kper : int\n MODFLOW zero-based stress period number to return. (default is zero)\n mask : boolean\n return array with np.NaN instead of zero\n Returns\n ----------\n out : dict of numpy.ndarrays\n Dictionary of 3-D numpy arrays containing the stress period data for\n a selected stress period. 
The dictionary keys are the MfList dtype\n names for the stress period data ('cond', 'flux', 'bhead', etc.).\n\n See Also\n --------\n\n Notes\n -----\n\n Examples\n --------\n >>> import flopy\n >>> ml = flopy.modflow.Modflow.load('test.nam')\n >>> v = ml.wel.stress_period_data.to_array(kper=1)\n\n \"\"\"\n i0 = 3\n unstructured = False\n if \"inode\" in self.dtype.names:\n raise NotImplementedError()\n\n if \"node\" in self.dtype.names:\n if \"i\" not in self.dtype.names and \"j\" not in self.dtype.names:\n i0 = 1\n unstructured = True\n\n arrays = {}\n for name in self.dtype.names[i0:]:\n if not self.dtype.fields[name][0] == object:\n if unstructured:\n arr = np.zeros((self._model.nlay * self._model.ncpl,))\n else:\n arr = np.zeros(\n (self._model.nlay, self._model.nrow, self._model.ncol)\n )\n arrays[name] = arr.copy()\n\n # if this kper is not found\n if kper not in self.data.keys():\n kpers = list(self.data.keys())\n kpers.sort()\n # if this kper is before the first entry,\n # (maybe) mask and return\n if kper < kpers[0]:\n if mask:\n for name, arr in arrays.items():\n arrays[name][:] = np.NaN\n return arrays\n # find the last kper\n else:\n kper = self.__find_last_kper(kper)\n\n sarr = self.data[kper]\n\n if np.isscalar(sarr):\n # if there are no entries for this kper\n if sarr == 0:\n if mask:\n for name, arr in arrays.items():\n arrays[name][:] = np.NaN\n return arrays\n else:\n raise Exception(\"MfList: something bad happened\")\n\n for name, arr in arrays.items():\n if unstructured:\n cnt = np.zeros(\n (self._model.nlay * self._model.ncpl,), dtype=np.float\n )\n else:\n cnt = np.zeros(\n (self._model.nlay, self._model.nrow, self._model.ncol),\n dtype=np.float,\n )\n # print(name,kper)\n for rec in sarr:\n if unstructured:\n arr[rec[\"node\"]] += rec[name]\n cnt[rec[\"node\"]] += 1.0\n else:\n arr[rec[\"k\"], rec[\"i\"], rec[\"j\"]] += rec[name]\n cnt[rec[\"k\"], rec[\"i\"], rec[\"j\"]] += 1.0\n # average keys that should not be added\n if name not in (\"cond\", \"flux\"):\n idx = cnt > 0.0\n arr[idx] /= cnt[idx]\n if mask:\n arr = np.ma.masked_where(cnt == 0.0, arr)\n arr[cnt == 0.0] = np.NaN\n\n arrays[name] = arr.copy()\n # elif mask:\n # for name, arr in arrays.items():\n # arrays[name][:] = np.NaN\n return arrays\n\n @property\n def masked_4D_arrays(self):\n # get the first kper\n arrays = self.to_array(kper=0, mask=True)\n\n # initialize these big arrays\n m4ds = {}\n for name, array in arrays.items():\n m4d = np.zeros(\n (\n self._model.nper,\n self._model.nlay,\n self._model.nrow,\n self._model.ncol,\n )\n )\n m4d[0, :, :, :] = array\n m4ds[name] = m4d\n for kper in range(1, self._model.nper):\n arrays = self.to_array(kper=kper, mask=True)\n for name, array in arrays.items():\n m4ds[name][kper, :, :, :] = array\n return m4ds\n\n def masked_4D_arrays_itr(self):\n # get the first kper\n arrays = self.to_array(kper=0, mask=True)\n\n # initialize these big arrays\n for name, array in arrays.items():\n m4d = np.zeros(\n (\n self._model.nper,\n self._model.nlay,\n self._model.nrow,\n self._model.ncol,\n )\n )\n m4d[0, :, :, :] = array\n for kper in range(1, self._model.nper):\n arrays = self.to_array(kper=kper, mask=True)\n for tname, array in arrays.items():\n if tname == name:\n m4d[kper, :, :, :] = array\n yield name, m4d\n\n @property\n def array(self):\n return self.masked_4D_arrays\n\n @classmethod\n def from_4d(cls, model, pak_name, m4ds):\n \"\"\"construct an MfList instance from a dict of\n (attribute_name,masked 4D ndarray\n Parameters\n ----------\n model : mbase 
derived type\n pak_name : str package name (e.g GHB)\n m4ds : {attribute name:4d masked numpy.ndarray}\n Returns\n -------\n MfList instance\n \"\"\"\n sp_data = MfList.masked4D_arrays_to_stress_period_data(\n model.get_package(pak_name).get_default_dtype(), m4ds\n )\n return cls(model.get_package(pak_name), data=sp_data)\n\n @staticmethod\n def masked4D_arrays_to_stress_period_data(dtype, m4ds):\n \"\"\" convert a dictionary of 4-dim masked arrays to\n a stress_period_data style dict of recarray\n Parameters\n ----------\n dtype : numpy dtype\n\n m4ds : dict {name:masked numpy 4-dim ndarray}\n Returns\n -------\n dict {kper:recarray}\n \"\"\"\n assert isinstance(m4ds, dict)\n for name, m4d in m4ds.items():\n assert isinstance(m4d, np.ndarray)\n assert name in dtype.names\n assert m4d.ndim == 4\n keys = list(m4ds.keys())\n\n for i1, key1 in enumerate(keys):\n a1 = np.isnan(m4ds[key1])\n for i2, key2 in enumerate(keys[i1:]):\n a2 = np.isnan(m4ds[key2])\n if not np.array_equal(a1, a2):\n raise Exception(\n \"Transient2d error: masking not equal\"\n + \" for {0} and {1}\".format(key1, key2)\n )\n\n sp_data = {}\n for kper in range(m4d.shape[0]):\n vals = {}\n for name, m4d in m4ds.items():\n arr = m4d[kper, :, :, :]\n isnan = np.argwhere(~np.isnan(arr))\n v = []\n for k, i, j in isnan:\n v.append(arr[k, i, j])\n vals[name] = v\n kk = isnan[:, 0]\n ii = isnan[:, 1]\n jj = isnan[:, 2]\n\n spd = np.recarray(shape=isnan.shape[0], dtype=dtype)\n spd[\"i\"] = ii\n spd[\"k\"] = kk\n spd[\"j\"] = jj\n for n, v in vals.items():\n spd[n] = v\n sp_data[kper] = spd\n return sp_data\n"},"path":{"kind":"string","value":"flopy/utils/util_list.py"},"size":{"kind":"number","value":44483,"string":"44,483"},"nl_text":{"kind":"string","value":"a generic object for handling transient boundary condition lists\n\nParameters\n----------\npackage : package object\n The package object (of type :class:`flopy.pakbase.Package`) to which\n this MfList will be added.\ndata : varies\n the data of the transient list (optional). (the default is None)\n\nAttributes\n----------\nmxact : int\n the max number of active bc for any stress period\n\nMethods\n-------\nadd_record(kper,index,value) : None\n add a record to stress period kper at index location\nwrite_transient(f) : None\n write the transient sequence to the model input file f\ncheck_kij() : None\n checks for boundaries outside of model domain - issues warnings only\n\nSee Also\n--------\n\nNotes\n-----\n\nExamples\n--------\nappend the recarrays from one MfList to another\nParameters\n----------\n other: variable: an item that can be cast in to an MfList\n that corresponds with self\nReturns\n-------\n dict of {kper:recarray}\ndrop fields from an MfList\n\nParameters\n----------\nfields : list or set of field names to drop\n\nReturns\n-------\ndropped : MfList without the dropped fields\nReturns a C-style fmt string for numpy savetxt that corresponds to\nthe dtype\nconstruct an MfList instance from a dict of\n(attribute_name,masked 4D ndarray\nParameters\n----------\n model : mbase derived type\n pak_name : str package name (e.g GHB)\n m4ds : {attribute name:4d masked numpy.ndarray}\nReturns\n-------\n MfList instance\nCast recarrays for stress periods into single\ndataframe containing all stress periods.\n\nParameters\n----------\nsqueeze : bool\n Reduce number of columns in dataframe to only include\n stress periods where a variable changes.\n\nReturns\n-------\ndf : dataframe\n Dataframe of shape nrow = ncells, ncol = nvar x nper. 
If\n the squeeze option is chosen, nper is the number of\n stress periods where at least one cells is different,\n otherwise it is equal to the number of keys in MfList.data.\n\nNotes\n-----\nRequires pandas.\na helper function for plotting - get all unique indices\nconvert a dictionary of 4-dim masked arrays to\n a stress_period_data style dict of recarray\nParameters\n----------\n dtype : numpy dtype\n\n m4ds : dict {name:masked numpy 4-dim ndarray}\nReturns\n-------\n dict {kper:recarray}\nPlot stress period boundary condition (MfList) data for a specified\nstress period\n\nParameters\n----------\nkey : str\n MfList dictionary key. (default is None)\nnames : list\n List of names for figure titles. (default is None)\nkper : int\n MODFLOW zero-based stress period number to return. (default is zero)\nfilename_base : str\n Base file name that will be used to automatically generate file\n names for output image files. Plots will be exported as image\n files if file_name_base is not None. (default is None)\nfile_extension : str\n Valid matplotlib.pyplot file extension for savefig(). Only used\n if filename_base is not None. (default is 'png')\nmflay : int\n MODFLOW zero-based layer number to return. If None, then all\n all layers will be included. (default is None)\n**kwargs : dict\n axes : list of matplotlib.pyplot.axis\n List of matplotlib.pyplot.axis that will be used to plot\n data for each layer. If axes=None axes will be generated.\n (default is None)\n pcolor : bool\n Boolean used to determine if matplotlib.pyplot.pcolormesh\n plot will be plotted. (default is True)\n colorbar : bool\n Boolean used to determine if a color bar will be added to\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\n (default is False)\n inactive : bool\n Boolean used to determine if a black overlay in inactive\n cells in a layer will be displayed. (default is True)\n contour : bool\n Boolean used to determine if matplotlib.pyplot.contour\n plot will be plotted. (default is False)\n clabel : bool\n Boolean used to determine if matplotlib.pyplot.clabel\n will be plotted. Only used if contour=True. (default is False)\n grid : bool\n Boolean used to determine if the model grid will be plotted\n on the figure. (default is False)\n masked_values : list\n List of unique values to be excluded from the plot.\n\nReturns\n----------\nout : list\n Empty list is returned if filename_base is not None. Otherwise\n a list of matplotlib.pyplot.axis is returned.\n\nSee Also\n--------\n\nNotes\n-----\n\nExamples\n--------\n>>> import flopy\n>>> ml = flopy.modflow.Modflow.load('test.nam')\n>>> ml.wel.stress_period_data.plot(ml.wel, kper=1)\nConvert stress period boundary condition (MfList) data for a\nspecified stress period to a 3-D numpy array\n\nParameters\n----------\nkper : int\n MODFLOW zero-based stress period number to return. (default is zero)\nmask : boolean\n return array with np.NaN instead of zero\nReturns\n----------\nout : dict of numpy.ndarrays\n Dictionary of 3-D numpy arrays containing the stress period data for\n a selected stress period. 
The dictionary keys are the MfList dtype\n names for the stress period data ('cond', 'flux', 'bhead', etc.).\n\nSee Also\n--------\n\nNotes\n-----\n\nExamples\n--------\n>>> import flopy\n>>> ml = flopy.modflow.Modflow.load('test.nam')\n>>> v = ml.wel.stress_period_data.to_array(kper=1)\nExport stress period boundary condition (MfList) data for a specified\nstress period\n\nParameters\n----------\nfilename : str\n Shapefile name to write\nkper : int\n MODFLOW zero-based stress period number to return. (default is None)\n\nReturns\n----------\nNone\n\nSee Also\n--------\n\nNotes\n-----\n\nExamples\n--------\n>>> import flopy\n>>> ml = flopy.modflow.Modflow.load('test.nam')\n>>> ml.wel.to_shapefile('test_hk.shp', kper=1)\nutil_list module. Contains the mflist class.\n This classes encapsulates modflow-style list inputs away\n from the individual packages. The end-user should not need to\n instantiate this class directly.\n\n some more info\n\n because np 1.9 doesn't support indexing by list of columns Get the itmp for a given kper If an external file, have to load it If not any of the above, it must be an int mt3d list data is fixed format Use numpy's floating-point formatter (Dragon4) Private method to cast the data argument Should only be called by the constructor If data is a list, then all we can do is try to cast it to an ndarray, then cast again to a recarray warnings.warn(\"MfList casting list to array\") If data is a dict, the we have to assume it is keyed on kper Same as before, just try... warnings.warn(\"MfList: casting list to array at \" +\\ \"kper {0:d}\".format(kper)) super hack - sick of recarrays already if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1): d = d.view(np.recarray) A single recarray - same MfList for all stress periods A single ndarray A single filename If d is a string, assume it is a filename and check that it exists If d is an integer, then it must be 0 or -1 warnings.warn(\"MfList: ndarray dtype does not match self \" +\\ \"dtype, trying to cast\") make a dataframe of all data for all stress periods find relevant variable names may have to iterate over the first stress period create list of dataframes for each stress period each with index of k, i, j add an empty dataframe if a stress period is empty (e.g. no pumping during a predevelopment period) aggregate always return the first stress period Add a record to possible already set list for a given kper index is a list of k,i,j or nodes. values is a list of floats. The length of index + values must be equal to the number of names in dtype If we already have something for this kper, then add to it If a 0 or -1, reset If filename, load into recarray Extend the recarray Get the recarray for a given kper If the data entry for kper is a string, return the corresponding recarray, but don't reset the value in the data dict assert kper in list(self.data.keys()), \"MfList.__getitem__() kper \" + \\ str(kper) + \" not in data.keys()\" If data is a list, then all we can do is try to cast it to an ndarray, then cast again to a recarray warnings.warn(\"MfList casting list to array\") cast data A single ndarray A single filename raise NotImplementedError(\"MfList.__setitem__() not implemented\") d = np.fromfile(f,dtype=self.dtype,count=count) Fill missing early kpers with 0 py_filepath = '' py_filepath = os.path.join(py_filepath, self._model.external_path) forceInternal overrides isExternal (set below) for cases where external arrays are not supported (oh hello MNW1!) 
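As a concrete illustration of the {kper: value} structure that this casting logic accepts (a recarray or plain list of boundary cells, the integer 0 for no boundary conditions, -1 to reuse the previous stress period, or a filename), here is a small sketch. The WEL-style field names and the numbers are illustrative assumptions only, not data from any real model.

# Illustrative sketch of the {kper: value} structure described above; the
# WEL-style dtype and the values are assumptions, not real model data.
import numpy as np

dtype = np.dtype([("k", int), ("i", int), ("j", int), ("flux", np.float32)])

rec = np.recarray(2, dtype=dtype)
rec[0] = (0, 4, 4, -150.0)
rec[1] = (0, 5, 5, -75.0)

stress_period_data = {
    0: rec,                  # recarray: explicit list of boundary cells for kper 0
    1: [[0, 4, 4, -80.0]],   # plain list: cast to a recarray internally
    2: 0,                    # 0  -> no active boundary conditions in this period
    3: -1,                   # -1 -> reuse the data from the last specified period
}

A dictionary like this is what the casting rules above normalise into per-kper recarrays, and what write_transient then turns into the per-stress-period ITMP header plus the list entries.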
write the transient sequence described by the data dict Fill missing early kpers with 0 Fill late missing kpers with -1 switch file append mode to binary continue back to non-binary Write the recarray (data) to the file (or file handle) f Add one to the kij indices --make copy of data for multiple calls if this entry is valid kper_vtype = self.__vtype[kper] if self.sr is None: raise Exception(\"MfList.to_shapefile: SpatialReference not set\") import flopy.utils.flopy_io as fio if kper is None: keys = self.data.keys() keys.sort() else: keys = [kper] array_dict = {} for kk in keys: arrays = self.to_array(kk) for name, array in arrays.items(): for k in range(array.shape[0]): aname = name+\"{0:03d}_{1:02d}\".format(kk, k) n = fio.shape_attr_name(name, length=4) aname = \"{}{:03d}{:03d}\".format(n, k+1, int(kk)+1) array_dict[aname] = array[k] fio.write_grid_shapefile(filename, self.sr, array_dict) if this kper is not found if this kper is before the first entry, (maybe) mask and return find the last kper if there are no entries for this kper print(name,kper) average keys that should not be added elif mask: for name, arr in arrays.items(): arrays[name][:] = np.NaN get the first kper initialize these big arrays get the first kper initialize these big arrays"},"nl_size":{"kind":"number","value":9859,"string":"9,859"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6297732591629028,"string":"0.629773"}}},{"rowIdx":589,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\nimport requests\n\nfrom webs.api.exceptions.customs import ServerError, InvalidAPIRequest, RecordNotFound, RecordAlreadyExists\n\n\nclass RequestMixin(object):\n CODE_EXCEPTION_MSG = {\n 400: InvalidAPIRequest,\n 404: RecordNotFound,\n 409: RecordAlreadyExists,\n 422: InvalidAPIRequest,\n 500: ServerError,\n }\n\n def __init__(self):\n self.session = requests.Session()\n\n @property\n def _headers(self):\n return {\n \"Content-Type\": \"application/json\",\n }\n\n def request(self, server, method, url, json=None, params=None, timeout=60):\n try:\n response = self.session.request(\n method, url, json=json, params=params,\n timeout=timeout, headers=self._headers\n )\n except requests.exceptions.ConnectTimeout:\n raise self.CODE_EXCEPTION_MSG[500](f\"{server}服务器连接超时!\")\n except requests.exceptions.ConnectionError:\n raise self.CODE_EXCEPTION_MSG[500](f\"{server}服务器连接错误!\")\n\n try:\n response_data = response.json()\n except Exception as e:\n raise ServerError(f\"{server}服务器参数解析失败!\")\n\n if not (200 <= response.status_code < 300):\n exception = self.CODE_EXCEPTION_MSG[response.status_code] \\\n if response.status_code in self.CODE_EXCEPTION_MSG else self.CODE_EXCEPTION_MSG[400]\n raise exception(f\"{server} Response:{response_data.get('error').get('message')}\")\n\n return response_data\n\n\nweb_client = RequestMixin()\n"},"path":{"kind":"string","value":"services/engine/webs/core/requests/request.py"},"size":{"kind":"number","value":1624,"string":"1,624"},"nl_text":{"kind":"string","value":"-*- coding: utf-8 -*-"},"nl_size":{"kind":"number","value":21,"string":"21"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7672811150550842,"string":"0.767281"}}},{"rowIdx":590,"cells":{"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, jsonify\n\nfrom flask_service.swagger import spec\n\n__all__ = ['main_app']\n\nmain_app = Blueprint('main_app', __name__)\n\n\n@main_app.route('/api')\ndef 
swagger():\n \"\"\"\n Responds with the OpenAPI specification for this application.\n \"\"\"\n return jsonify(spec.to_dict())\n\n\n@main_app.route('/health')\ndef health():\n \"\"\"\n Responds with the current's service health.\n\n Could be used by the liveness probe of a Kubernetes cluster for instance.\n \"\"\"\n # put some logic here to decide if your app is doing well or not\n # by default, we'll always return everything is okay!\n return \"\"\n\n\n@main_app.route('/status')\ndef status():\n \"\"\"\n Responds with the current's service status.\n\n Could be used by the readiness probe of a Kubernetes cluster.\n \"\"\"\n # put some logic here to decide if your app is doing well or not\n # by default, we'll always return everything is okay!\n return \"\""},"path":{"kind":"string","value":"flask_service/views.py"},"size":{"kind":"number","value":968,"string":"968"},"nl_text":{"kind":"string","value":"Responds with the current's service health.\n\nCould be used by the liveness probe of a Kubernetes cluster for instance.\nResponds with the current's service status.\n\nCould be used by the readiness probe of a Kubernetes cluster.\nResponds with the OpenAPI specification for this application.\n\n -*- coding: utf-8 -*- put some logic here to decide if your app is doing well or not by default, we'll always return everything is okay! put some logic here to decide if your app is doing well or not by default, we'll always return everything is okay!"},"nl_size":{"kind":"number","value":541,"string":"541"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.9404661655426025,"string":"0.940466"}}},{"rowIdx":591,"cells":{"content":{"kind":"string","value":"\"\"\"Principal Component Analysis Base Classes\"\"\"\n\n# Author: Alexandre Gramfort \n# Olivier Grisel \n# Mathieu Blondel \n# Denis A. 
Engemann \n# Kyle Kastner \n#\n# License: BSD 3 clause\n\nimport numpy as np\nfrom scipy import linalg\n\nfrom ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin\nfrom ..utils.validation import check_is_fitted\nfrom abc import ABCMeta, abstractmethod\n\n\nclass _BasePCA(\n _ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta\n):\n \"\"\"Base class for PCA methods.\n\n Warning: This class should not be used directly.\n Use derived classes instead.\n \"\"\"\n\n def get_covariance(self):\n \"\"\"Compute data covariance with the generative model.\n\n ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``\n where S**2 contains the explained variances, and sigma2 contains the\n noise variances.\n\n Returns\n -------\n cov : array of shape=(n_features, n_features)\n Estimated covariance of data.\n \"\"\"\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)\n cov = np.dot(components_.T * exp_var_diff, components_)\n cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace\n return cov\n\n def get_precision(self):\n \"\"\"Compute data precision matrix with the generative model.\n\n Equals the inverse of the covariance but computed with\n the matrix inversion lemma for efficiency.\n\n Returns\n -------\n precision : array, shape=(n_features, n_features)\n Estimated precision of data.\n \"\"\"\n n_features = self.components_.shape[1]\n\n # handle corner cases first\n if self.n_components_ == 0:\n return np.eye(n_features) / self.noise_variance_\n if self.n_components_ == n_features:\n return linalg.inv(self.get_covariance())\n\n # Get precision using matrix inversion lemma\n components_ = self.components_\n exp_var = self.explained_variance_\n if self.whiten:\n components_ = components_ * np.sqrt(exp_var[:, np.newaxis])\n exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)\n precision = np.dot(components_, components_.T) / self.noise_variance_\n precision.flat[:: len(precision) + 1] += 1.0 / exp_var_diff\n precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))\n precision /= -(self.noise_variance_ ** 2)\n precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_\n return precision\n\n @abstractmethod\n def fit(self, X, y=None):\n \"\"\"Placeholder for fit. 
Subclasses should implement this method!\n\n Fit the model with X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n self : object\n Returns the instance itself.\n \"\"\"\n\n def transform(self, X):\n \"\"\"Apply dimensionality reduction to X.\n\n X is projected on the first principal components previously extracted\n from a training set.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n New data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Returns\n -------\n X_new : array-like of shape (n_samples, n_components)\n Projection of X in the first principal components, where `n_samples`\n is the number of samples and `n_components` is the number of the components.\n \"\"\"\n check_is_fitted(self)\n\n X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)\n if self.mean_ is not None:\n X = X - self.mean_\n X_transformed = np.dot(X, self.components_.T)\n if self.whiten:\n X_transformed /= np.sqrt(self.explained_variance_)\n return X_transformed\n\n def inverse_transform(self, X):\n \"\"\"Transform data back to its original space.\n\n In other words, return an input `X_original` whose transform would be X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_components)\n New data, where `n_samples` is the number of samples\n and `n_components` is the number of components.\n\n Returns\n -------\n X_original array-like of shape (n_samples, n_features)\n Original data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n Notes\n -----\n If whitening is enabled, inverse_transform will compute the\n exact inverse operation, which includes reversing whitening.\n \"\"\"\n if self.whiten:\n return (\n np.dot(\n X,\n np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,\n )\n + self.mean_\n )\n else:\n return np.dot(X, self.components_) + self.mean_\n\n @property\n def _n_features_out(self):\n \"\"\"Number of transformed output features.\"\"\"\n return self.components_.shape[0]\n"},"path":{"kind":"string","value":"sklearn/decomposition/_base.py"},"size":{"kind":"number","value":5716,"string":"5,716"},"nl_text":{"kind":"string","value":"Base class for PCA methods.\n\nWarning: This class should not be used directly.\nUse derived classes instead.\nNumber of transformed output features.\nPlaceholder for fit. 
Subclasses should implement this method!\n\nFit the model with X.\n\nParameters\n----------\nX : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\nReturns\n-------\nself : object\n Returns the instance itself.\nCompute data covariance with the generative model.\n\n``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``\nwhere S**2 contains the explained variances, and sigma2 contains the\nnoise variances.\n\nReturns\n-------\ncov : array of shape=(n_features, n_features)\n Estimated covariance of data.\nCompute data precision matrix with the generative model.\n\nEquals the inverse of the covariance but computed with\nthe matrix inversion lemma for efficiency.\n\nReturns\n-------\nprecision : array, shape=(n_features, n_features)\n Estimated precision of data.\nTransform data back to its original space.\n\nIn other words, return an input `X_original` whose transform would be X.\n\nParameters\n----------\nX : array-like of shape (n_samples, n_components)\n New data, where `n_samples` is the number of samples\n and `n_components` is the number of components.\n\nReturns\n-------\nX_original array-like of shape (n_samples, n_features)\n Original data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\nNotes\n-----\nIf whitening is enabled, inverse_transform will compute the\nexact inverse operation, which includes reversing whitening.\nApply dimensionality reduction to X.\n\nX is projected on the first principal components previously extracted\nfrom a training set.\n\nParameters\n----------\nX : array-like of shape (n_samples, n_features)\n New data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\nReturns\n-------\nX_new : array-like of shape (n_samples, n_components)\n Projection of X in the first principal components, where `n_samples`\n is the number of samples and `n_components` is the number of the components.\nPrincipal Component Analysis Base Classes\n\n Author: Alexandre Gramfort Olivier Grisel Mathieu Blondel Denis A. Engemann Kyle Kastner License: BSD 3 clause modify diag inplace handle corner cases first Get precision using matrix inversion lemma"},"nl_size":{"kind":"number","value":2599,"string":"2,599"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6983745694160461,"string":"0.698375"}}},{"rowIdx":592,"cells":{"content":{"kind":"string","value":"\"\"\"\nenCount tasks and analyses.\n\nenCount is a Python library for processing RNA-Seq data from ENCODE.\n\n\"\"\"\n\n\n# from ._version import __version__\nfrom . import config # load from myconfig.py if it exists\n\nfrom . import db\nfrom . import queues\nfrom . import encode\nfrom . import externals\n\nfrom . import gtfs\nfrom . import fastqs\nfrom . import experiments\nfrom . import mappings\nfrom . 
import integration"},"path":{"kind":"string","value":"enCount/__init__.py"},"size":{"kind":"number","value":403,"string":"403"},"nl_text":{"kind":"string","value":"enCount tasks and analyses.\n\nenCount is a Python library for processing RNA-Seq data from ENCODE.\n\n from ._version import __version__ load from myconfig.py if it exists"},"nl_size":{"kind":"number","value":168,"string":"168"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.6367589235305786,"string":"0.636759"}}},{"rowIdx":593,"cells":{"content":{"kind":"string","value":"from asgiref.sync import sync_to_async\nfrom channels.layers import get_channel_layer\nfrom ....models import Participant\nimport humps\n\nchannel_layer = get_channel_layer()\n\ndef get_participant(room_channel_name, channel_name):\n participant = Participant.objects.get(\n channel_room__channel_name=room_channel_name,\n channel_name=channel_name\n )\n return participant\n\ndef get_participant_id(participant):\n return participant.id\n\n\nasync def broadcast_avatar_position(room_channel_name, channel_name, json_data):\n \"\"\"\n Sends the new avatar's position to the users of the room.\n \"\"\"\n\n type = json_data['type']\n payload = json_data['payload']\n position = payload[\"position\"]\n animate = payload[\"animate\"]\n\n # receive the participant that sent this message\n participant = await sync_to_async(get_participant)(room_channel_name, channel_name)\n participant_id = await sync_to_async(get_participant_id)(participant)\n\n # if this was for an avatar, then set participant's position to the payload data\n def set_participant_position():\n participant.x = position[\"x\"]\n participant.y = position[\"y\"]\n participant.direction_x = position[\"directionX\"]\n participant.save()\n await sync_to_async(set_participant_position)()\n\n await channel_layer.group_send(\n room_channel_name,\n {\n 'type': type,\n 'payload': {\n \"participant_id\": participant_id,\n \"position\": position,\n \"animate\": animate,\n }\n }\n )\n\nasync def broadcast_avatar_state(room_channel_name, channel_name, json_data):\n \"\"\"\n Sends the new avatar's state to the users of the room.\n \"\"\"\n\n type = json_data['type']\n payload = json_data['payload']\n state = payload['value']\n\n # receive the participant that sent this message\n participant = await sync_to_async(get_participant)(room_channel_name, channel_name)\n participant_id = await sync_to_async(get_participant_id)(participant)\n \n await channel_layer.group_send(\n room_channel_name,\n {\n 'type': humps.decamelize(type),\n 'payload': {\n \"participant_id\": participant_id,\n \"state\": state\n }\n }\n )"},"path":{"kind":"string","value":"server/websockets/consumers/world/broadcasts/avatar.py"},"size":{"kind":"number","value":2293,"string":"2,293"},"nl_text":{"kind":"string","value":"receive the participant that sent this message if this was for an avatar, then set participant's position to the payload data receive the participant that sent this message"},"nl_size":{"kind":"number","value":172,"string":"172"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.9102018475532532,"string":"0.910202"}}},{"rowIdx":594,"cells":{"content":{"kind":"string","value":"\"\"\"Plot graphs from human-readable file formats.\"\"\"\n"},"path":{"kind":"string","value":"uniplot/__init__.py"},"size":{"kind":"number","value":52,"string":"52"},"nl_text":{"kind":"string","value":"Plot graphs from human-readable file 
formats."},"nl_size":{"kind":"number","value":45,"string":"45"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.783492386341095,"string":"0.783492"}}},{"rowIdx":595,"cells":{"content":{"kind":"string","value":"# https://www.hackerrank.com/challenges/tree-height-of-a-binary-tree/problem\n\n\ndef height(root):\n \"\"\"\n DFS\n\n v = Vertices\n e = Edges\n d = Depth\n\n Time complexity: O(v + e)\n Space complexity: O(d)\n \"\"\"\n if root:\n return 1 + max(height(root.left), height(root.right))\n else:\n return -1\n"},"path":{"kind":"string","value":"HackerRank/Data Structures/Trees/height-of-a-binary-tree.py"},"size":{"kind":"number","value":331,"string":"331"},"nl_text":{"kind":"string","value":"DFS\n\nv = Vertices\ne = Edges\nd = Depth\n\nTime complexity: O(v + e)\nSpace complexity: O(d)\n\n https://www.hackerrank.com/challenges/tree-height-of-a-binary-tree/problem"},"nl_size":{"kind":"number","value":167,"string":"167"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7225719094276428,"string":"0.722572"}}},{"rowIdx":596,"cells":{"content":{"kind":"string","value":"from dbt.clients.system import load_file_contents\nfrom dbt.contracts.files import (\n FilePath, ParseFileType, SourceFile, FileHash, AnySourceFile, SchemaSourceFile\n)\n\nfrom dbt.parser.schemas import yaml_from_file\nfrom dbt.parser.search import FilesystemSearcher\n\n\n# This loads the files contents and creates the SourceFile object\ndef load_source_file(\n path: FilePath, parse_file_type: ParseFileType,\n project_name: str) -> AnySourceFile:\n file_contents = load_file_contents(path.absolute_path, strip=False)\n checksum = FileHash.from_contents(file_contents)\n sf_cls = SchemaSourceFile if parse_file_type == ParseFileType.Schema else SourceFile\n source_file = sf_cls(path=path, checksum=checksum,\n parse_file_type=parse_file_type, project_name=project_name)\n source_file.contents = file_contents.strip()\n if parse_file_type == ParseFileType.Schema:\n source_file.dfy = yaml_from_file(source_file)\n return source_file\n\n\n# Special processing for big seed files\ndef load_seed_source_file(match: FilePath, project_name) -> SourceFile:\n if match.seed_too_large():\n # We don't want to calculate a hash of this file. 
Use the path.\n source_file = SourceFile.big_seed(match)\n else:\n file_contents = load_file_contents(match.absolute_path, strip=False)\n checksum = FileHash.from_contents(file_contents)\n source_file = SourceFile(path=match, checksum=checksum)\n source_file.contents = ''\n source_file.parse_file_type = ParseFileType.Seed\n source_file.project_name = project_name\n return source_file\n\n\n# Use the FilesystemSearcher to get a bunch of FilePaths, then turn\n# them into a bunch of FileSource objects\ndef get_source_files(project, paths, extension, parse_file_type):\n # file path list\n fp_list = list(FilesystemSearcher(\n project, paths, extension\n ))\n # file block list\n fb_list = []\n for fp in fp_list:\n if parse_file_type == ParseFileType.Seed:\n fb_list.append(load_seed_source_file(fp, project.project_name))\n else:\n fb_list.append(load_source_file(\n fp, parse_file_type, project.project_name))\n return fb_list\n\n\ndef read_files_for_parser(project, files, dirs, extension, parse_ft):\n parser_files = []\n source_files = get_source_files(\n project, dirs, extension, parse_ft\n )\n for sf in source_files:\n files[sf.file_id] = sf\n parser_files.append(sf.file_id)\n return parser_files\n\n\n# This needs to read files for multiple projects, so the 'files'\n# dictionary needs to be passed in. What determines the order of\n# the various projects? Is the root project always last? Do the\n# non-root projects need to be done separately in order?\ndef read_files(project, files, parser_files):\n\n project_files = {}\n\n project_files['MacroParser'] = read_files_for_parser(\n project, files, project.macro_paths, '.sql', ParseFileType.Macro,\n )\n\n project_files['ModelParser'] = read_files_for_parser(\n project, files, project.source_paths, '.sql', ParseFileType.Model,\n )\n\n project_files['SnapshotParser'] = read_files_for_parser(\n project, files, project.snapshot_paths, '.sql', ParseFileType.Snapshot,\n )\n\n project_files['AnalysisParser'] = read_files_for_parser(\n project, files, project.analysis_paths, '.sql', ParseFileType.Analysis,\n )\n\n project_files['DataTestParser'] = read_files_for_parser(\n project, files, project.test_paths, '.sql', ParseFileType.Test,\n )\n\n project_files['SeedParser'] = read_files_for_parser(\n project, files, project.data_paths, '.csv', ParseFileType.Seed,\n )\n\n project_files['DocumentationParser'] = read_files_for_parser(\n project, files, project.docs_paths, '.md', ParseFileType.Documentation,\n )\n\n project_files['SchemaParser'] = read_files_for_parser(\n project, files, project.all_source_paths, '.yml', ParseFileType.Schema,\n )\n\n # Also read .yaml files for schema files. Might be better to change\n # 'read_files_for_parser' accept an array in the future.\n yaml_files = read_files_for_parser(\n project, files, project.all_source_paths, '.yaml', ParseFileType.Schema,\n )\n project_files['SchemaParser'].extend(yaml_files)\n\n # Store the parser files for this particular project\n parser_files[project.project_name] = project_files\n"},"path":{"kind":"string","value":"core/dbt/parser/read_files.py"},"size":{"kind":"number","value":4420,"string":"4,420"},"nl_text":{"kind":"string","value":"This loads the files contents and creates the SourceFile object Special processing for big seed files We don't want to calculate a hash of this file. Use the path. 
Use the FilesystemSearcher to get a bunch of FilePaths, then turn them into a bunch of FileSource objects file path list file block list This needs to read files for multiple projects, so the 'files' dictionary needs to be passed in. What determines the order of the various projects? Is the root project always last? Do the non-root projects need to be done separately in order? Also read .yaml files for schema files. Might be better to change 'read_files_for_parser' accept an array in the future. Store the parser files for this particular project"},"nl_size":{"kind":"number","value":715,"string":"715"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8628302812576294,"string":"0.86283"}}},{"rowIdx":597,"cells":{"content":{"kind":"string","value":"from typing import List\n\n'''\n1. subproblems: dp(amount) the minimum number of coins needed to make changes for amount of S using the given coin denomination\n2. guessing: all the available denomination c_i\n3. relate subproblems: dp(amount) = min(dp(amount - c_i) + 1) for all possible c_i\n\nTime complexity: O(#subproblems * #coins)\n'''\nclass Solution:\n\n # top down solution\n def coinChange(self, coins: List[int], amount: int) -> int:\n\n # for amount less than 1, return 0\n if amount < 1:\n return 0\n \n memo = {}\n def helper(coins, amount):\n \n # for subproblems that we have alreay solve and memorized\n if amount in memo:\n return memo[amount]\n \n # base case, we reach out the bottom of the tree.\n if amount == 0:\n return 0\n\n # go through all possible coin denomination(breaches in tree)\n dp = float('inf')\n for coin in coins:\n\n if coin > amount:\n continue\n \n # relate subproblems\n dp = min(helper(coins, amount - coin) + 1, dp)\n \n memo[amount] = dp\n return dp\n\n helper(coins, amount)\n return -1 if memo[amount] == float('inf') else memo[amount]\n\n\n # bottom-up solution, DAG\n def coinChange_2(self, coins: List[int], amount: int) -> int:\n\n memo = [float('inf') for i in range(amount + 1)]\n\n # dp[i] = min{dp[i - c_i] + 1} for all c_i\n memo[0] = 0\n for i in range(amount + 1):\n \n # check all the states that are reachable by coins to state i\n for coin in coins:\n if i < coin: \n continue\n\n memo[i] = min(memo[i], memo[i - coin] + 1)\n \n print(memo)\n return -1 if memo[amount] == float('inf') else memo[amount]\n \n\n \n\n\n\nx = Solution()\n# rs = x.coinChange([1, 2, 5], 2)\nprint(x.coinChange_2([1,2,5], 11))"},"path":{"kind":"string","value":"solution/322. coin-change.py"},"size":{"kind":"number","value":2052,"string":"2,052"},"nl_text":{"kind":"string","value":"top down solution for amount less than 1, return 0 for subproblems that we have alreay solve and memorized base case, we reach out the bottom of the tree. 
go through all possible coin denomination(breaches in tree) relate subproblems bottom-up solution, DAG dp[i] = min{dp[i - c_i] + 1} for all c_i check all the states that are reachable by coins to state i rs = x.coinChange([1, 2, 5], 2)"},"nl_size":{"kind":"number","value":390,"string":"390"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.8649304509162903,"string":"0.86493"}}},{"rowIdx":598,"cells":{"content":{"kind":"string","value":"import SimpleITK as sitk\nimport numpy as np\nimport torch\nimport math\nimport time\nimport sys\nimport cv2\n\nfrom scipy.ndimage.interpolation import zoom\nfrom torch.autograd import Variable\nsys.path.append('../lung_nodule_detector')\nfrom training.layers import nms\n\ndef load_itk_image(filename):\n with open(filename) as f:\n contents = f.readlines()\n line = [k for k in contents if k.startswith('TransformMatrix')][0]\n transformM = np.array(line.split(' = ')[1].split(' ')).astype('float')\n transformM = np.round(transformM)\n if np.any(transformM != np.array([1, 0, 0, 0, 1, 0, 0, 0, 1])):\n isflip = True\n else:\n isflip = False\n\n itkimage = sitk.ReadImage(filename)\n numpyImage = sitk.GetArrayFromImage(itkimage)\n\n numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))\n numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))\n\n return numpyImage, numpyOrigin, numpySpacing, isflip\n\ndef lumTrans(img):\n lungwin = np.array([-1200.,600.])\n newimg = (img-lungwin[0])/(lungwin[1]-lungwin[0])\n newimg[newimg<0]=0\n newimg[newimg>1]=1\n newimg = (newimg*255).astype('uint8')\n return newimg\n\ndef resample(imgs, spacing, new_spacing, progressBar, order=2):\n print (len(imgs.shape))\n if len(imgs.shape)==3:\n new_shape = np.round(imgs.shape * spacing / new_spacing)\n true_spacing = spacing * imgs.shape / new_shape\n resize_factor = new_shape / imgs.shape\n imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)\n progressBar.setValue(40)\n return imgs, true_spacing\n elif len(imgs.shape)==4:\n n = imgs.shape[-1]\n newimg = []\n for i in range(n):\n slice = imgs[:,:,:,i]\n newslice,true_spacing = resample(slice,spacing,new_spacing)\n newimg.append(newslice)\n newimg=np.transpose(np.array(newimg),[1,2,3,0])\n return newimg,true_spacing\n else:\n raise ValueError('wrong shape')\n\ndef resample_v1(imgs, spacing, new_spacing, order=2):\n print (len(imgs.shape))\n if len(imgs.shape)==3:\n new_shape = np.round(imgs.shape * spacing / new_spacing)\n true_spacing = spacing * imgs.shape / new_shape\n resize_factor = new_shape / imgs.shape\n imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)\n return imgs, true_spacing\n elif len(imgs.shape)==4:\n n = imgs.shape[-1]\n newimg = []\n for i in range(n):\n slice = imgs[:,:,:,i]\n newslice,true_spacing = resample(slice,spacing,new_spacing)\n newimg.append(newslice)\n newimg=np.transpose(np.array(newimg),[1,2,3,0])\n return newimg,true_spacing\n else:\n raise ValueError('wrong shape')\n\ndef split_data(data, stride, split_comber):\n print (data.shape[1:])\n nz, nh, nw = data.shape[1:]\n pz = int(np.ceil(float(nz) / stride)) * stride\n ph = int(np.ceil(float(nh) / stride)) * stride\n pw = int(np.ceil(float(nw) / stride)) * stride\n data = np.pad(data, [[0, 0], [0, pz - nz], [0, ph - nh], [0, pw - nw]], 'constant', constant_values=0)\n\n xx, yy, zz = np.meshgrid(np.linspace(-0.5, 0.5, data.shape[1] / stride),\n np.linspace(-0.5, 0.5, data.shape[2] / stride),\n np.linspace(-0.5, 0.5, data.shape[3] / stride), indexing='ij')\n coord = 
np.concatenate([xx[np.newaxis, ...], yy[np.newaxis, ...], zz[np.newaxis, :]], 0).astype('float32')\n\n data, nzhw = split_comber.split(data)\n coord2, nzhw2 = split_comber.split(coord,\n side_len=split_comber.side_len / stride,\n max_stride=split_comber.max_stride / stride,\n margin=split_comber.margin / stride)\n assert np.all(nzhw == nzhw2)\n data = (data.astype(np.float32) - 128) / 128\n\n return torch.from_numpy(data), torch.from_numpy(coord2), np.array(nzhw)\n\ndef convert_prob(pbb):\n\n for label in pbb:\n pos_ori = label[1:4]\n radious_ori = label[4]\n #pos_ori = pos_ori + extendbox[:, 0]\n\n label[1:4] = pos_ori\n label[4] = radious_ori\n label[0] = sigmoid(label[0])\n return pbb\n\ndef sigmoid(x):\n return 1 / (1 + math.exp(-x))\n\ndef predict_nodule(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb, progressBar):\n\n net.eval()\n\n total_label = 0\n total_candi = 0\n\n splitlist = list(range(0, len(data) + 1, n_per_run))\n\n if splitlist[-1] != len(data):\n splitlist.append(len(data))\n outputlist = []\n\n for i in range(len(splitlist) - 1):\n with torch.no_grad():\n inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()\n inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()\n output = net(inputdata, inputcoord)\n outputlist.append(output.data.cpu().numpy())\n progressBar.setValue(10 + (80/len(splitlist) * (i+1)))\n output = np.concatenate(outputlist, 0)\n output = split_comber.combine(output, nzhw=nzhw)\n\n # fps 1.215909091, sens 0.933333333, thres 0.371853054\n thresh = 0.371853054\n pbb, mask = get_pbb(output, thresh, ismask=True)\n\n pbb = pbb[pbb[:, 0].argsort()[::-1]]\n pbb_cand_list = []\n # check overlap under 3mm\n for cand in pbb:\n is_overlap = False\n for appended in pbb_cand_list:\n minimum_dist = 3\n dist = math.sqrt(\n math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(\n appended[3] - cand[3], 2))\n if (dist < minimum_dist):\n is_overlap = True\n break;\n\n if not is_overlap:\n pbb_cand_list.append(cand)\n\n pbb_cand_list = np.array(pbb_cand_list)\n pbb_cand_list_nms = nms(pbb_cand_list, 0.3)\n\n # print (name)\n # print (lbb)\n world_pbb = convert_prob(pbb_cand_list_nms)\n # print (world_pbb)\n print(\"label\", len(lbb))\n print(\"z_pos y_pos x_pos size\")\n for i in range(len(lbb)):\n for j in range(len(lbb[i])):\n print(round(lbb[i][j], 2), end='\\t')\n print()\n print(\"candidate\", len(world_pbb))\n print(\"prob z_pos y_pos x_pos size\")\n for i in range(len(world_pbb)):\n for j in range(len(world_pbb[i])):\n print(round(world_pbb[i][j], 2), end='\\t')\n print()\n total_label += len(lbb)\n total_candi += len(world_pbb)\n\n return lbb, world_pbb\n\n\ndef predict_nodule_v1(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb):\n\n net.eval()\n\n total_label = 0\n total_candi = 0\n\n splitlist = list(range(0, len(data) + 1, n_per_run))\n\n if splitlist[-1] != len(data):\n splitlist.append(len(data))\n outputlist = []\n\n for i in range(len(splitlist) - 1):\n with torch.no_grad():\n inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()\n inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()\n output = net(inputdata, inputcoord)\n outputlist.append(output.data.cpu().numpy())\n output = np.concatenate(outputlist, 0)\n output = split_comber.combine(output, nzhw=nzhw)\n\n # fps 1.215909091, sens 0.933333333, thres 0.371853054\n thresh = 0.371853054\n pbb, mask = get_pbb(output, thresh, ismask=True)\n\n pbb = pbb[pbb[:, 0].argsort()[::-1]]\n pbb_cand_list = []\n # 
check overlap under 3mm\n for cand in pbb:\n is_overlap = False\n for appended in pbb_cand_list:\n minimum_dist = 3\n dist = math.sqrt(\n math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(\n appended[3] - cand[3], 2))\n if (dist < minimum_dist):\n is_overlap = True\n break;\n\n if not is_overlap:\n pbb_cand_list.append(cand)\n\n pbb_cand_list = np.array(pbb_cand_list)\n pbb_cand_list_nms = nms(pbb_cand_list, 0.3)\n\n # print (name)\n # print (lbb)\n world_pbb = convert_prob(pbb_cand_list_nms)\n # print (world_pbb)\n print(\"label\", len(lbb))\n print(\"z_pos y_pos x_pos size\")\n for i in range(len(lbb)):\n for j in range(len(lbb[i])):\n print(round(lbb[i][j], 2), end='\\t')\n print()\n print(\"candidate\", len(world_pbb))\n print(\"prob z_pos y_pos x_pos size\")\n for i in range(len(world_pbb)):\n for j in range(len(world_pbb[i])):\n print(round(world_pbb[i][j], 2), end='\\t')\n print()\n total_label += len(lbb)\n total_candi += len(world_pbb)\n\n return lbb, world_pbb\n\ndef draw_nodule_rect(lbb, world_pbb, img_arr):\n for i in range(len(lbb)):\n label = lbb[i]\n # label = np.ceil(label)\n r = (label[3] / 2) * 1.3\n top_left = (max(int(math.ceil(label[2] - r)), 0),\n max(int(math.ceil(label[1] - r)), 0))\n bottom_right = (min(int(math.ceil(label[2] + r)), np.shape(img_arr)[1]),\n min(int(math.ceil(label[1] + r)), np.shape(img_arr)[2]))\n z_range = [max(int(math.ceil(label[0] - r)), 0),\n min(int(math.ceil(label[0] + r)), np.shape(img_arr)[0])]\n for j in range(z_range[0], z_range[1]):\n cv2.rectangle(img_arr[j], top_left, bottom_right, (0, 255, 0), 1)\n\n for i in range(len(world_pbb)):\n candidate = world_pbb[i]\n r = (candidate[4] / 2) * 1.3\n\n top_left = (max(int(math.ceil(candidate[3] - r)), 0),\n max(int(math.ceil(candidate[2] - r)), 0))\n text_top_left = (max(int(math.ceil(candidate[3] - r)) - 1, 0),\n max(int(math.ceil(candidate[2] - r)) - 1, 0))\n bottom_right = (min(int(math.ceil(candidate[3] + r)), np.shape(img_arr)[1]),\n min(int(math.ceil(candidate[2] + r)), np.shape(img_arr)[2]))\n z_range = [max(int(math.ceil(candidate[1] - r)), 0),\n min(int(math.ceil(candidate[1] + r)), np.shape(img_arr)[0])]\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n for j in range(z_range[0], z_range[1]):\n cv2.rectangle(img_arr[j], top_left, bottom_right, (255, 0, 0), 1)\n #cv2.putText(img_arr[j], \"c\" + str(i) + \"_\" +str(round(candidate[0], 2)), top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)\n cv2.putText(img_arr[j], \"c\" + str(i), text_top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)\n\n\ndef crop_all(target, img_arr, crop_size = 48):\n target = np.copy(target)\n\n start = []\n for i in range(3):\n start.append(int(round(target[i])) - int(crop_size / 2))\n\n pad = []\n pad.append([0, 0])\n for i in range(3):\n leftpad = max(0, -start[i])\n rightpad = max(0, start[i] + crop_size - img_arr.shape[i + 1])\n pad.append([leftpad, rightpad])\n crop = img_arr[:,\n max(start[0], 0):min(start[0] + crop_size, img_arr.shape[1]),\n max(start[1], 0):min(start[1] + crop_size, img_arr.shape[2]),\n max(start[2], 0):min(start[2] + crop_size, img_arr.shape[3])]\n\n crop = np.pad(crop, pad, 'constant', constant_values=0)\n\n for i in range(3):\n target[i] = target[i] - start[i]\n\n return crop, target\n\ndef crop_nodule_arr_2ch(target, img_arr, crop_size = 48):\n\n img_size = [crop_size, crop_size, crop_size]\n crop_img, target = crop_all(target, img_arr, crop_size)\n imgs = np.squeeze(crop_img, axis=0)\n\n z = int(target[0])\n y = int(target[1])\n x = int(target[2])\n print (z, y, 
x)\n # z = 24\n # y = 24\n # x = 24\n\n nodule_size = int(target[3])\n margin = max(7, nodule_size * 0.4)\n radius = int((nodule_size + margin) / 2)\n\n s_z_pad = 0\n e_z_pad = 0\n s_y_pad = 0\n e_y_pad = 0\n s_x_pad = 0\n e_x_pad = 0\n\n s_z = max(0, z - radius)\n if (s_z == 0):\n s_z_pad = -(z - radius)\n\n e_z = min(np.shape(imgs)[0], z + radius)\n if (e_z == np.shape(imgs)[0]):\n e_z_pad = (z + radius) - np.shape(imgs)[0]\n\n s_y = max(0, y - radius)\n if (s_y == 0):\n s_y_pad = -(y - radius)\n\n e_y = min(np.shape(imgs)[1], y + radius)\n if (e_y == np.shape(imgs)[1]):\n e_y_pad = (y + radius) - np.shape(imgs)[1]\n\n s_x = max(0, x - radius)\n if (s_x == 0):\n s_x_pad = -(x - radius)\n\n e_x = min(np.shape(imgs)[2], x + radius)\n if (e_x == np.shape(imgs)[2]):\n e_x_pad = (x + radius) - np.shape(imgs)[2]\n\n # print (s_x, e_x, s_y, e_y, s_z, e_z)\n # print (np.shape(img_arr[s_z:e_z, s_y:e_y, s_x:e_x]))\n nodule_img = imgs[s_z:e_z, s_y:e_y, s_x:e_x]\n nodule_img = np.pad(nodule_img, [[s_z_pad, e_z_pad], [s_y_pad, e_y_pad], [s_x_pad, e_x_pad]], 'constant',\n constant_values=0)\n\n imgpad_size = [img_size[0] - np.shape(nodule_img)[0],\n img_size[1] - np.shape(nodule_img)[1],\n img_size[2] - np.shape(nodule_img)[2]]\n imgpad = []\n imgpad_left = [int(imgpad_size[0] / 2),\n int(imgpad_size[1] / 2),\n int(imgpad_size[2] / 2)]\n imgpad_right = [int(imgpad_size[0] / 2),\n int(imgpad_size[1] / 2),\n int(imgpad_size[2] / 2)]\n\n for i in range(3):\n if (imgpad_size[i] % 2 != 0):\n\n rand = np.random.randint(2)\n if rand == 0:\n imgpad.append([imgpad_left[i], imgpad_right[i] + 1])\n else:\n imgpad.append([imgpad_left[i] + 1, imgpad_right[i]])\n else:\n imgpad.append([imgpad_left[i], imgpad_right[i]])\n\n padding_crop = np.pad(nodule_img, imgpad, 'constant', constant_values=0)\n\n padding_crop = np.expand_dims(padding_crop, axis=0)\n\n crop = np.concatenate((padding_crop, crop_img))\n crop = (crop.astype(np.float32) - 128) / 128\n\n return torch.from_numpy(crop), crop\n\ndef predict_attribute(attribute_net, crop_img):\n attribute_net.eval()\n with torch.no_grad():\n crop_img = Variable(crop_img.cuda(async=True))\n output = attribute_net(crop_img)\n return output\n"},"path":{"kind":"string","value":"UI_util.py"},"size":{"kind":"number","value":14065,"string":"14,065"},"nl_text":{"kind":"string","value":"pos_ori = pos_ori + extendbox[:, 0] fps 1.215909091, sens 0.933333333, thres 0.371853054 check overlap under 3mm print (name) print (lbb) print (world_pbb) fps 1.215909091, sens 0.933333333, thres 0.371853054 check overlap under 3mm print (name) print (lbb) print (world_pbb) label = np.ceil(label)cv2.putText(img_arr[j], \"c\" + str(i) + \"_\" +str(round(candidate[0], 2)), top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA) z = 24 y = 24 x = 24 print (s_x, e_x, s_y, e_y, s_z, e_z) print (np.shape(img_arr[s_z:e_z, s_y:e_y, s_x:e_x]))"},"nl_size":{"kind":"number","value":531,"string":"531"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.42847782373428345,"string":"0.428478"}}},{"rowIdx":599,"cells":{"content":{"kind":"string","value":"# Copyright (c) 2021 Sen Wu. 
All Rights Reserved.\n\n\n\"\"\"Helper function to set random seed for reproducibility of models.\"\"\"\n\nimport logging\nimport random\nfrom typing import Optional\n\nimport numpy as np\nimport torch\n\nlogger = logging.getLogger(__name__)\n\n\ndef set_random_seed(seed: Optional[int] = None) -> None:\n \"\"\"Set random seed for random, numpy, and pytorch.\n\n Args:\n seed: The random seed, defaults to `None` which select it randomly.\n \"\"\"\n max_value = np.iinfo(np.uint32).max\n min_value = np.iinfo(np.uint32).min\n\n try:\n seed = int(seed)\n logger.info(f\"Set random seed to {seed}.\")\n except (TypeError, ValueError):\n seed = random.randint(min_value, max_value)\n logger.info(f\"No random seed specified, randomly set random seed to {seed}.\")\n\n if not (min_value <= seed <= max_value):\n new_seed = random.randint(min_value, max_value)\n logger.info(\n f\"Random seed {seed} is not valid, randomly set random seed to {new_seed}.\"\n )\n seed = new_seed\n\n # Set random seed for random\n random.seed(seed)\n # Set random seed for all numpy operations\n np.random.seed(seed=seed)\n # Set random seed for PyTorch\n torch.manual_seed(seed)\n"},"path":{"kind":"string","value":"src/emmental/utils/seed.py"},"size":{"kind":"number","value":1240,"string":"1,240"},"nl_text":{"kind":"string","value":"Set random seed for random, numpy, and pytorch.\n\nArgs:\n seed: The random seed, defaults to `None` which select it randomly.\nHelper function to set random seed for reproducibility of models.\n\n Copyright (c) 2021 Sen Wu. All Rights Reserved. Set random seed for random Set random seed for all numpy operations Set random seed for PyTorch"},"nl_size":{"kind":"number","value":336,"string":"336"},"nl_language":{"kind":"string","value":"en"},"nl_language_score":{"kind":"number","value":0.7250106930732727,"string":"0.725011"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":5,"numItemsPerPage":100,"numTotalItems":8000,"offset":500,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODA5NDk4MSwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9sYW5ndWFnZV9pZF9iaWdjb2RlIiwiZXhwIjoxNzU4MDk4NTgxLCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.oTDolUqscOPgrHhszMpmZiPEk8nbBezUrITGG5BEP8U5zV4_kvH2xbmwv2r2AYnZ87PvKCR2i-QJJcA3bK1DAQ","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
content
stringlengths
27
928k
path
stringlengths
4
230
size
int64
27
928k
nl_text
stringlengths
21
396k
nl_size
int64
21
396k
nl_language
stringlengths
2
3
nl_language_score
float64
0.04
1
from common import IssueProcess, Common from typing import Any, List import os # assignee dict which will be assigned to handle issues _GO_OWNER = {'ArcturusZhang'} # 'github assignee': 'token' _ASSIGNEE_TOKEN_GO = {'ArcturusZhang': os.getenv('AZURESDK_BOT_TOKEN')} class IssueProcessGo(IssueProcess): pass class Go(Common): def __init__(self, issues, assignee_token, language_owner): super(Go, self).__init__(issues, assignee_token, language_owner) self.file_out_name = 'release_go_status.md' def go_process(issues: List[Any]): instance = Go(issues, _ASSIGNEE_TOKEN_GO, _GO_OWNER) instance.run()
scripts/release_helper/go.py
637
assignee dict which will be assigned to handle issues 'github assignee': 'token'
80
en
0.769692
# Generated by Django 3.2.3 on 2021-05-19 08:50 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('api', '0002_auto_20210519_0849'), ] operations = [ migrations.AlterField( model_name='profile', name='year', field=models.CharField(blank=True, choices=[('FR', 'Freshman'), ('SO', 'Sophomore'), ('JR', 'Junior'), ('SR', 'Senior')], default='FR', max_length=2, verbose_name='year'), ), ]
api/migrations/0003_alter_profile_year.py
514
Generated by Django 3.2.3 on 2021-05-19 08:50
45
en
0.782912
# -*- coding: utf-8 -*- import io import sys import textwrap from itertools import chain from pprint import pprint import pytest import canmatrix.canmatrix import canmatrix.formats.sym def test_colliding_mux_values(): f = io.BytesIO( textwrap.dedent( '''\ FormatVersion=5.0 // Do not edit this line! Title="a file" {SEND} [MuxedId] ID=0h Mux=TheMux 0,1 0h Var=Signal unsigned 1,1 [MuxedId] Mux=FirstMux 0,1 1h Var=Signal unsigned 1,1 [MuxedId] Mux=SecondMux 0,1 1h Var=Signal unsigned 1,1 ''', ).encode('utf-8'), ) matrix = canmatrix.formats.sym.load(f) error, = matrix.load_errors line_number = 16 assert len(matrix.load_errors) == 1 assert isinstance(error, canmatrix.formats.sym.DuplicateMuxIdError) assert error.line_number == line_number error_string = str(error) assert error_string.startswith( 'line {line_number}: '.format(line_number=line_number), ) assert 'FirstMux' in error_string assert 'SecondMux' in error_string def test_parse_longname_with_colon(): f = io.BytesIO( textwrap.dedent( '''\ FormatVersion=5.0 // Do not edit this line! Title="a file" {SEND} [pass] DLC=8 Var=Password unsigned 16,16 /ln:"Access Level : Password" ''', ).encode('utf-8'), ) matrix = canmatrix.formats.sym.load(f) frame = matrix.frames[0] signal = frame.signals[0] assert signal.attributes['LongName'] == 'Access Level : Password' @pytest.mark.parametrize( 'is_float, value, expected', ( (False, '37', '37'), (True, '37.1', '37.1'), ), ) def test_export_default_decimal_places(is_float, value, expected): matrix = canmatrix.canmatrix.CanMatrix() frame = canmatrix.canmatrix.Frame() matrix.add_frame(frame) signal = canmatrix.canmatrix.Signal( size=32, is_float=is_float, is_signed=False, initial_value=value, ) frame.add_signal(signal) s = canmatrix.formats.sym.create_signal(db=matrix, signal=signal) start = '/d:' d, = ( segment for segment in s.split() if segment.startswith(start) ) d = d[len(start):] assert d == expected @pytest.mark.parametrize( 'variable_type, bit_length', ( ('float', 32), ('double', 64), ) ) def tests_parse_float(variable_type, bit_length): f = io.BytesIO( textwrap.dedent( '''\ FormatVersion=5.0 // Do not edit this line! Title="Untitled" {{SENDRECEIVE}} [Symbol1] ID=000h DLC=8 Var=a_signal {variable_type} 0,{bit_length} '''.format( variable_type=variable_type, bit_length=bit_length, ), ).encode('utf-8'), ) matrix = canmatrix.formats.sym.load(f) assert matrix.load_errors == [] frame = matrix.frames[0] signal = frame.signals[0] assert signal.is_float def test_unterminated_enum(): f = io.BytesIO( textwrap.dedent( '''\ FormatVersion=5.0 // Do not edit this line! Title="Untitled {ENUMS} enum Categories(0="Animal", 1="Vegetable", 3="Mineral" {SENDRECEIVE} [Symbol1] ID=000h DLC=8 Var=Signal unsigned 0,16 ''' ).encode('utf-8'), ) # Missing ')' at the end of enum used to cause infinite loop matrix = canmatrix.formats.sym.load(f) assert len(matrix.load_errors) == 1 if sys.version_info > (3, 0): assert isinstance(matrix.load_errors[0], EOFError) else: assert isinstance(matrix.load_errors[0], StopIteration) def test_title_read_and_write(): f = io.BytesIO( textwrap.dedent( '''\ FormatVersion=5.0 // Do not edit this line! 
Title="An Example Title" ''' ).encode('utf-8'), ) matrix = canmatrix.formats.sym.load(f) assert matrix.attribute("Title") == "An Example Title" f_out = io.BytesIO() canmatrix.formats.sym.dump(matrix, f_out) assert f_out.getvalue().decode('utf-8').splitlines()[1] == 'Title="An Example Title"' @pytest.mark.parametrize( 'enum_str, enum_dict, enum_label', ( ('enum Animal(0="Dog", 1="Cat", 2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Simple enum"), ('''\ enum Animal(0="Dog", //A Comment 1="Cat", 2="Fox")''', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "Multiline enum"), ('enum Animal(0="Dog",1="Cat",2="Fox")', {"Animal": {0: "Dog", 1: "Cat", 2: "Fox"}}, "No Space in Separator"), ) ) def test_enums_read(enum_str, enum_dict, enum_label): f = io.BytesIO('''\ FormatVersion=5.0 // Do not edit this line! Title="An Example Title" {{ENUMS}} {} '''.format(enum_str).encode('utf-8'), ) matrix = canmatrix.formats.sym.load(f) assert matrix.load_errors == [], "Failed to load canmatrix, when testing enum case : '{}'".format(enum_label) assert matrix.value_tables == enum_dict, "Enum not parsed correctly : '{}'".format(enum_label) def test_enums_export(): f = io.BytesIO('''\ FormatVersion=5.0 // Do not edit this line! Title="An Example Title" {ENUMS} enum Animal(0="Dog",1="Cat",2="Fox") {SENDRECEIVE} [Frame1] ID=000h DLC=8 Var=Signal1 unsigned 0,16 '''.encode('utf-8'), ) matrix = canmatrix.formats.sym.load(f) assert matrix.load_errors == [], "Failed to load canmatrix" # Add an enum to Signal1 matrix.frame_by_name("Frame1").signal_by_name("Signal1").enumeration = "Plants" matrix.frame_by_name("Frame1").signal_by_name("Signal1").values = {0: "Grass", 1: "Flower", 2: "Tree"} # Export and reimport f_out = io.BytesIO() canmatrix.formats.sym.dump(matrix, f_out) f_in = io.BytesIO(f_out.getvalue()) new_matrix = canmatrix.formats.sym.load(f_in) # Check that Enums from Enums table exported and reimported correctly assert new_matrix.value_tables["Animal"] == {0: "Dog", 1: "Cat", 2: "Fox"} # Check that Enums from a Signal.Values property exported and reimported correctly assert new_matrix.value_tables["Plants"] == {0: "Grass", 1: "Flower", 2: "Tree"} def test_types_read(): f = io.BytesIO('''\ FormatVersion=5.0 // Do not edit this line! 
Title="Types Test" {ENUMS} enum EnumAnimals(0="Cat", // An enum value for cats 1="Dog", // An enum value for dogs 2="Horse", 3="Monkey", 4="Lion")// An enum with a comment for the final value {SENDRECEIVE} [SymbolLengths] ID=000h DLC=8 Var="1Bit" unsigned 0,1 Var="3Bits" unsigned 1,3 Var="4Bits" unsigned 4,4 Var="21Bits" unsigned 8,21 Var="6Bits" unsigned 29,6 Var="29Bits" unsigned 35,29 [SymbolTypes] ID=001h DLC=8 Var=Bit bit 0,1 Var=Char char 1,8 Var=String string 16,16 Var=Signed signed 32,4 Var=Unsigned unsigned 36,4 Var=Enum EnumAnimals 40,4 Var=Raw raw 48,16 [SymbolDouble] ID=002h DLC=8 Var=Double double 0,64 // Must be 8 Bytes according to PCAN Symbol Editor V5 [SymbolFloat] ID=003h DLC=4 Var=Float float 0,32 // Must be 4 Bytes according to PCAN Symbol Editor V5 '''.encode('utf-8'), ) matrix = canmatrix.formats.sym.load(f) # Check no errors loading the matrix assert matrix.load_errors == [] f_out = io.BytesIO() canmatrix.formats.sym.dump(matrix, f_out) f_out_bytes = f_out.getvalue() f_out_string = f_out_bytes.decode("utf-8") # Check that types are preserved when saving back to .SYM format assert "Var=Bit bit" in f_out_string assert "Var=Char char" in f_out_string assert "Var=String string" in f_out_string assert "Var=Signed signed" in f_out_string assert 'Var="21Bits" unsigned' in f_out_string assert 'Var=Float float' in f_out_string assert 'Var=Double double' in f_out_string # Read matrix back in to check all symbols/frames preserved f_in = io.BytesIO(f_out_bytes) new_matrix = canmatrix.formats.sym.load(f_in) # Check no errors loading the matrix assert new_matrix.load_errors == [] # Check that both matrices have the same Frames frames = [f.name for f in matrix.frames] new_frames = [f.name for f in new_matrix.frames] assert sorted(frames) == sorted(new_frames) # Check that both matrices have the same signals, and that all the expected signals are present signals = chain(*[[s.name for s in frame.signals] for frame in matrix.frames]) new_signals = chain(*[[s.name for s in frame.signals] for frame in new_matrix.frames]) assert sorted(signals) == sorted(new_signals) == sorted([ "1Bit", "3Bits", "4Bits", "21Bits", "6Bits", "29Bits", "Bit", "Char", "String", "Signed", "Unsigned", "Enum", "Raw", "Double", "Float", ]) @pytest.mark.parametrize( 'var_name,data,raw_value', ( ('VarMux1', bytearray([1, 12, 0, 0, 0, 0, 0, 0]), 12), ('VarMux2', bytearray([2, 0, 0, 0, 23, 0, 0, 0]), 23), ('VarMux200', bytearray([200, 0, 0, 0, 0, 0, 34, 0]), 34), ) ) def test_mux_decode(var_name,data,raw_value): f = io.BytesIO('''\ FormatVersion=5.0 // Do not edit this line! Title="Types Test" FormatVersion=5.0 // Do not edit this line! Title="Test Symbols File" {SENDRECEIVE} [MuxTestFrame] ID=002h DLC=8 Mux=Mux1 0,8 1 Var=VarMux1 unsigned 8,8 [MuxTestFrame] DLC=8 Mux=Mux2 0,8 2 Var=VarMux2 unsigned 32,8 [MuxTestFrame] DLC=8 Mux=Mux200 0,8 C8h Var=VarMux200 unsigned 48,8 '''.encode('utf-8'), ) matrix = canmatrix.formats.sym.load(f) # Check no errors loading the matrix assert matrix.load_errors == [] frame = matrix.frame_by_name("MuxTestFrame") r = frame.decode(data) assert var_name in r.keys(), "Signal {}, not decoded. Only : {}".format(var_name, ','.join(r for r in r.keys())) assert r[var_name].raw_value == raw_value
src/canmatrix/tests/test_sym.py
11,127
-*- coding: utf-8 -*- Missing ')' at the end of enum used to cause infinite loop Add an enum to Signal1 Export and reimport Check that Enums from Enums table exported and reimported correctly Check that Enums from a Signal.Values property exported and reimported correctly Check no errors loading the matrix Check that types are preserved when saving back to .SYM format Read matrix back in to check all symbols/frames preserved Check no errors loading the matrix Check that both matrices have the same Frames Check that both matrices have the same signals, and that all the expected signals are present Check no errors loading the matrix
638
en
0.829022
from django.forms.utils import flatatt from django.utils.html import format_html, format_html_join from django.utils.translation import gettext as _ from wagtail.core import blocks from wagtail.core.blocks import PageChooserBlock from wagtail.images.blocks import ImageChooserBlock from wagtailmarkdown.utils import render_markdown from wagtailmedia.blocks import AbstractMediaChooserBlock class MediaBlock(AbstractMediaChooserBlock): def render_basic(self, value, context=None): if not value: return '' video_not_supported_text = _("Your browser does not support video playback.") audio_not_supported_text = _("Your browser does not support audio playback.") # Translators: Translators: This message appears below embedded video and audio on the site. Many feature phones won't be able to play embedded video/audio, so the site offers an opportunity to download the file. Part of this message (between %(start_link)s and %(end_link)s ) is a clickable download link. download_video_text = _('If you cannot view the above video, you can' ' instead %(start_link)sdownload it%(end_link)s.') % { 'start_link': '<a href={2} download>', 'end_link': '</a>' } # Translators: Translators: This message appears below embedded video and audio on the site. Many feature phones won't be able to play embedded video/audio, so the site offers an opportunity to download the file. Part of this message (between %(start_link)s and %(end_link)s ) is a clickable download link. download_audio_text = _('If you cannot listen to the above audio, you can' ' instead %(start_link)sdownload it%(end_link)s.') % { 'start_link': '<a href={2} download>', 'end_link': '</a>' } if value.type == 'video': player_code = ''' <div> <video width="320" height="240" {1} controls> {0} ''' + video_not_supported_text + ''' </video> </div> <p class='article__content--video'>''' + download_video_text + '''</p> ''' else: player_code = ''' <div> <audio controls> {0} ''' + audio_not_supported_text + ''' </audio> </div> <p class='article__content--audio'>''' + download_audio_text + '''</p> ''' thumbnail = f'poster={value.thumbnail.url}' if value.thumbnail else '' return format_html(player_code, format_html_join( '\n', "<source{0}>", [[flatatt(s)] for s in value.sources] ), thumbnail, value.url) class SocialMediaLinkBlock(blocks.StructBlock): title = blocks.CharBlock(max_length=255) link = blocks.URLBlock() image = ImageChooserBlock(template='blocks/image.html') class Meta: icon = 'site' class SocialMediaShareButtonBlock(blocks.StructBlock): platform = blocks.CharBlock(max_length=255) is_active = blocks.BooleanBlock(required=False) image = ImageChooserBlock(template='blocks/image.html', required=False) class Meta: icon = 'site' class EmbeddedQuestionnaireChooserBlock(blocks.PageChooserBlock): class Meta: icon = 'form' class EmbeddedQuestionnaireBlock(blocks.StructBlock): direct_display = blocks.BooleanBlock(required=False) class EmbeddedPollBlock(EmbeddedQuestionnaireBlock): poll = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Poll') def get_context(self, value, parent_context=None): context = super().get_context(value, parent_context) poll = value.get('poll') if poll and poll.live: context.update({ 'direct_display': value['direct_display'], 'questionnaire': poll.specific, }) return context class Meta: template = 'questionnaires/tags/questionnaire_wrapper.html' class EmbeddedSurveyBlock(EmbeddedQuestionnaireBlock): survey = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Survey') def get_context(self, value, parent_context=None): 
context = super().get_context(value, parent_context) survey = value.get('survey') if survey and survey.live: context.update({ 'direct_display': value['direct_display'], 'questionnaire': survey.specific, }) return context class Meta: template = 'questionnaires/tags/questionnaire_wrapper.html' class EmbeddedQuizBlock(EmbeddedQuestionnaireBlock): quiz = EmbeddedQuestionnaireChooserBlock(target_model='questionnaires.Quiz') def get_context(self, value, parent_context=None): context = super().get_context(value, parent_context) quiz = value.get('quiz') if quiz and quiz.live: context.update({ 'direct_display': value['direct_display'], 'questionnaire': quiz.specific, }) return context class Meta: template = 'questionnaires/tags/questionnaire_wrapper.html' class PageButtonBlock(blocks.StructBlock): page = blocks.PageChooserBlock() text = blocks.CharBlock(required=False, max_length=255) def get_context(self, value, parent_context=None): context = super().get_context(value, parent_context) button_page = value.get('page') if button_page and button_page.live: context.update({ 'button_page': button_page.specific, 'text': value.get('text') or button_page.title }) return context class Meta: template = 'blocks/page_button.html' class ArticleBlock(blocks.StructBlock): display_section_title = blocks.BooleanBlock(required=False) article = PageChooserBlock(target_model='home.Article') def get_context(self, value, parent_context=None): context = super().get_context(value, parent_context) article = value.get('article') if article and article.live: context.update({ 'display_section_title': value['display_section_title'], 'article': article.specific, }) return context class Meta: template = 'blocks/article.html' class NumberedListBlock(blocks.ListBlock): def render_basic(self, value, context=None): children = format_html_join( '\n', '<li>{0}</li>', [ (self.child_block.render(child_value, context=context),) for child_value in value ] ) return format_html("<ol>{0}</ol>", children) class RawHTMLBlock(blocks.RawHTMLBlock): def render_basic(self, value, context=None): result = super(RawHTMLBlock, self).render_basic(value, context) return render_markdown(result) class OfflineAppButtonBlock(blocks.StructBlock): smartphone_text = blocks.CharBlock( help_text=_('This text appears when it is possible for the user to install the app on their phone.')) feature_phone_text = blocks.CharBlock(required=False, help_text=_('This text appears when the user is using a feature phone and thus cannot install the app ' '(the button will be disabled in this case). [Currently not implemented]')) offline_text = blocks.CharBlock(required=False, help_text=_('This text appears when the user is navigating the site via the offline app and ' 'thus it doesn\'t make sense to install the offline app again ' '(the button will be disabled in this case). [Currently not implemented]')) class Meta: template = 'blocks/offline_app_button.html'
home/blocks.py
7,922
Translators: Translators: This message appears below embedded video and audio on the site. Many feature phones won't be able to play embedded video/audio, so the site offers an opportunity to download the file. Part of this message (between %(start_link)s and %(end_link)s ) is a clickable download link. Translators: Translators: This message appears below embedded video and audio on the site. Many feature phones won't be able to play embedded video/audio, so the site offers an opportunity to download the file. Part of this message (between %(start_link)s and %(end_link)s ) is a clickable download link.
609
en
0.832843
class Solution: def longestCommonSubsequence(self, text1: str, text2: str) -> int: ''' #最长连续公共子串 l1=len(text1) l2=len(text2) if l1==0 or l2==0: return 0 dp = [[0 for i in range(l2)] for i in range(l1)] res = 0 if text1[0]==text2[0]: dp[0][0]=1 res=1 for i in range(1,l2): if text2[i]==text1[0]: dp[0][i]=1 res=1 for i in range(1,l1): if text1[i]==text2[0]: dp[i][0]=1 res=1 for i in range(1,l1): for j in range(1,l2): if text1[i]==text2[j]: dp[i][j]=dp[i-1][j-1]+1 res=max(res,dp[i][j]) return res ''' ''' #最长子串(可不连续):其实就是在问text1[:i+1]和text2[:j+1]有多少个相同的字母 l1 = len(text1) l2 = len(text2) if l1 == 0 or l2 == 0: return 0 dp = [[0 for i in range(l2)] for i in range(l1)] if text1[0] == text2[0]: dp[0][0] = 1 for i in range(1, l2): if text2[i] == text1[0] or dp[0][0]==1 or dp[0][i-1]==1: dp[0][i] = 1 for i in range(1, l1): if text1[i] == text2[0] or dp[0][0]==1 or dp[i-1][0]==1: dp[i][0] = 1 for i in range(1, l1): for j in range(1, l2): if text1[i] == text2[j]: dp[i][j] = dp[i - 1][j - 1] + 1 else: dp[i][j]=max(dp[i][j-1],dp[i-1][j]) return dp[-1][-1] ''' #recursion #exit case if len(text1)==0 or len(text2)==0: return 0 if text1[-1]==text2[-1]: return 1+self.longestCommonSubsequence(text1[:-1],text2[:-1]) else: return max(self.longestCommonSubsequence(text1[:-1],text2),self.longestCommonSubsequence(text1,text2[:-1])) if __name__ == '__main__': sol=Solution() text1 ="ylqpejqbalahwr" text2 ="yrkzavgdmdgtqpg" # "hofubmnylkra" # "pqhgxgdofcvmr" print(sol.longestCommonSubsequence(text1,text2))
DP/Leetcode1143.py
2,248
#最长连续公共子串 l1=len(text1) l2=len(text2) if l1==0 or l2==0: return 0 dp = [[0 for i in range(l2)] for i in range(l1)] res = 0 if text1[0]==text2[0]: dp[0][0]=1 res=1 for i in range(1,l2): if text2[i]==text1[0]: dp[0][i]=1 res=1 for i in range(1,l1): if text1[i]==text2[0]: dp[i][0]=1 res=1 for i in range(1,l1): for j in range(1,l2): if text1[i]==text2[j]: dp[i][j]=dp[i-1][j-1]+1 res=max(res,dp[i][j]) return res recursionexit case "hofubmnylkra" "pqhgxgdofcvmr"
553
en
0.245401
from __future__ import print_function import torch from torch import nn import numpy as np import torch.nn.functional as F from torch.autograd import Variable from constant import * from torch.nn.utils.rnn import pack_padded_sequence class EncoderGRU(nn.Module): def __init__(self, vocab_size,emb_dim,emb, hidden_dim, nlayers, pad_token, bidir=False): #emb---np wordVec vocab_size=len(emb) super(EncoderGRU,self).__init__() #self.word_emb=nn.Embedding(vocab_size,emb_dim,pad_token) #self.word_emb.weight.data.copy_(torch.from_numpy(emb)) #self.pos1_emb=nn.Embedding(MaxPos,dimWPE) #self.pos2_emb=nn.Embedding(MaxPos,dimWPE) self.hidden_dim=hidden_dim self.emb_dim=emb_dim+dimWPE*2 self.nlayers=nlayers self.bidir=bidir #using gru self.gru=nn.GRU( self.emb_dim//2 if bidir else self.emb_dim, self.hidden_dim, self.nlayers, bidirectional=bidir, batch_first=True ) def forward(self,input_,pos1,pos2): embd=self.word_emb(input_) pos1=self.pos1_emb(pos1) pos2=self.pos2_emb(pos2) embd=torch.cat((embd,pos1,pos2),2) #using gru _,h_t_=self.encoder(embed) h_t=torch.cat((h_t_[-1],h_t_[-2]),1)if self.bidir else h_t_[-1] return h_t class EncoderCNN(nn.Module): def __init__(self, vocab_size,emb,emb_dim=dimWE, hidden_dim=dimC,lang=0): #emb---np wordVec vocab_size=len(emb) super(EncoderCNN,self).__init__() self.lang=lang self.word_emb=nn.Embedding(vocab_size,emb_dim) self.word_emb.weight.data.copy_(torch.from_numpy(emb)) self.pos1_emb=nn.Embedding(MaxPos,dimWPE) self.pos2_emb=nn.Embedding(MaxPos,dimWPE) self.maxPooling=nn.MaxPool1d(SenLen[self.lang]-2) self.emb_dim=emb_dim+dimWPE*2 self.hidden_dim=hidden_dim #using CNN self.tanh=nn.Tanh() self.conv=nn.Conv1d(self.emb_dim,hidden_dim,filter_size) self.dropout=nn.Dropout(p=CNNDropout) def forward(self,inp,pos1,pos2): Len=inp.size(0) embd=self.word_emb(inp) pos1=self.pos1_emb(pos1) pos2=self.pos2_emb(pos2) embd=torch.cat((embd,pos1,pos2),2).transpose(1,2) conved=self.conv(embd) pooled=self.maxPooling(conved).view(Len,dimC) out=self.tanh(pooled) return self.dropout(out) class CNNEncoder(nn.Module): def __init__(self,vocab_en,emb_en,vocab_zh,emb_zh): super(CNNEncoder,self).__init__() self.encoder_en=EncoderCNN(vocab_en,emb_en,dimWE,dimC,0) self.encoder_zh=EncoderCNN(vocab_zh,emb_zh,dimWE,dimC,1) def forward(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh): return self.encoder_en(wordsEn,pos1En,pos2En),self.encoder_zh(wordsZh,pos1Zh,pos2Zh) class Discriminator(nn.Module): def __init__(self, dis_input_dim=Encodered_dim, nlayers=dis_layers, hidden_dim=dis_hidden_dim, input_dropout=dis_input_dropout, dropout=dis_dropout): super(Discriminator,self).__init__() self.dis_input=dis_input_dim layers=[nn.Dropout(input_dropout)] for i in range(0,nlayers+1): input_dim=self.dis_input if i==0 else hidden_dim output_dim=1 if i==nlayers else hidden_dim layers.append(nn.Linear(input_dim,output_dim)) if i<nlayers: layers.append(nn.LeakyReLU(0.2)) layers.append(nn.Dropout(dropout)) layers.append(nn.Sigmoid()) self.layers=nn.Sequential(*layers) def forward(self,inp): assert inp.dim()==2 and inp.size(1)==self.dis_input return self.layers(inp).view(-1) class MultiRE(nn.Module): def __init__(self): super(MultiRE,self).__init__() self.relation_emb=nn.Embedding(dimR,Encodered_dim) self.dropout=nn.Dropout(p=Att_dropout) #self.softmax=nn.Softmax() #self.logsoftmax=nn.LogSoftmax() self.M=nn.Linear(Encodered_dim,dimR) def forward(self,inp_en,r_en,l_en,inp_zh,r_zh,l_zh,re_mask): NumRe=r_en.size(0) NumIn=l_zh.size(0) relation_en=self.relation_emb(r_en) relation_zh=self.relation_emb(r_zh) 
attn_en=torch.sum(relation_en*inp_en,2) attn_zh=torch.sum(relation_zh*inp_zh,2) p=Variable(torch.cuda.FloatTensor(NumIn,NumRe).fill_(0.0)) L_en=0 L_zh=0 R_vec=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0)) S=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0)) for i in range(0,NumIn): R_en=L_en+l_en[i].data[0] R_zh=L_zh+l_zh[i].data[0] if R_en>L_en and R_zh>L_zh: Att=F.softmax(torch.cat((attn_en[:,L_en:R_en],attn_zh[:,L_zh:R_zh]),1),1) S[i]=self.dropout(torch.matmul(Att,torch.cat((inp_en[L_en:R_en],inp_zh[L_zh:R_zh]),0))) R_vec[i]=relation_en[:,L_en,:] elif R_en>L_en: Att=F.softmax(attn_en[:,L_en:R_en],1) S[i]=self.dropout(torch.matmul(Att,inp_en[L_en:R_en])) R_vec[i]=relation_en[:,L_en,:] elif R_zh>L_zh: Att=F.softmax(attn_zh[:,L_zh:R_zh],1) S[i]=self.dropout(torch.matmul(Att,inp_zh[L_zh:R_zh])) R_vec[i]=relation_zh[:,L_zh,:] else: print("ERR NO sentences") exit() L_en=R_en L_zh=R_zh p_n=F.log_softmax(self.M(S)+torch.sum(R_vec*S,2).view(NumIn,NumRe,1),2).view(NumIn,NumRe,dimR) return p_n[re_mask].view(NumIn,NumRe) class MonoRE(nn.Module): def __init__(self): super(MonoRE,self).__init__() self.relation_emb=nn.Embedding(dimR,Encodered_dim) self.dropout=nn.Dropout(p=Att_dropout) #self.softmax=nn.Softmax() #self.logsoftmax=nn.LogSoftmax() self.M=nn.Linear(Encodered_dim,dimR) def forward(self,inp,r,l,re_mask): NumRe=r.size(0) NumIn=l.size(0) relation=self.relation_emb(r) attn=torch.sum(relation*inp,2) p=Variable(torch.cuda.FloatTensor(NumIn,NumRe).fill_(0.0)) L=0 R_vec=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0)) S=Variable(torch.cuda.FloatTensor(NumIn,NumRe,Encodered_dim).fill_(0.0)) for i in range(0,NumIn): R=L+l[i].data[0] if R>L: Att=F.softmax(attn[:,L:R],1) S[i]=self.dropout(torch.matmul(Att,inp[L:R])) R_vec[i]=relation[:,L,:] L=R p_n=F.log_softmax((self.M(S)+torch.sum(R_vec*S,2).view(NumIn,NumRe,1)),2).view(NumIn,NumRe,dimR) return p_n[re_mask].view(NumIn,NumRe) class AMRE(nn.Module): def __init__(self,emb_en,emb_zh): super(AMRE,self).__init__() self.encoder=CNNEncoder(len(emb_en),emb_en,len(emb_zh),emb_zh).cuda() self.enRE=MonoRE().cuda() self.zhRE=MonoRE().cuda() def forward(self,wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask): inp_en,inp_zh=self.encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh) return self.enRE(inp_en,rEn,lEn,re_mask)+self.zhRE(inp_zh,rZh,lZh,re_mask) class MARE(nn.Module): def __init__(self,emb_en,emb_zh): super(MARE,self).__init__() self.D=Discriminator().cuda() self.share_encoder=CNNEncoder(len(emb_en),emb_en,len(emb_zh),emb_zh).cuda() self.multiRE=MultiRE().cuda() self.monoRE=AMRE(emb_en,emb_zh) def Orth_con(self,wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh): share_en,share_zh=self.share_encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh) mono_en,mono_zh=self.monoRE.encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh) share=torch.cat((share_en,share_zh),0) mono=torch.cat((mono_en,mono_zh),0) share-=torch.mean(share,0) mono-=torch.mean(mono,0) share=F.normalize(share,2,1) mono=F.normalize(mono,2,1) correlation_mat=torch.matmul(share.transpose(0,1),mono) cost=torch.mean(correlation_mat*correlation_mat) return cost def forward(self,wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask): share_en,share_zh=self.share_encoder(wordsEn,pos1En,pos2En,wordsZh,pos1Zh,pos2Zh) return self.monoRE(wordsEn,pos1En,pos2En,rEn,lEn,wordsZh,pos1Zh,pos2Zh,rZh,lZh,re_mask)+self.multiRE(share_en,rEn,lEn,share_zh,rZh,lZh,re_mask)
CNN/src/models.py
8,728
emb---np wordVec vocab_size=len(emb)self.word_emb=nn.Embedding(vocab_size,emb_dim,pad_token)self.word_emb.weight.data.copy_(torch.from_numpy(emb))self.pos1_emb=nn.Embedding(MaxPos,dimWPE)self.pos2_emb=nn.Embedding(MaxPos,dimWPE)using gruusing gruemb---np wordVec vocab_size=len(emb)using CNNself.softmax=nn.Softmax()self.logsoftmax=nn.LogSoftmax()self.softmax=nn.Softmax()self.logsoftmax=nn.LogSoftmax()
403
en
0.099738
#!/usr/bin/env python # -*- encoding: utf-8 -*- ''' @File : utils_node.py @Time : 2022/03/08 14:35:13 @Author : Jianwen Chen @Version : 1.0 @Contact : [email protected] @License : (C)Copyright 2021-2022, SAIL-Lab ''' ######################################## import area ######################################## # common library import os import random import torch import torch.nn as nn import numpy as np from tqdm import tqdm from sklearn import metrics from torch.optim.lr_scheduler import _LRScheduler ######################################## function area ######################################## def seed_everything(seed=2021): os.environ['PYTHONHASHSEED'] = str(seed) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True def initialize_weights(model): """ Initializes the weights of a model in place. :param model: An nn.Module. """ for param in model.parameters(): if param.dim() > 1: nn.init.xavier_normal_(param) def loop(data_loader, model, optimizer, scheduler, device): batch_size = data_loader.batch_size data_loader = tqdm(data_loader) if optimizer is not None else data_loader loss_sum, y_true, y_pred = 0.0, list(), list() for batch in data_loader: smiles, mols, batch_node_features, batch_edge_features, batch_distance_matrix, labels = batch # add mask batch_masks = torch.sum(torch.abs(batch_node_features), dim=-1) != 0 # (batch, max_length, node_dim) batch_node_features = batch_node_features.to(device) # (batch, max_length, max_length, edge_dim) batch_edge_features = batch_edge_features.to(device) # (batch, max_length, max_length) batch_distance_matrix = batch_distance_matrix.to(device) # (batch, max_length) batch_masks = batch_masks.to(device) # (batch, max_length, 1) labels = labels.to(device) # (batch, max_length, 1) outputs = model(batch_node_features, batch_edge_features, batch_distance_matrix, batch_masks, device) # loss calculation loss = cal_loss(y_true=labels, y_pred=outputs, device=device) loss_sum += loss.item() if optimizer is not None: # clear gradients for this training step optimizer.zero_grad() # back propagation, compute gradients loss.backward() # apply gradients optimizer.step() # NormLR need step every batch if scheduler is not None: scheduler.step() # collect result labels = labels.detach().cpu().numpy() outputs = outputs.detach().cpu().numpy() y_true.append([]) y_pred.append([]) for label, output in zip(labels, outputs): label, output = label.flatten(), output.flatten() for l, o in zip(label, output): if l != 0.0: y_true[-1].append(l) y_pred[-1].append(o) # clear cuda cache torch.cuda.empty_cache() # metric calculation results = cal_metric(y_true=y_true, y_pred=y_pred) results['loss'] = loss_sum / (len(data_loader) * batch_size) return results def cal_loss(y_true, y_pred, device): y_true, y_pred = y_true.flatten(), y_pred.flatten() y_mask = torch.where(y_true != 0.0, torch.full_like(y_true, 1), torch.full_like(y_true, 0)) loss = torch.sum(torch.abs(y_true - y_pred) * y_mask) / torch.sum(y_mask) return loss def cal_metric(y_true, y_pred): concatenate_true, concatenate_pred = np.concatenate(y_true, axis=-1), np.concatenate(y_pred, axis=-1) mae = metrics.mean_absolute_error(concatenate_true, concatenate_pred) r2 = metrics.r2_score(concatenate_true, concatenate_pred) return {'mae':mae, 'r2':r2} class NoamLR(_LRScheduler): """ Noam learning rate scheduler with piecewise linear increase and exponential decay. 
The learning rate increases linearly from init_lr to max_lr over the course of the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch). Then the learning rate decreases exponentially from max_lr to final_lr over the course of the remaining total_steps - warmup_steps (where total_steps = total_epochs * steps_per_epoch). This is roughly based on the learning rate schedule from Attention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762). """ def __init__(self, optimizer, warmup_epochs, total_epochs, steps_per_epoch, init_lr, max_lr, final_lr): """ Initializes the learning rate scheduler. :param optimizer: A PyTorch optimizer. :param warmup_epochs: The number of epochs during which to linearly increase the learning rate. :param total_epochs: The total number of epochs. :param steps_per_epoch: The number of steps (batches) per epoch. :param init_lr: The initial learning rate. :param max_lr: The maximum learning rate (achieved after warmup_epochs). :param final_lr: The final learning rate (achieved after total_epochs). """ assert len(optimizer.param_groups) == len(warmup_epochs) == len(total_epochs) == len(init_lr) == len(max_lr) == len(final_lr) self.num_lrs = len(optimizer.param_groups) self.optimizer = optimizer self.warmup_epochs = np.array(warmup_epochs) self.total_epochs = np.array(total_epochs) self.steps_per_epoch = steps_per_epoch self.init_lr = np.array(init_lr) self.max_lr = np.array(max_lr) self.final_lr = np.array(final_lr) self.current_step = 0 self.lr = init_lr self.warmup_steps = (self.warmup_epochs * self.steps_per_epoch).astype(int) self.total_steps = self.total_epochs * self.steps_per_epoch self.linear_increment = (self.max_lr - self.init_lr) / self.warmup_steps self.exponential_gamma = (self.final_lr / self.max_lr) ** (1 / (self.total_steps - self.warmup_steps)) super(NoamLR, self).__init__(optimizer) def get_lr(self): """Gets a list of the current learning rates.""" return list(self.lr) def step(self, current_step: int = None): """ Updates the learning rate by taking a step. :param current_step: Optionally specify what step to set the learning rate to. If None, current_step = self.current_step + 1. """ if current_step is not None: self.current_step = current_step else: self.current_step += 1 for i in range(self.num_lrs): if self.current_step <= self.warmup_steps[i]: self.lr[i] = self.init_lr[i] + self.current_step * self.linear_increment[i] elif self.current_step <= self.total_steps[i]: self.lr[i] = self.max_lr[i] * (self.exponential_gamma[i] ** (self.current_step - self.warmup_steps[i])) else: # theoretically this case should never be reached since training should stop at total_steps self.lr[i] = self.final_lr[i] self.optimizer.param_groups[i]['lr'] = self.lr[i]
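As a quick sanity check on the schedule the NoamLR docstring above describes, here is a small standalone sketch with made-up hyperparameters (not values from this repository) that reproduces the same rule: a linear ramp from init_lr to max_lr over warmup_steps, then exponential decay toward final_lr:

init_lr, max_lr, final_lr = 1e-4, 1e-3, 1e-5      # hypothetical values
warmup_steps, total_steps = 100, 1000

linear_increment = (max_lr - init_lr) / warmup_steps
gamma = (final_lr / max_lr) ** (1 / (total_steps - warmup_steps))

def lr_at(step):
    # Mirrors NoamLR.step(): linear warmup, then exponential decay.
    if step <= warmup_steps:
        return init_lr + step * linear_increment
    return max_lr * gamma ** (step - warmup_steps)

print(lr_at(0), lr_at(warmup_steps), lr_at(total_steps))
# ~1e-4 at step 0, 1e-3 at the end of warmup, ~1e-5 at the final step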
Repeat/CoMPT/utils_node.py
7,429
Noam learning rate scheduler with piecewise linear increase and exponential decay. The learning rate increases linearly from init_lr to max_lr over the course of the first warmup_steps (where warmup_steps = warmup_epochs * steps_per_epoch). Then the learning rate decreases exponentially from max_lr to final_lr over the course of the remaining total_steps - warmup_steps (where total_steps = total_epochs * steps_per_epoch). This is roughly based on the learning rate schedule from Attention is All You Need, section 5.3 (https://arxiv.org/abs/1706.03762). Initializes the learning rate scheduler. :param optimizer: A PyTorch optimizer. :param warmup_epochs: The number of epochs during which to linearly increase the learning rate. :param total_epochs: The total number of epochs. :param steps_per_epoch: The number of steps (batches) per epoch. :param init_lr: The initial learning rate. :param max_lr: The maximum learning rate (achieved after warmup_epochs). :param final_lr: The final learning rate (achieved after total_epochs). Gets a list of the current learning rates. Initializes the weights of a model in place. :param model: An nn.Module. Updates the learning rate by taking a step. :param current_step: Optionally specify what step to set the learning rate to. If None, current_step = self.current_step + 1. @File : utils_node.py @Time : 2022/03/08 14:35:13 @Author : Jianwen Chen @Version : 1.0 @Contact : [email protected] @License : (C)Copyright 2021-2022, SAIL-Lab !/usr/bin/env python -*- encoding: utf-8 -*- import area common library function area add mask (batch, max_length, node_dim) (batch, max_length, max_length, edge_dim) (batch, max_length, max_length) (batch, max_length) (batch, max_length, 1) (batch, max_length, 1) loss calculation clear gradients for this training step back propagation, compute gradients apply gradients NormLR need step every batch collect result clear cuda cache metric calculation theoretically this case should never be reached since training should stop at total_steps
2,060
en
0.744669
from djangocms_style.cms_plugins import StylePlugin
from cms.plugin_pool import plugin_pool
from django.utils.translation import gettext_lazy as _

from .models import TaccsiteSection


# Plugins

@plugin_pool.register_plugin
class TaccsiteSectionPlugin(StylePlugin):
    """
    Patterns > "Section" Plugin
    https://confluence.tacc.utexas.edu/x/c5TtDg
    """
    module = 'TACC Site'
    model = TaccsiteSection
    name = _('Section')

    # Copied from djangocms_style sans 'Inline style settings'
    # FAQ: If user wants to override spacing, they may:
    #      - use Style plugin (if they have permission)
    #      - request Design & Dev standardize use case
    # https://github.com/django-cms/djangocms-style/blob/3.0.0/djangocms_style/cms_plugins.py#L15-L40
    fieldsets = (
        (None, {
            'fields': (
                'label',
                ('class_name', 'tag_type'),
            )
        }),
        (_('Advanced settings'), {
            'classes': ('collapse',),
            'fields': (
                'additional_classes', 'id_name', 'template', 'attributes',
            ),
        }),
    )
djangocms_tacc_section/cms_plugins.py
1,181
Patterns > "Section" Plugin https://confluence.tacc.utexas.edu/x/c5TtDg Plugins Copied from djangocms_style sans 'Inline style settings' FAQ: If user wants to override spacing, they may: - use Style plugin (if they have permission) - request Design & Dev standardize use case https://github.com/django-cms/djangocms-style/blob/3.0.0/djangocms_style/cms_plugins.pyL15-L40
382
en
0.637282
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib import admin

from .models import Location, Category, Image


# Register your models here.
class ImageAdmin(admin.ModelAdmin):
    # Renamed from `Image` so it no longer shadows the model it configures,
    # and `search_fields` is a tuple instead of a bare string.
    search_fields = ('image_category',)


admin.site.register(Location)
admin.site.register(Category)
admin.site.register(Image, ImageAdmin)
shots/admin.py
330
-*- coding: utf-8 -*- Register your models here.
48
en
0.933192
# -*- coding: utf-8 -*- """ Authors: Tim Hessels Module: Collect/SRTM Description: This module downloads DEM data from http://earlywarning.usgs.gov/hydrodata/. Use the DEM functions to download and create DEM images in Gtiff format. Examples: from pyWAPOR.Collect import SRTM SRTM.DEM(Dir='C:/TempDEM4/', latlim=[29, 32], lonlim=[-113, -109]) """ from .DEM import main as DEM __all__ = ['DEM'] __version__ = '0.1'
pyWAPOR/Collect/SRTM/__init__.py
419
Authors: Tim Hessels Module: Collect/SRTM Description: This module downloads DEM data from http://earlywarning.usgs.gov/hydrodata/. Use the DEM functions to download and create DEM images in Gtiff format. Examples: from pyWAPOR.Collect import SRTM SRTM.DEM(Dir='C:/TempDEM4/', latlim=[29, 32], lonlim=[-113, -109]) -*- coding: utf-8 -*-
341
en
0.399931
""" Selects a matplotlib backend so you can run without a GUI/tkinter. Supports: - PyQt5 - PySide2 - WX - Tkinter """ from pyNastran.gui import IS_DEV if IS_DEV: # there is no interactive backend when testing on TravisCI matplotlib_backend = 'Agg' else: # fails if using the terminal and PyQt/PySide & qtpy are installed # how do I check if there is a terminal vs just running in command line? # try: from pyNastran.gui.qt_version import qt_int matplotlib_backend = 'Qt%iAgg' % qt_int except ImportError: try: # hasn't been tested on a machine without a backend... # default matplotlib backend import tkinter matplotlib_backend = 'tkAgg' except ImportError: # no-gui backend matplotlib_backend = 'Agg'
pyNastran/gui/matplotlib_backend.py
840
Selects a matplotlib backend so you can run without a GUI/tkinter. Supports: - PyQt5 - PySide2 - WX - Tkinter there is no interactive backend when testing on TravisCI fails if using the terminal and PyQt/PySide & qtpy are installed how do I check if there is a terminal vs just running in command line? hasn't been tested on a machine without a backend... default matplotlib backend no-gui backend
404
en
0.725779
# The new config inherits a base config to highlight the necessary modification
_base_ = '../retinanet_r50_fpn_1x_coco.py'

# We also need to change the num_classes in head to match the dataset's annotation
model = dict(
    pretrained=None,
)

# Modify dataset related settings
dataset_type = 'COCODataset'
classes = ('Cấm ngược chiều', 'Cấm dừng và đỗ', 'Cấm rẽ', 'Giới hạn tốc độ',
           'Cấm còn lại', 'Nguy hiểm', 'Hiệu lệnh')
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    samples_per_gpu=2,  # Batch size of a single GPU
    workers_per_gpu=2,  # Worker to pre-fetch data for each single GPU
    train=dict(
        classes=classes,
        img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/images/',
        ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/train.json',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(type='LoadAnnotations', with_bbox=True),
            dict(
                type='Resize',
                img_scale=(1622, 622),
                multiscale_mode='value',
                keep_ratio=True),
            dict(type='RandomFlip', flip_ratio=0.5),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
        ]
    ),
    val=dict(
        classes=classes,
        img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/images/',
        ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_train/val.json',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                type='MultiScaleFlipAug',
                img_scale=(1622, 622),
                flip=False,
                transforms=[
                    dict(type='Resize', keep_ratio=True),
                    dict(type='RandomFlip'),
                    dict(type='Normalize', **img_norm_cfg),
                    dict(type='Pad', size_divisor=32),
                    dict(type='ImageToTensor', keys=['img']),
                    dict(type='Collect', keys=['img']),
                ])
        ]
    ),
    test=dict(
        classes=classes,
        img_prefix='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_public_test/images/',
        ann_file='/data2/zalo-ai-2020/za_traffic_2020/data/traffic_public_test/test.json',
        pipeline=[
            dict(type='LoadImageFromFile'),
            dict(
                type='MultiScaleFlipAug',
                img_scale=(1622, 622),
                flip=False,
                transforms=[
                    dict(type='Resize', keep_ratio=True),
                    dict(type='RandomFlip'),
                    dict(type='Normalize', **img_norm_cfg),
                    dict(type='Pad', size_divisor=32),
                    dict(type='ImageToTensor', keys=['img']),
                    dict(type='Collect', keys=['img']),
                ])
        ]
    ),
)
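If this file is used the usual MMDetection way, it is loaded through mmcv's Config machinery; a small sketch for inspecting it, assuming an mmcv version that still ships mmcv.Config and that the relative _base_ path resolves from the repository's configs/ directory:

from mmcv import Config

cfg = Config.fromfile(
    'configs/retinanet/traffic_sign/retinanet_r50_fpn_1x_traffic_sign.py')
print(cfg.data.samples_per_gpu)            # 2 images per GPU
print(len(cfg.classes))                    # 7 traffic-sign classes
print(cfg.data.train.pipeline[2]['type'])  # 'Resize'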
configs/retinanet/traffic_sign/retinanet_r50_fpn_1x_traffic_sign.py
3,151
The new config inherits a base config to highlight the necessary modification We also need to change the num_classes in head to match the dataset's annotation Modify dataset related settings Batch size of a single GPU Worker to pre-fetch data for each single GPU
262
en
0.808026
#!/usr/bin/env python # Copyright 1996-2019 Cyberbotics Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test header version.""" import unittest import os import fnmatch ignoredProtos = [ 'projects/robots/mobsya/thymio/controllers/thymio2_aseba/aseba/clients/studio/plugins/ThymioVPL/UsageProfile.proto', 'projects/samples/tutorials/protos/FourWheelsRobot.proto' ] skippedDirectories = [ 'dependencies', 'distribution', '.git' ] class TestHeaderVersion(unittest.TestCase): """Unit test of the PROTO and world headers.""" def setUp(self): """Get all the PROTO files to be tested.""" # 1. Get Webots version (without revision) self.version = None with open(os.environ['WEBOTS_HOME'] + os.sep + 'resources' + os.sep + 'version.txt') as file: content = file.read() self.version = content.splitlines()[0].strip().split()[0] # 2. Get all the PROTO files self.files = [] for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']): dirNames[:] = [d for d in dirNames if d not in skippedDirectories] for fileName in fnmatch.filter(fileNames, '*.proto'): proto = os.path.join(rootPath, fileName) shouldIgnore = False for ignoredProto in ignoredProtos: path = os.environ['WEBOTS_HOME'] + os.sep + ignoredProto.replace('/', os.sep) if proto == path: shouldIgnore = True break if not shouldIgnore: self.files.append((proto, '#VRML_SIM %s utf8' % self.version)) # 3. Get all the world files for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']): dirNames[:] = [d for d in dirNames if d not in skippedDirectories] for fileName in fnmatch.filter(fileNames, '*.wbt'): world = os.path.join(rootPath, fileName) self.files.append((world, '#VRML_SIM %s utf8' % self.version)) # 4. Get all the .wbproj files for rootPath, dirNames, fileNames in os.walk(os.environ['WEBOTS_HOME']): dirNames[:] = [d for d in dirNames if d not in skippedDirectories] for fileName in fnmatch.filter(fileNames, '*.wbproj'): projFile = os.path.join(rootPath, fileName) self.files.append((projFile, 'Webots Project File version %s' % self.version)) def test_header_version(self): """Test that the PROTO and world files have the correct header.""" for currentFile in self.files: fileToTest = currentFile[0] with open(fileToTest) as file: content = file.read() if content == '': continue line = content.splitlines()[0].strip() self.assertTrue( line.startswith(currentFile[1]), msg='Wrong header in file: "%s"' % fileToTest ) if __name__ == '__main__': unittest.main()
tests/sources/test_header_version.py
3,603
Unit test of the PROTO and world headers. Get all the PROTO files to be tested. Test that the PROTO and world files have the correct header. Test header version. !/usr/bin/env python Copyright 1996-2019 Cyberbotics Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 1. Get Webots version (without revision) 2. Get all the PROTO files 3. Get all the world files 4. Get all the .wbproj files
866
en
0.789774
import sys, imp, atexit, os
sys.path.append("/home/courses/cs3214/software/pexpect-dpty/")
import pexpect, shellio, signal, time, os, re, proc_check

# Determine the path this file is in
thisdir = os.path.dirname(os.path.realpath(__file__))

# Ensure the shell process is terminated
def force_shell_termination(shell_process):
    shell_process.close(force=True)

# pulling in the regular expression and other definitions
# this should be the eshoutput.py file of the hosting shell, see usage above
definitions_scriptname = sys.argv[1]
def_module = imp.load_source('', definitions_scriptname)

# you can define logfile=open("log.txt", "w") in your eshoutput.py if you want logging!
logfile = None
if hasattr(def_module, 'logfile'):
    logfile = def_module.logfile

# spawn an instance of the shell, note the -p flags
c = pexpect.spawn(def_module.shell, drainpty=True, logfile=logfile, args=['-p', thisdir])
atexit.register(force_shell_termination, shell_process=c)

# set timeout for all following 'expect*' calls to 5 seconds
c.timeout = 5

#############################################################################
#
# Actual Test

assert c.expect(def_module.prompt) == 0, "Shell did not print expected prompt (1)"

c.sendline("systemInfo")

assert c.expect('------------------------------------------------\r\n') == 0, "Shell did not print out expected values"

assert c.expect(def_module.prompt) == 0, "Shell did not print expected prompt (2)"

shellio.success()
Systems/esh-spring-2015.git/src/plugins/systemInfo_test.py
1,458
Determine the path this file is inEnsure the shell process is terminated pulling in the regular expression and other definitions this should be the eshoutput.py file of the hosting shell, see usage above you can define logfile=open("log.txt", "w") in your eshoutput.py if you want logging!spawn an instance of the shell, note the -p flags set timeout for all following 'expect*' calls to 5 seconds Actual Test
409
en
0.792693
import os import numpy as np import pandas as pd from sklearn.datasets.samples_generator import make_swiss_roll import torch import torchvision from torchvision import transforms import glob import random import config as cfg import utils.metadata as meta from . import csv_loader from . import img_loader # Datasets # pytorch.org/docs/master/torchvision/datasets.html # https://github.com/bfortuner/pytorch-cheatsheet/blob/master/pytorch-cheatsheet.ipynb def get_iris_data(): fpath = "../data/iris.csv" url = "https://raw.githubusercontent.com/pydata/pandas/master/pandas/tests/data/iris.csv" df = csv_loader.load_or_download_df(fpath, url) return df def get_sin_data(): rng = np.random.RandomState(1) X = np.sort(5 * rng.rand(80, 1), axis=0) y = np.sin(X).ravel() y[::5] += 3 * (0.5 - rng.rand(16)) return X,y def get_housing_data(): # https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html fpath = "../data/housing.csv" url = "https://raw.githubusercontent.com/ggallo/boston-housing/master/housing.csv" df = csv_loader.load_or_download_df(fpath, url) return df def get_advertising_data(): fpath = "../data/advertising.csv" url = "http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv" df = csv_loader.load_or_download_df(fpath, url) df = df.drop(df.columns[0], axis=1) return df def get_swiss_roll_data(n_samples=1000): noise = 0.2 X, _ = make_swiss_roll(n_samples, noise) X = X.astype('float32')[:, [0, 2]] return X, _ def get_swiss_roll_loader(n_samples=1000): X, _ = get_swiss_roll_data(n_samples) dataset = torch.utils.data.dataset.TensorDataset( torch.FloatTensor(X), torch.FloatTensor(_)) loader = torch.utils.data.dataloader.DataLoader( dataset, batch_size=100, shuffle=True) return loader def get_mnist_loader(): MNIST_MEAN = np.array([0.1307,]) MNIST_STD = np.array([0.3081,]) normTransform = transforms.Normalize(MNIST_MEAN, MNIST_STD) trainTransform = transforms.Compose([ transforms.ToTensor(), normTransform ]) testTransform = transforms.Compose([ transforms.ToTensor(), normTransform ]) trainset = torchvision.datasets.MNIST(root='../data', train=True, download=True, transform=trainTransform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2) testset = torchvision.datasets.MNIST(root='../data', train=False, download=True, transform=testTransform) testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=2) return trainloader, testloader def get_cifar_loader(): # https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py CIFAR_MEAN = np.array([0.49139968, 0.48215827, 0.44653124]) CIFAR_STD = np.array([0.24703233, 0.24348505, 0.26158768]) normTransform = transforms.Normalize(CIFAR_MEAN, CIFAR_STD) trainTransform = transforms.Compose([ transforms.RandomCrop(32), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normTransform ]) testTransform = transforms.Compose([ transforms.ToTensor(), normTransform ]) trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=trainTransform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=testTransform) testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False, num_workers=2) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') return trainloader, testloader, classes def get_catsdogs_loader(imgs_dir): # Need to download 
Kaggle cats/dogs competition # And move ALL images into single directory classes = ['cat','dog'] class_to_idx, idx_to_class = meta.get_key_int_maps(classes) def get_targs_from_fpaths(fpaths): targs = [] for fpath in fpaths: classname = fpath.split('/')[-1].split('.')[0] # For one-hot sigmoid #targ = meta.onehot_encode_class( # class_to_idx, classname) targs.append(class_to_idx[classname]) return targs normalize = transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) trainTransform = transforms.Compose([ transforms.RandomSizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize ]) testTransform = transforms.Compose([ transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize ]) fpaths = glob.glob(imgs_dir + '*.jpg') random.shuffle(fpaths) trn_fpaths = fpaths[:20000] val_fpaths = fpaths[20000:] trn_targs = get_targs_from_fpaths(trn_fpaths) val_targs = get_targs_from_fpaths(val_fpaths) img_reader = 'pil' trn_dataset = FileDataset( trn_fpaths, img_reader, trn_targs, trainTransform) val_dataset = FileDataset( val_fpaths, img_reader, val_targs, testTransform) trn_loader = torch.utils.data.DataLoader( trn_dataset, batch_size=64, shuffle=True, num_workers=4) val_loader = torch.utils.data.DataLoader( val_dataset, batch_size=64, shuffle=False, num_workers=2) return trn_loader, val_loader, classes loaders = { 'pil': img_loader.pil_loader, 'tns': img_loader.tensor_loader, 'npy': img_loader.numpy_loader, 'io': img_loader.io_loader } class FileDataset(torch.utils.data.Dataset): def __init__(self, fpaths, img_loader='pil', targets=None, transform=None, target_transform=None): self.fpaths = fpaths self.loader = self._get_loader(img_loader) self.targets = targets self.transform = transform self.target_transform = target_transform def _get_loader(self, loader_type): return loaders[loader_type] def _get_target(self, index): if self.targets is None: return 1 target = self.targets[index] if self.target_transform is not None: return self.target_transform(target) return int(target) def _get_input(self, index): img_path = self.fpaths[index] img = self.loader(img_path) if self.transform is not None: img = self.transform(img) return img def __getitem__(self, index): input_ = self._get_input(index) target = self._get_target(index) img_path = self.fpaths[index] return input_, target, img_path def __len__(self): return len(self.fpaths)
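A brief usage sketch for the loader helpers above, assuming the repository root is on the Python path (so `utils` is importable) and that torchvision is allowed to download CIFAR-10 into ../data:

from utils import datasets

# CIFAR-10 train/test loaders plus class names
trainloader, testloader, classes = datasets.get_cifar_loader()
images, labels = next(iter(trainloader))
print(images.shape)                           # torch.Size([64, 3, 32, 32])
print([classes[int(l)] for l in labels[:4]])  # e.g. ['cat', 'ship', ...]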
utils/datasets.py
7,293
Datasets pytorch.org/docs/master/torchvision/datasets.html https://github.com/bfortuner/pytorch-cheatsheet/blob/master/pytorch-cheatsheet.ipynb https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py Need to download Kaggle cats/dogs competition And move ALL images into single directory For one-hot sigmoidtarg = meta.onehot_encode_class( class_to_idx, classname)
466
en
0.522603
#!/usr/bin/env python import sys import subprocess try: import gtk except: print >> sys.stderr, "You need to install the python gtk bindings" sys.exit(1) # import vte try: import vte except: error = gtk.MessageDialog (None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK, 'You need to install python bindings for libvte') error.run() sys.exit (1) def on_key_press_event(widget, event): keyname = gtk.gdk.keyval_name(event.keyval) '''print "Key %s (%d) was pressed" % (keyname, event.keyval) v.feed_child(keyname, len(keyname)) v2.feed_child(keyname, len(keyname))''' for i in terms: i.emit("key-press-event", event) if (event.keyval == 65293): text.set_text("") nbterm = 3 terms = [] if __name__ == '__main__': w = gtk.Window() hbox = gtk.HBox() x = 0 y = 0 for i in range(0, len(sys.argv)): v = vte.Terminal () v.connect ("child-exited", lambda term: gtk.main_quit()) v.fork_command() window = gtk.Window() if (i > 0): print sys.argv[i] r=subprocess.Popen(["/bin/bash", "-i", "-c", sys.argv[i]], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) #v.feed_child(sys.argv[i], len(sys.argv[i])) #line=r.stdout.readline() #print line v.feed_child(sys.argv[i], len(sys.argv[i])) e = gtk.gdk.Event(gtk.gdk.KEY_PRESS) e.keyval = 65293 e.send_event = True window.set_title("Window %s" % (sys.argv[i])) else: window.set_title("Window %d" % (i+1)) terms.append(v) window.add(v) window.connect('delete-event', lambda window, event: gtk.main_quit()) window.move(x, y) window.set_default_size(200, 100) #window.set_title("Window %d" % (i+1)) window.show_all() if (i > 0): e.window = window.get_window() v.emit("key-press-event", e) x += 780 if (i-1 % 3 == 0): y += 450 x = 0 text = gtk.Entry() text.connect("key_press_event", on_key_press_event) w.set_default_size(200, 15) w.move(0, 0) hbox.pack_start(text, True, True, 0) w.add(hbox) w.connect('delete-event', lambda window, event: gtk.main_quit()) w.show_all() text.set_can_focus(True) text.grab_focus() gtk.main()
multipleterm.py
2,432
!/usr/bin/env python import vtev.feed_child(sys.argv[i], len(sys.argv[i]))line=r.stdout.readline()print linewindow.set_title("Window %d" % (i+1))
145
en
0.112522
__all__ = ['read_cif','cif_site_labels'] from ase.io import read from ase.spacegroup import spacegroup import sys import os import logging from math import * import numpy as np import pkg_resources import warnings warnings.filterwarnings("ignore") path = '.temp_files/' filepath = pkg_resources.resource_filename(__name__,path) ''' NOTE ABOUT CIF FILE FORMATS: CIFs must include '_symmetry_Int_Taables_number' to be read by ASE. If this is not included please edit your CIF file to include this information. ''' def get_atom_lines(alllines): order = [] for i,line in enumerate(alllines): if '_atom' in line: order.append(line) start = i+1 end = None for i,line in enumerate(alllines[start:]): if len(line.split()) == 0: end = start+i-1 break if not end: end = len(alllines)-1 new_order = [] for i,o in enumerate(order): if 'site_label' in o: new_order.append(i) if 'site_type_symbol' in o: new_order.append(i) if 'fract_x' in o: new_order.append(i) if 'fract_y' in o: new_order.append(i) if 'fract_z' in o: new_order.append(i) return start,end,new_order def fix_cif(cif): f = open(cif,"r") alllines = f.readlines() f.close() for i, line in enumerate(alllines): if 'IT_coordinate_system_code' in line: fields = line.split() alllines[i] = '_symmetry_space_group_setting {0} \n'.format(fields[-1]) if '_atom_site_type_symbol' in line and '_atom_site_label' in alllines[i+1]: alllines[i],alllines[i+1] = alllines[i+1],alllines[i] file_name = cif.rstrip('.cif') temp_file = '{0}/{1}_temp.cif'.format(filepath,file_name.split('/')[-1]) f = open(temp_file,"w") f.writelines(alllines) f.close() atoms = read(temp_file); os.remove(temp_file) return atoms, alllines def get_tsites(cif): from ase.geometry import get_distances tsites = [] tpos = [] z,alllines = fix_cif(cif) si = [atom.index for atom in z if atom.symbol!='O'] start,end,order = get_atom_lines(alllines) for line in alllines[start:end+1]: if 'Si' in line or 'T' in line: line = line.split() temp_label = line[order[0]] if not any(str.isdigit(c) for c in temp_label): temp_label = line[order[1]] if 'Si' in temp_label: temp_label = temp_label.replace('Si','T') tsites.append(temp_label) pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])] tpos.append([round(num,2) for num in pos]) tpos = np.array(tpos) pos = z[si].get_scaled_positions() tinds = [] tmults = [] t_class = [] for tp in tpos: for i,p in enumerate(pos): p = [round(num,2) for num in p] diff = abs(tp-p) if sum(diff) <= 0.03: tinds.append(si[i]) for i in range(1,len(tsites)): tmults.append(tinds[i]-tinds[i-1]) tmults.append(si[-1]-tinds[-1]+1) # # si = [atom.index for atom in z if atom.symbol=='Si'] # o = [atom.index for atom in z if atom.symbol=='O'] # si_pos = z[si].positions # cell = z.cell # distances = get_distances(si_pos,si_pos,cell=cell,pbc=[1,1,1])[1] # # for i in tinds: # orig_ind = si.index(i) # dists = sorted(distances[orig_ind]) # t_class.append([round(num,2) for num in dists]) # # # for i,d in enumerate(t_class): # for j,t in enumerate(distances): # dist = [round(num,2) for num in sorted(t)] # if np.array_equal(dist,d): # dist = [round(num,2) for num in sorted(t)] # d = np.array(d) # dist = np.array(dist) # diff = abs(d - dist) # if sum(diff) <= 0.1: # tmults[i]+=1 n = len(si) sn = sum(tmults) if n != sn: print('Something Went Wrong With T Sites') return tsites, tmults, tinds def get_osites(cif): from ase.geometry import get_distances osites = [] opos = [] z,alllines = fix_cif(cif) start,end,order = get_atom_lines(alllines) for line in alllines[start:end+1]: if 'O' in line: line = 
line.split() temp_label = line[order[0]] if not any(str.isdigit(c) for c in temp_label): temp_label = line[order[1]] osites.append(temp_label) pos = [float(line[order[2]]),float(line[order[3]]),float(line[order[4]])] opos.append([round(num,2) for num in pos]) opos = np.array(opos) pos = z.get_scaled_positions() oinds = [] omults = [] o_class = [] si = [atom.index for atom in z if atom.symbol=='Si'] o = [atom.index for atom in z if atom.symbol=='O'] o_pos = z[o].get_scaled_positions() for op in opos: for i,p in enumerate(o_pos): p = np.array([round(num,2) for num in p]) diff = abs(op-p) if sum(diff) <= 0.02: oinds.append(o[i]) for i in range(1,len(osites)): omults.append(oinds[i]-oinds[i-1]) omults.append(o[-1]-oinds[-1]+1) # all_pos = z.positions # o_pos = z[o].positions # si_pos = z[si].positions # cell = z.cell # distances = get_distances(o_pos,all_pos,cell=cell,pbc=[1,1,1])[1] # # for i in oinds: # orig_ind = o.index(i) # dists = sorted(distances[orig_ind]) # o_class.append([round(num,2) for num in dists]) # # for i,d in enumerate(o_class): # for j,t in enumerate(distances): # dist = [round(num,2) for num in sorted(t)] # d = np.array(d) # dist = np.array(dist) # diff = abs(d - dist) # if sum(diff) <= 0.05: # omults[i]+=1 n = len(o) sn = sum(omults) if n != sn: print('Something Went Wrong With O Sites') return osites, omults, oinds def read_cif(cif): atoms, alllines = fix_cif(cif) ts,tm,tinds = get_tsites(cif) os,om,oinds = get_osites(cif) return atoms,ts,tm,tinds,os,om,oinds def cif_site_labels(cif): atoms,ts,tm,tinds,os,om,oinds = read_cif(cif) labels = {} for i,t in enumerate(ts): for j in range(tm[i]): labels[tinds[i]+j] = t for i,o in enumerate(os): for j in range(om[i]): labels[oinds[i]+j] = o return labels ''' DEPRECRATED FUNCTIONS''' def float_with_error(x): """ some value in cif accompanies error like "1.234(5) """ if "?" in x: return 0 pos = x.find("(") if pos >= 0: x = x[:pos] return float(x) def get_mults(cif): # read the cif file F = open(cif,"r") alllines = F.readlines() F.close() # Parse out data from the cif file for i,line in enumerate(alllines): if '_cell_length_a' in line: fields = line.split() field = fields[-1] field = float_with_error(field) La = field if '_cell_length_b' in line: fields = line.split() field = fields[-1] field = float_with_error(field) Lb = field if '_cell_length_c' in line: fields = line.split() field = fields[-1] field = float_with_error(field) Lc = field if '_cell_angle_alpha' in line: fields = line.split() field = fields[-1] field = float_with_error(field) alpha = field if '_cell_angle_beta' in line: fields = line.split() field = fields[-1] field = float_with_error(field) beta = field if '_cell_angle_gamma' in line: fields = line.split() field = fields[-1] field = float_with_error(field) gamma = field if '_space_group_symop' in line or '_symmetry_equiv_pos' in line or '_space_group' in line: n = i lastline = len(alllines) loops = [] for i,line in enumerate(alllines): if 'loop' in line: loops.append(i) ops = [] for i in range(n+1,loops[1]): n+=1 line = alllines[i] if 'x' in line or 'X' in line: ops.append(line.replace("'",'')) for i in range(len(ops)): ops[i] = ops[i].replace("0/", "0./") # also for e.g. 
10/9 ops[i] = ops[i].replace("1/", "1./") ops[i] = ops[i].replace("2/", "2./") ops[i] = ops[i].replace("3/", "3./") ops[i] = ops[i].replace("4/", "4./") ops[i] = ops[i].replace("5/", "5./") ops[i] = ops[i].replace("6/", "6./") ops[i] = ops[i].replace("7/", "7./") ops[i] = ops[i].replace("8/", "8./") ops[i] = ops[i].replace("9/", "9./") osites = [] tsites = [] atoms = [] for j in range(n,lastline): line = alllines[j] if '_' not in line: fields = line.split() if len(fields) >3: tmp = (fields[0],float(fields[2]),float(fields[3]),float(fields[4])) if 'O' in fields[0]: osites.append(fields[0]) if 'T' in fields[0]: tsites.append(fields[0]) atoms.append(tmp) for i in range(len(atoms)): (name,xn,yn,zn) = atoms[i] xn = (xn + 10.0) % 1.0 yn = (yn + 10.0) % 1.0 zn = (zn + 10.0) % 1.0 atoms[i] = (name,xn,yn,zn) # perfrom symmetry operations label_list = [] symbols = [] positions = [] for i in atoms: label_list.append(i[0]) eps = 0.01 imax = len(atoms) i=0 while (i<imax): label,x,y,z=atoms[i] for op in ops: op = op.replace("'",'') op = op.lower() xn,yn,zn = eval(op) xn = (xn + 10.0) % 1.0 yn = (yn + 10.0) % 1.0 zn = (zn + 10.0) % 1.0 new_atom = True for at in atoms: if (abs(at[1]-xn) < eps and abs(at[2]-yn) < eps and abs(at[3]-zn) < eps): new_atom = False if new_atom: p1 = np.array([at[1],at[2],at[3]]) p2 = np.array([xn,yn,zn]) diff = abs(p1-p2) diff = np.round(diff,2) count = np.count_nonzero(diff) if count ==1 and 1 in diff: new_atom = False if new_atom: atoms.append( (label,xn,yn,zn) ) label_list.append(label) i += 1 imax =len(atoms) #atoms2 = Atoms(symbols,scaled_positions=positions,cell = [La,Lb,Lc,alpha,beta,gamma]) # count up the osits label_list = sorted(label_list) omults = [] for o in osites: count = label_list.count(o) omults.append(count) tmults = [] for t in tsites: count = label_list.count(t) tmults.append(count) return tsites, tmults, osites, omults def get_indices(cif): ''' This is a tool that will read a CIF file and return the unique T-sites, their multiplicities, and an example atom index. It also does the same for the unique O-sites in the framework. This tool only works on CIFs that are formatted the same way as the IZA Structure Database CIFs. ''' tsites, tmults, osites, omults = get_mults(cif) f = open(cif,"r") alllines = f.read() f.close() for i, line in enumerate(alllines): if 'IT_coordinate_system_code' in line: fields = line.split() alllines[i] = '_symmetry_space_group_setting {0}'.format(fields[-1]) atoms = read(cif) oinds = [atom.index for atom in atoms if atom.symbol=='O'] index = 0 first_os = [] for i,m in enumerate(omults): first_os.append(oinds[index]) index+=m tinds = [atom.index for atom in atoms if atom.symbol !='O'] index = 0 first_ts = [] for i,m, in enumerate(tmults): first_ts.append(tinds[index]) index+=m return tsites,tmults,first_ts, osites, omults, first_os
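A brief usage sketch for the public helpers this module exports; the CIF filename is hypothetical, and the module expects an IZA-style CIF that includes the `_symmetry_Int_Tables_number` tag:

from cif_tools import read_cif, cif_site_labels

atoms, tsites, tmults, tinds, osites, omults, oinds = read_cif('MFI.cif')
print(tsites, tmults)        # unique T-site labels and their multiplicities

labels = cif_site_labels('MFI.cif')
print(labels[tinds[0]])      # label of the first atom belonging to the first T-site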
cif_tools.py
12,327
some value in cif accompanies error like "1.234(5) This is a tool that will read a CIF file and return the unique T-sites, their multiplicities, and an example atom index. It also does the same for the unique O-sites in the framework. This tool only works on CIFs that are formatted the same way as the IZA Structure Database CIFs. si = [atom.index for atom in z if atom.symbol=='Si'] o = [atom.index for atom in z if atom.symbol=='O'] si_pos = z[si].positions cell = z.cell distances = get_distances(si_pos,si_pos,cell=cell,pbc=[1,1,1])[1] for i in tinds: orig_ind = si.index(i) dists = sorted(distances[orig_ind]) t_class.append([round(num,2) for num in dists]) for i,d in enumerate(t_class): for j,t in enumerate(distances): dist = [round(num,2) for num in sorted(t)] if np.array_equal(dist,d): dist = [round(num,2) for num in sorted(t)] d = np.array(d) dist = np.array(dist) diff = abs(d - dist) if sum(diff) <= 0.1: tmults[i]+=1 all_pos = z.positions o_pos = z[o].positions si_pos = z[si].positions cell = z.cell distances = get_distances(o_pos,all_pos,cell=cell,pbc=[1,1,1])[1] for i in oinds: orig_ind = o.index(i) dists = sorted(distances[orig_ind]) o_class.append([round(num,2) for num in dists]) for i,d in enumerate(o_class): for j,t in enumerate(distances): dist = [round(num,2) for num in sorted(t)] d = np.array(d) dist = np.array(dist) diff = abs(d - dist) if sum(diff) <= 0.05: omults[i]+=1 read the cif file Parse out data from the cif file also for e.g. 10/9 perfrom symmetry operationsatoms2 = Atoms(symbols,scaled_positions=positions,cell = [La,Lb,Lc,alpha,beta,gamma]) count up the osits
1,789
en
0.609264
# -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Accesses the google.cloud.texttospeech.v1beta1 TextToSpeech API.""" import pkg_resources import warnings from google.oauth2 import service_account import google.api_core.gapic_v1.client_info import google.api_core.gapic_v1.config import google.api_core.gapic_v1.method import google.api_core.grpc_helpers import grpc from google.cloud.texttospeech_v1beta1.gapic import enums from google.cloud.texttospeech_v1beta1.gapic import text_to_speech_client_config from google.cloud.texttospeech_v1beta1.gapic.transports import ( text_to_speech_grpc_transport, ) from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2 from google.cloud.texttospeech_v1beta1.proto import cloud_tts_pb2_grpc _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( "google-cloud-texttospeech" ).version class TextToSpeechClient(object): """Service that implements Google Cloud Text-to-Speech API.""" SERVICE_ADDRESS = "texttospeech.googleapis.com:443" """The default address of the service.""" # The name of the interface for this client. This is the key used to # find the method configuration in the client_config dictionary. _INTERFACE_NAME = "google.cloud.texttospeech.v1beta1.TextToSpeech" @classmethod def from_service_account_file(cls, filename, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: TextToSpeechClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials return cls(*args, **kwargs) from_service_account_json = from_service_account_file def __init__( self, transport=None, channel=None, credentials=None, client_config=None, client_info=None, ): """Constructor. Args: transport (Union[~.TextToSpeechGrpcTransport, Callable[[~.Credentials, type], ~.TextToSpeechGrpcTransport]): A transport instance, responsible for actually making the API calls. The default transport uses the gRPC protocol. This argument may also be a callable which returns a transport instance. Callables will be sent the credentials as the first argument and the default transport class as the second argument. channel (grpc.Channel): DEPRECATED. A ``Channel`` instance through which to make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception. credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. This argument is mutually exclusive with providing a transport instance to ``transport``; doing so will raise an exception. client_config (dict): DEPRECATED. 
A dictionary of call options for each method. If not specified, the default configuration is used. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. """ # Raise deprecation warnings for things we want to go away. if client_config is not None: warnings.warn( "The `client_config` argument is deprecated.", PendingDeprecationWarning, stacklevel=2, ) else: client_config = text_to_speech_client_config.config if channel: warnings.warn( "The `channel` argument is deprecated; use " "`transport` instead.", PendingDeprecationWarning, stacklevel=2, ) # Instantiate the transport. # The transport is responsible for handling serialization and # deserialization and actually sending data to the service. if transport: if callable(transport): self.transport = transport( credentials=credentials, default_class=text_to_speech_grpc_transport.TextToSpeechGrpcTransport, ) else: if credentials: raise ValueError( "Received both a transport instance and " "credentials; these are mutually exclusive." ) self.transport = transport else: self.transport = text_to_speech_grpc_transport.TextToSpeechGrpcTransport( address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( gapic_version=_GAPIC_LIBRARY_VERSION ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC # from the client configuration. # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( client_config["interfaces"][self._INTERFACE_NAME] ) # Save a dictionary of cached API call functions. # These are the actual callables which invoke the proper # transport methods, wrapped with `wrap_method` to add retry, # timeout, and the like. self._inner_api_calls = {} # Service calls def list_voices( self, language_code=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Returns a list of ``Voice`` supported for synthesis. Example: >>> from google.cloud import texttospeech_v1beta1 >>> >>> client = texttospeech_v1beta1.TextToSpeechClient() >>> >>> response = client.list_voices() Args: language_code (str): Optional (but recommended) `BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__ language tag. If specified, the ListVoices call will only return voices that can be used to synthesize this language\_code. E.g. when specifying "en-NZ", you will get supported "en-*" voices; when specifying "no", you will get supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh" will also get supported "cmn-*" voices; specifying "zh-hk" will also get supported "yue-\*" voices. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. 
Returns: A :class:`~google.cloud.texttospeech_v1beta1.types.ListVoicesResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "list_voices" not in self._inner_api_calls: self._inner_api_calls[ "list_voices" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_voices, default_retry=self._method_configs["ListVoices"].retry, default_timeout=self._method_configs["ListVoices"].timeout, client_info=self._client_info, ) request = cloud_tts_pb2.ListVoicesRequest(language_code=language_code) return self._inner_api_calls["list_voices"]( request, retry=retry, timeout=timeout, metadata=metadata ) def synthesize_speech( self, input_, voice, audio_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Synthesizes speech synchronously: receive results after all text input has been processed. Example: >>> from google.cloud import texttospeech_v1beta1 >>> >>> client = texttospeech_v1beta1.TextToSpeechClient() >>> >>> # TODO: Initialize `input_`: >>> input_ = {} >>> >>> # TODO: Initialize `voice`: >>> voice = {} >>> >>> # TODO: Initialize `audio_config`: >>> audio_config = {} >>> >>> response = client.synthesize_speech(input_, voice, audio_config) Args: input_ (Union[dict, ~google.cloud.texttospeech_v1beta1.types.SynthesisInput]): Required. The Synthesizer requires either plain text or SSML as input. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.texttospeech_v1beta1.types.SynthesisInput` voice (Union[dict, ~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams]): Required. The desired voice of the synthesized audio. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams` audio_config (Union[dict, ~google.cloud.texttospeech_v1beta1.types.AudioConfig]): Required. The configuration of the synthesized audio. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.texttospeech_v1beta1.types.AudioConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.texttospeech_v1beta1.types.SynthesizeSpeechResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "synthesize_speech" not in self._inner_api_calls: self._inner_api_calls[ "synthesize_speech" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.synthesize_speech, default_retry=self._method_configs["SynthesizeSpeech"].retry, default_timeout=self._method_configs["SynthesizeSpeech"].timeout, client_info=self._client_info, ) request = cloud_tts_pb2.SynthesizeSpeechRequest( input=input_, voice=voice, audio_config=audio_config ) return self._inner_api_calls["synthesize_speech"]( request, retry=retry, timeout=timeout, metadata=metadata )
texttospeech/google/cloud/texttospeech_v1beta1/gapic/text_to_speech_client.py
13,590
Service that implements Google Cloud Text-to-Speech API.

Constructor.
Args:
    transport (Union[~.TextToSpeechGrpcTransport, Callable[[~.Credentials, type], ~.TextToSpeechGrpcTransport]]): A transport instance, responsible for actually making the API calls. The default transport uses the gRPC protocol. This argument may also be a callable which returns a transport instance. Callables will be sent the credentials as the first argument and the default transport class as the second argument.
    channel (grpc.Channel): DEPRECATED. A ``Channel`` instance through which to make calls. This argument is mutually exclusive with ``credentials``; providing both will raise an exception.
    credentials (google.auth.credentials.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. This argument is mutually exclusive with providing a transport instance to ``transport``; doing so will raise an exception.
    client_config (dict): DEPRECATED. A dictionary of call options for each method. If not specified, the default configuration is used.
    client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library.

Creates an instance of this client using the provided credentials file.
Args:
    filename (str): The path to the service account private key json file.
    args: Additional arguments to pass to the constructor.
    kwargs: Additional arguments to pass to the constructor.
Returns:
    TextToSpeechClient: The constructed client.

Returns a list of ``Voice`` supported for synthesis.
Example:
    >>> from google.cloud import texttospeech_v1beta1
    >>>
    >>> client = texttospeech_v1beta1.TextToSpeechClient()
    >>>
    >>> response = client.list_voices()
Args:
    language_code (str): Optional (but recommended) `BCP-47 <https://www.rfc-editor.org/rfc/bcp/bcp47.txt>`__ language tag. If specified, the ListVoices call will only return voices that can be used to synthesize this language_code. E.g. when specifying "en-NZ", you will get supported "en-*" voices; when specifying "no", you will get supported "no-*" (Norwegian) and "nb-*" (Norwegian Bokmal) voices; specifying "zh" will also get supported "cmn-*" voices; specifying "zh-hk" will also get supported "yue-*" voices.
    retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried.
    timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method.
Returns:
    A :class:`~google.cloud.texttospeech_v1beta1.types.ListVoicesResponse` instance.
Raises:
    google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason.
    google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed.
    ValueError: If the parameters are invalid.

Synthesizes speech synchronously: receive results after all text input has been processed.
Example:
    >>> from google.cloud import texttospeech_v1beta1
    >>>
    >>> client = texttospeech_v1beta1.TextToSpeechClient()
    >>>
    >>> # TODO: Initialize `input_`:
    >>> input_ = {}
    >>>
    >>> # TODO: Initialize `voice`:
    >>> voice = {}
    >>>
    >>> # TODO: Initialize `audio_config`:
    >>> audio_config = {}
    >>>
    >>> response = client.synthesize_speech(input_, voice, audio_config)
Args:
    input_ (Union[dict, ~google.cloud.texttospeech_v1beta1.types.SynthesisInput]): Required. The Synthesizer requires either plain text or SSML as input. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.texttospeech_v1beta1.types.SynthesisInput`.
    voice (Union[dict, ~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams]): Required. The desired voice of the synthesized audio. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.texttospeech_v1beta1.types.VoiceSelectionParams`.
    audio_config (Union[dict, ~google.cloud.texttospeech_v1beta1.types.AudioConfig]): Required. The configuration of the synthesized audio. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.texttospeech_v1beta1.types.AudioConfig`.
    retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried.
    timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method.
Returns:
    A :class:`~google.cloud.texttospeech_v1beta1.types.SynthesizeSpeechResponse` instance.
Raises:
    google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason.
    google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed.
    ValueError: If the parameters are invalid.
(A usage sketch based on these docstrings appears after this record.)

Accesses the google.cloud.texttospeech.v1beta1 TextToSpeech API.

-*- coding: utf-8 -*- Copyright 2018 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

The name of the interface for this client. This is the key used to find the method configuration in the client_config dictionary.

Raise deprecation warnings for things we want to go away.

Instantiate the transport. The transport is responsible for handling serialization and deserialization and actually sending data to the service.

Parse out the default settings for retry and timeout for each RPC from the client configuration. (Ordinarily, these are the defaults specified in the `*_config.py` file next to this one.)

Save a dictionary of cached API call functions. These are the actual callables which invoke the proper transport methods, wrapped with `wrap_method` to add retry, timeout, and the like.

Service calls

Wrap the transport method to add retry and timeout logic.

Wrap the transport method to add retry and timeout logic.
7,427
en
0.755708
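The docstrings in the record above describe the v1beta1 gapic client's list_voices and synthesize_speech calls. The following is a minimal usage sketch, assuming the legacy google-cloud-texttospeech package (v1beta1 surface) and application-default credentials; the voice settings, audio encoding, and output filename are illustrative only, and retry/timeout keyword arguments could be passed to each call as described above.

# Minimal usage sketch for the v1beta1 client documented above.
# Assumes the legacy google-cloud-texttospeech package and
# application-default credentials; values below are illustrative.
from google.cloud import texttospeech_v1beta1
from google.cloud.texttospeech_v1beta1 import enums, types

client = texttospeech_v1beta1.TextToSpeechClient()

# Optionally filter the voice listing by a BCP-47 language tag.
voices = client.list_voices(language_code="en-US")

# Build the three required arguments; dicts of the same shape as the
# protobuf messages are also accepted, per the docstrings.
input_ = types.SynthesisInput(text="Hello, world")
voice = types.VoiceSelectionParams(
    language_code="en-US",
    ssml_gender=enums.SsmlVoiceGender.NEUTRAL,
)
audio_config = types.AudioConfig(audio_encoding=enums.AudioEncoding.MP3)

response = client.synthesize_speech(input_, voice, audio_config)

# The response carries the encoded audio bytes.
with open("output.mp3", "wb") as out:
    out.write(response.audio_content)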
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-03-18 04:04
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('wildlifecompliance', '0146_auto_20190308_1626'),
    ]

    operations = [
        migrations.AddField(
            model_name='returntype',
            name='return_type',
            field=models.CharField(choices=[('sheet', 'Sheet'), ('question', 'Question'), ('data', 'Data')], default='sheet', max_length=30, verbose_name='Type'),
        ),
    ]
wildlifecompliance/migrations/0147_returntype_return_type.py
581
-*- coding: utf-8 -*- Generated by Django 1.10.8 on 2019-03-18 04:04
68
en
0.56968
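For context, the migration above adds a choice-constrained CharField to the ReturnType model. The sketch below shows a hypothetical model definition that would produce such a migration; the actual ReturnType model in the wildlifecompliance app is not part of this record and may define additional fields.

# Hypothetical sketch of the field added by migration 0147; the real
# ReturnType model is not shown in this record and may differ.
from django.db import models


class ReturnType(models.Model):
    RETURN_TYPE_CHOICES = [
        ('sheet', 'Sheet'),
        ('question', 'Question'),
        ('data', 'Data'),
    ]
    return_type = models.CharField(
        choices=RETURN_TYPE_CHOICES,
        default='sheet',
        max_length=30,
        verbose_name='Type',
    )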
# Copyright 2012-2019 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import mparser from . import environment from . import coredata from . import dependencies from . import mlog from . import build from . import optinterpreter from . import compilers from .wrap import wrap, WrapMode from . import mesonlib from .mesonlib import FileMode, MachineChoice, Popen_safe, listify, extract_as_list, has_path_sep, unholder from .dependencies import ExternalProgram from .dependencies import InternalDependency, Dependency, NotFoundDependency, DependencyException from .depfile import DepFile from .interpreterbase import InterpreterBase from .interpreterbase import check_stringlist, flatten, noPosargs, noKwargs, stringArgs, permittedKwargs, noArgsFlattening from .interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest from .interpreterbase import InterpreterObject, MutableInterpreterObject, Disabler, disablerIfNotFound from .interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs from .interpreterbase import ObjectHolder from .modules import ModuleReturnValue from .cmake import CMakeInterpreter from .backend.backends import TestProtocol from pathlib import Path, PurePath import os import shutil import uuid import re import shlex import subprocess import collections import functools import typing as T import importlib permitted_method_kwargs = { 'partial_dependency': {'compile_args', 'link_args', 'links', 'includes', 'sources'}, } def stringifyUserArguments(args): if isinstance(args, list): return '[%s]' % ', '.join([stringifyUserArguments(x) for x in args]) elif isinstance(args, dict): return '{%s}' % ', '.join(['%s : %s' % (stringifyUserArguments(k), stringifyUserArguments(v)) for k, v in args.items()]) elif isinstance(args, int): return str(args) elif isinstance(args, str): return "'%s'" % args raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.') class OverrideProgram(dependencies.ExternalProgram): pass class FeatureOptionHolder(InterpreterObject, ObjectHolder): def __init__(self, env, name, option): InterpreterObject.__init__(self) ObjectHolder.__init__(self, option) if option.is_auto(): self.held_object = env.coredata.builtins['auto_features'] self.name = name self.methods.update({'enabled': self.enabled_method, 'disabled': self.disabled_method, 'auto': self.auto_method, }) @noPosargs @permittedKwargs({}) def enabled_method(self, args, kwargs): return self.held_object.is_enabled() @noPosargs @permittedKwargs({}) def disabled_method(self, args, kwargs): return self.held_object.is_disabled() @noPosargs @permittedKwargs({}) def auto_method(self, args, kwargs): return self.held_object.is_auto() def extract_required_kwarg(kwargs, subproject, feature_check=None, default=True): val = kwargs.get('required', default) disabled = False required = False feature = None if isinstance(val, FeatureOptionHolder): if not feature_check: feature_check = FeatureNew('User option "feature"', '0.47.0') 
feature_check.use(subproject) option = val.held_object feature = val.name if option.is_disabled(): disabled = True elif option.is_enabled(): required = True elif isinstance(val, bool): required = val else: raise InterpreterException('required keyword argument must be boolean or a feature option') # Keep boolean value in kwargs to simplify other places where this kwarg is # checked. kwargs['required'] = required return disabled, required, feature def extract_search_dirs(kwargs): search_dirs = mesonlib.stringlistify(kwargs.get('dirs', [])) search_dirs = [Path(d).expanduser() for d in search_dirs] for d in search_dirs: if mesonlib.is_windows() and d.root.startswith('\\'): # a Unix-path starting with `/` that is not absolute on Windows. # discard without failing for end-user ease of cross-platform directory arrays continue if not d.is_absolute(): raise InvalidCode('Search directory {} is not an absolute path.'.format(d)) return list(map(str, search_dirs)) class TryRunResultHolder(InterpreterObject): def __init__(self, res): super().__init__() self.res = res self.methods.update({'returncode': self.returncode_method, 'compiled': self.compiled_method, 'stdout': self.stdout_method, 'stderr': self.stderr_method, }) @noPosargs @permittedKwargs({}) def returncode_method(self, args, kwargs): return self.res.returncode @noPosargs @permittedKwargs({}) def compiled_method(self, args, kwargs): return self.res.compiled @noPosargs @permittedKwargs({}) def stdout_method(self, args, kwargs): return self.res.stdout @noPosargs @permittedKwargs({}) def stderr_method(self, args, kwargs): return self.res.stderr class RunProcess(InterpreterObject): def __init__(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir=False, check=False, capture=True): super().__init__() if not isinstance(cmd, ExternalProgram): raise AssertionError('BUG: RunProcess must be passed an ExternalProgram') self.capture = capture pc, self.stdout, self.stderr = self.run_command(cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check) self.returncode = pc.returncode self.methods.update({'returncode': self.returncode_method, 'stdout': self.stdout_method, 'stderr': self.stderr_method, }) def run_command(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check=False): command_array = cmd.get_command() + args menv = {'MESON_SOURCE_ROOT': source_dir, 'MESON_BUILD_ROOT': build_dir, 'MESON_SUBDIR': subdir, 'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in mesonintrospect]), } if in_builddir: cwd = os.path.join(build_dir, subdir) else: cwd = os.path.join(source_dir, subdir) child_env = os.environ.copy() child_env.update(menv) child_env = env.get_env(child_env) stdout = subprocess.PIPE if self.capture else subprocess.DEVNULL mlog.debug('Running command:', ' '.join(command_array)) try: p, o, e = Popen_safe(command_array, stdout=stdout, env=child_env, cwd=cwd) if self.capture: mlog.debug('--- stdout ---') mlog.debug(o) else: o = '' mlog.debug('--- stdout disabled ---') mlog.debug('--- stderr ---') mlog.debug(e) mlog.debug('') if check and p.returncode != 0: raise InterpreterException('Command "{}" failed with status {}.'.format(' '.join(command_array), p.returncode)) return p, o, e except FileNotFoundError: raise InterpreterException('Could not execute command "%s".' 
% ' '.join(command_array)) @noPosargs @permittedKwargs({}) def returncode_method(self, args, kwargs): return self.returncode @noPosargs @permittedKwargs({}) def stdout_method(self, args, kwargs): return self.stdout @noPosargs @permittedKwargs({}) def stderr_method(self, args, kwargs): return self.stderr class ConfigureFileHolder(InterpreterObject, ObjectHolder): def __init__(self, subdir, sourcename, targetname, configuration_data): InterpreterObject.__init__(self) obj = build.ConfigureFile(subdir, sourcename, targetname, configuration_data) ObjectHolder.__init__(self, obj) class EnvironmentVariablesHolder(MutableInterpreterObject, ObjectHolder): def __init__(self, initial_values=None): MutableInterpreterObject.__init__(self) ObjectHolder.__init__(self, build.EnvironmentVariables()) self.methods.update({'set': self.set_method, 'append': self.append_method, 'prepend': self.prepend_method, }) if isinstance(initial_values, dict): for k, v in initial_values.items(): self.set_method([k, v], {}) elif isinstance(initial_values, list): for e in initial_values: if '=' not in e: raise InterpreterException('Env var definition must be of type key=val.') (k, val) = e.split('=', 1) k = k.strip() val = val.strip() if ' ' in k: raise InterpreterException('Env var key must not have spaces in it.') self.set_method([k, val], {}) elif initial_values: raise AssertionError('Unsupported EnvironmentVariablesHolder initial_values') def __repr__(self): repr_str = "<{0}: {1}>" return repr_str.format(self.__class__.__name__, self.held_object.envvars) def add_var(self, method, args, kwargs): if not isinstance(kwargs.get("separator", ""), str): raise InterpreterException("EnvironmentVariablesHolder methods 'separator'" " argument needs to be a string.") if len(args) < 2: raise InterpreterException("EnvironmentVariablesHolder methods require at least" "2 arguments, first is the name of the variable and" " following one are values") # Warn when someone tries to use append() or prepend() on an env var # which already has an operation set on it. People seem to think that # multiple append/prepend operations stack, but they don't. if method != self.held_object.set and self.held_object.has_name(args[0]): mlog.warning('Overriding previous value of environment variable {!r} with a new one' .format(args[0]), location=self.current_node) self.held_object.add_var(method, args[0], args[1:], kwargs) @stringArgs @permittedKwargs({'separator'}) def set_method(self, args, kwargs): self.add_var(self.held_object.set, args, kwargs) @stringArgs @permittedKwargs({'separator'}) def append_method(self, args, kwargs): self.add_var(self.held_object.append, args, kwargs) @stringArgs @permittedKwargs({'separator'}) def prepend_method(self, args, kwargs): self.add_var(self.held_object.prepend, args, kwargs) class ConfigurationDataHolder(MutableInterpreterObject, ObjectHolder): def __init__(self, pv, initial_values=None): MutableInterpreterObject.__init__(self) self.used = False # These objects become immutable after use in configure_file. 
ObjectHolder.__init__(self, build.ConfigurationData(), pv) self.methods.update({'set': self.set_method, 'set10': self.set10_method, 'set_quoted': self.set_quoted_method, 'has': self.has_method, 'get': self.get_method, 'get_unquoted': self.get_unquoted_method, 'merge_from': self.merge_from_method, }) if isinstance(initial_values, dict): for k, v in initial_values.items(): self.set_method([k, v], {}) elif initial_values: raise AssertionError('Unsupported ConfigurationDataHolder initial_values') def is_used(self): return self.used def mark_used(self): self.used = True def validate_args(self, args, kwargs): if len(args) == 1 and isinstance(args[0], list) and len(args[0]) == 2: mlog.deprecation('Passing a list as the single argument to ' 'configuration_data.set is deprecated. This will ' 'become a hard error in the future.', location=self.current_node) args = args[0] if len(args) != 2: raise InterpreterException("Configuration set requires 2 arguments.") if self.used: raise InterpreterException("Can not set values on configuration object that has been used.") name, val = args if not isinstance(val, (int, str)): msg = 'Setting a configuration data value to {!r} is invalid, ' \ 'and will fail at configure_file(). If you are using it ' \ 'just to store some values, please use a dict instead.' mlog.deprecation(msg.format(val), location=self.current_node) desc = kwargs.get('description', None) if not isinstance(name, str): raise InterpreterException("First argument to set must be a string.") if desc is not None and not isinstance(desc, str): raise InterpreterException('Description must be a string.') return name, val, desc @noArgsFlattening def set_method(self, args, kwargs): (name, val, desc) = self.validate_args(args, kwargs) self.held_object.values[name] = (val, desc) def set_quoted_method(self, args, kwargs): (name, val, desc) = self.validate_args(args, kwargs) if not isinstance(val, str): raise InterpreterException("Second argument to set_quoted must be a string.") escaped_val = '\\"'.join(val.split('"')) self.held_object.values[name] = ('"' + escaped_val + '"', desc) def set10_method(self, args, kwargs): (name, val, desc) = self.validate_args(args, kwargs) if val: self.held_object.values[name] = (1, desc) else: self.held_object.values[name] = (0, desc) def has_method(self, args, kwargs): return args[0] in self.held_object.values @FeatureNew('configuration_data.get()', '0.38.0') @noArgsFlattening def get_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Get method takes one or two arguments.') name = args[0] if name in self.held_object: return self.held_object.get(name)[0] if len(args) > 1: return args[1] raise InterpreterException('Entry %s not in configuration data.' % name) @FeatureNew('configuration_data.get_unquoted()', '0.44.0') def get_unquoted_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Get method takes one or two arguments.') name = args[0] if name in self.held_object: val = self.held_object.get(name)[0] elif len(args) > 1: val = args[1] else: raise InterpreterException('Entry %s not in configuration data.' 
% name) if val[0] == '"' and val[-1] == '"': return val[1:-1] return val def get(self, name): return self.held_object.values[name] # (val, desc) def keys(self): return self.held_object.values.keys() def merge_from_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Merge_from takes one positional argument.') from_object = args[0] if not isinstance(from_object, ConfigurationDataHolder): raise InterpreterException('Merge_from argument must be a configuration data object.') from_object = from_object.held_object for k, v in from_object.values.items(): self.held_object.values[k] = v # Interpreter objects can not be pickled so we must have # these wrappers. class DependencyHolder(InterpreterObject, ObjectHolder): def __init__(self, dep, pv): InterpreterObject.__init__(self) ObjectHolder.__init__(self, dep, pv) self.methods.update({'found': self.found_method, 'type_name': self.type_name_method, 'version': self.version_method, 'name': self.name_method, 'get_pkgconfig_variable': self.pkgconfig_method, 'get_configtool_variable': self.configtool_method, 'get_variable': self.variable_method, 'partial_dependency': self.partial_dependency_method, 'include_type': self.include_type_method, 'as_system': self.as_system_method, }) def found(self): return self.found_method([], {}) @noPosargs @permittedKwargs({}) def type_name_method(self, args, kwargs): return self.held_object.type_name @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): if self.held_object.type_name == 'internal': return True return self.held_object.found() @noPosargs @permittedKwargs({}) def version_method(self, args, kwargs): return self.held_object.get_version() @noPosargs @permittedKwargs({}) def name_method(self, args, kwargs): return self.held_object.get_name() @permittedKwargs({'define_variable', 'default'}) def pkgconfig_method(self, args, kwargs): args = listify(args) if len(args) != 1: raise InterpreterException('get_pkgconfig_variable takes exactly one argument.') varname = args[0] if not isinstance(varname, str): raise InterpreterException('Variable name must be a string.') return self.held_object.get_pkgconfig_variable(varname, kwargs) @FeatureNew('dep.get_configtool_variable', '0.44.0') @permittedKwargs({}) def configtool_method(self, args, kwargs): args = listify(args) if len(args) != 1: raise InterpreterException('get_configtool_variable takes exactly one argument.') varname = args[0] if not isinstance(varname, str): raise InterpreterException('Variable name must be a string.') return self.held_object.get_configtool_variable(varname) @FeatureNew('dep.partial_dependency', '0.46.0') @noPosargs @permittedKwargs(permitted_method_kwargs['partial_dependency']) def partial_dependency_method(self, args, kwargs): pdep = self.held_object.get_partial_dependency(**kwargs) return DependencyHolder(pdep, self.subproject) @FeatureNew('dep.get_variable', '0.51.0') @noPosargs @permittedKwargs({'cmake', 'pkgconfig', 'configtool', 'internal', 'default_value', 'pkgconfig_define'}) @FeatureNewKwargs('dep.get_variable', '0.54.0', ['internal']) def variable_method(self, args, kwargs): return self.held_object.get_variable(**kwargs) @FeatureNew('dep.include_type', '0.52.0') @noPosargs @permittedKwargs({}) def include_type_method(self, args, kwargs): return self.held_object.get_include_type() @FeatureNew('dep.as_system', '0.52.0') @permittedKwargs({}) def as_system_method(self, args, kwargs): args = listify(args) new_is_system = 'system' if len(args) > 1: raise InterpreterException('as_system takes only one 
optional value') if len(args) == 1: new_is_system = args[0] new_dep = self.held_object.generate_system_dependency(new_is_system) return DependencyHolder(new_dep, self.subproject) class ExternalProgramHolder(InterpreterObject, ObjectHolder): def __init__(self, ep, subproject, backend=None): InterpreterObject.__init__(self) ObjectHolder.__init__(self, ep) self.subproject = subproject self.backend = backend self.methods.update({'found': self.found_method, 'path': self.path_method, 'full_path': self.full_path_method}) self.cached_version = None @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): return self.found() @noPosargs @permittedKwargs({}) def path_method(self, args, kwargs): mlog.deprecation('path() method is deprecated and replaced by full_path()') return self._full_path() @noPosargs @permittedKwargs({}) @FeatureNew('ExternalProgram.full_path', '0.55.0') def full_path_method(self, args, kwargs): return self._full_path() def _full_path(self): exe = self.held_object if isinstance(exe, build.Executable): return self.backend.get_target_filename_abs(exe) return exe.get_path() def found(self): return isinstance(self.held_object, build.Executable) or self.held_object.found() def get_command(self): return self.held_object.get_command() def get_name(self): exe = self.held_object if isinstance(exe, build.Executable): return exe.name return exe.get_name() def get_version(self, interpreter): if isinstance(self.held_object, build.Executable): return self.held_object.project_version if not self.cached_version: raw_cmd = self.get_command() + ['--version'] cmd = [self, '--version'] res = interpreter.run_command_impl(interpreter.current_node, cmd, {}, True) if res.returncode != 0: m = 'Running {!r} failed' raise InterpreterException(m.format(raw_cmd)) output = res.stdout.strip() if not output: output = res.stderr.strip() match = re.search(r'([0-9][0-9\.]+)', output) if not match: m = 'Could not find a version number in output of {!r}' raise InterpreterException(m.format(raw_cmd)) self.cached_version = match.group(1) return self.cached_version class ExternalLibraryHolder(InterpreterObject, ObjectHolder): def __init__(self, el, pv): InterpreterObject.__init__(self) ObjectHolder.__init__(self, el, pv) self.methods.update({'found': self.found_method, 'type_name': self.type_name_method, 'partial_dependency': self.partial_dependency_method, }) def found(self): return self.held_object.found() @noPosargs @permittedKwargs({}) def type_name_method(self, args, kwargs): return self.held_object.type_name @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): return self.found() def get_name(self): return self.held_object.name def get_compile_args(self): return self.held_object.get_compile_args() def get_link_args(self): return self.held_object.get_link_args() def get_exe_args(self): return self.held_object.get_exe_args() @FeatureNew('dep.partial_dependency', '0.46.0') @noPosargs @permittedKwargs(permitted_method_kwargs['partial_dependency']) def partial_dependency_method(self, args, kwargs): pdep = self.held_object.get_partial_dependency(**kwargs) return DependencyHolder(pdep, self.subproject) class GeneratorHolder(InterpreterObject, ObjectHolder): @FeatureNewKwargs('generator', '0.43.0', ['capture']) def __init__(self, interp, args, kwargs): self.interpreter = interp InterpreterObject.__init__(self) ObjectHolder.__init__(self, build.Generator(args, kwargs), interp.subproject) self.methods.update({'process': self.process_method}) @FeatureNewKwargs('generator.process', '0.45.0', 
['preserve_path_from']) @permittedKwargs({'extra_args', 'preserve_path_from'}) def process_method(self, args, kwargs): extras = mesonlib.stringlistify(kwargs.get('extra_args', [])) if 'preserve_path_from' in kwargs: preserve_path_from = kwargs['preserve_path_from'] if not isinstance(preserve_path_from, str): raise InvalidArguments('Preserve_path_from must be a string.') preserve_path_from = os.path.normpath(preserve_path_from) if not os.path.isabs(preserve_path_from): # This is a bit of a hack. Fix properly before merging. raise InvalidArguments('Preserve_path_from must be an absolute path for now. Sorry.') else: preserve_path_from = None gl = self.held_object.process_files('Generator', args, self.interpreter, preserve_path_from, extra_args=extras) return GeneratedListHolder(gl) class GeneratedListHolder(InterpreterObject, ObjectHolder): def __init__(self, arg1, extra_args=None): InterpreterObject.__init__(self) if isinstance(arg1, GeneratorHolder): ObjectHolder.__init__(self, build.GeneratedList(arg1.held_object, extra_args if extra_args is not None else [])) else: ObjectHolder.__init__(self, arg1) def __repr__(self): r = '<{}: {!r}>' return r.format(self.__class__.__name__, self.held_object.get_outputs()) def add_file(self, a): self.held_object.add_file(a) # A machine that's statically known from the cross file class MachineHolder(InterpreterObject, ObjectHolder): def __init__(self, machine_info): InterpreterObject.__init__(self) ObjectHolder.__init__(self, machine_info) self.methods.update({'system': self.system_method, 'cpu': self.cpu_method, 'cpu_family': self.cpu_family_method, 'endian': self.endian_method, }) @noPosargs @permittedKwargs({}) def cpu_family_method(self, args, kwargs): return self.held_object.cpu_family @noPosargs @permittedKwargs({}) def cpu_method(self, args, kwargs): return self.held_object.cpu @noPosargs @permittedKwargs({}) def system_method(self, args, kwargs): return self.held_object.system @noPosargs @permittedKwargs({}) def endian_method(self, args, kwargs): return self.held_object.endian class IncludeDirsHolder(InterpreterObject, ObjectHolder): def __init__(self, idobj): InterpreterObject.__init__(self) ObjectHolder.__init__(self, idobj) class Headers(InterpreterObject): def __init__(self, sources, kwargs): InterpreterObject.__init__(self) self.sources = sources self.install_subdir = kwargs.get('subdir', '') if os.path.isabs(self.install_subdir): mlog.deprecation('Subdir keyword must not be an absolute path. 
This will be a hard error in the next release.') self.custom_install_dir = kwargs.get('install_dir', None) self.custom_install_mode = kwargs.get('install_mode', None) if self.custom_install_dir is not None: if not isinstance(self.custom_install_dir, str): raise InterpreterException('Custom_install_dir must be a string.') def set_install_subdir(self, subdir): self.install_subdir = subdir def get_install_subdir(self): return self.install_subdir def get_sources(self): return self.sources def get_custom_install_dir(self): return self.custom_install_dir def get_custom_install_mode(self): return self.custom_install_mode class DataHolder(InterpreterObject, ObjectHolder): def __init__(self, data): InterpreterObject.__init__(self) ObjectHolder.__init__(self, data) def get_source_subdir(self): return self.held_object.source_subdir def get_sources(self): return self.held_object.sources def get_install_dir(self): return self.held_object.install_dir class InstallDir(InterpreterObject): def __init__(self, src_subdir, inst_subdir, install_dir, install_mode, exclude, strip_directory): InterpreterObject.__init__(self) self.source_subdir = src_subdir self.installable_subdir = inst_subdir self.install_dir = install_dir self.install_mode = install_mode self.exclude = exclude self.strip_directory = strip_directory class Man(InterpreterObject): def __init__(self, sources, kwargs): InterpreterObject.__init__(self) self.sources = sources self.validate_sources() self.custom_install_dir = kwargs.get('install_dir', None) self.custom_install_mode = kwargs.get('install_mode', None) if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str): raise InterpreterException('Custom_install_dir must be a string.') def validate_sources(self): for s in self.sources: try: num = int(s.split('.')[-1]) except (IndexError, ValueError): num = 0 if num < 1 or num > 8: raise InvalidArguments('Man file must have a file extension of a number between 1 and 8') def get_custom_install_dir(self): return self.custom_install_dir def get_custom_install_mode(self): return self.custom_install_mode def get_sources(self): return self.sources class GeneratedObjectsHolder(InterpreterObject, ObjectHolder): def __init__(self, held_object): InterpreterObject.__init__(self) ObjectHolder.__init__(self, held_object) class TargetHolder(InterpreterObject, ObjectHolder): def __init__(self, target, interp): InterpreterObject.__init__(self) ObjectHolder.__init__(self, target, interp.subproject) self.interpreter = interp class BuildTargetHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) self.methods.update({'extract_objects': self.extract_objects_method, 'extract_all_objects': self.extract_all_objects_method, 'name': self.name_method, 'get_id': self.get_id_method, 'outdir': self.outdir_method, 'full_path': self.full_path_method, 'private_dir_include': self.private_dir_include_method, }) def __repr__(self): r = '<{} {}: {}>' h = self.held_object return r.format(self.__class__.__name__, h.get_id(), h.filename) def is_cross(self): return not self.held_object.environment.machines.matches_build_machine(self.held_object.for_machine) @noPosargs @permittedKwargs({}) def private_dir_include_method(self, args, kwargs): return IncludeDirsHolder(build.IncludeDirs('', [], False, [self.interpreter.backend.get_target_private_dir(self.held_object)])) @noPosargs @permittedKwargs({}) def full_path_method(self, args, kwargs): return self.interpreter.backend.get_target_filename_abs(self.held_object) @noPosargs 
@permittedKwargs({}) def outdir_method(self, args, kwargs): return self.interpreter.backend.get_target_dir(self.held_object) @permittedKwargs({}) def extract_objects_method(self, args, kwargs): gobjs = self.held_object.extract_objects(args) return GeneratedObjectsHolder(gobjs) @FeatureNewKwargs('extract_all_objects', '0.46.0', ['recursive']) @noPosargs @permittedKwargs({'recursive'}) def extract_all_objects_method(self, args, kwargs): recursive = kwargs.get('recursive', False) gobjs = self.held_object.extract_all_objects(recursive) if gobjs.objlist and 'recursive' not in kwargs: mlog.warning('extract_all_objects called without setting recursive ' 'keyword argument. Meson currently defaults to ' 'non-recursive to maintain backward compatibility but ' 'the default will be changed in the future.', location=self.current_node) return GeneratedObjectsHolder(gobjs) @noPosargs @permittedKwargs({}) def get_id_method(self, args, kwargs): return self.held_object.get_id() @FeatureNew('name', '0.54.0') @noPosargs @permittedKwargs({}) def name_method(self, args, kwargs): return self.held_object.name class ExecutableHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class StaticLibraryHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class SharedLibraryHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) # Set to True only when called from self.func_shared_lib(). target.shared_library_only = False class BothLibrariesHolder(BuildTargetHolder): def __init__(self, shared_holder, static_holder, interp): # FIXME: This build target always represents the shared library, but # that should be configurable. super().__init__(shared_holder.held_object, interp) self.shared_holder = shared_holder self.static_holder = static_holder self.methods.update({'get_shared_lib': self.get_shared_lib_method, 'get_static_lib': self.get_static_lib_method, }) def __repr__(self): r = '<{} {}: {}, {}: {}>' h1 = self.shared_holder.held_object h2 = self.static_holder.held_object return r.format(self.__class__.__name__, h1.get_id(), h1.filename, h2.get_id(), h2.filename) @noPosargs @permittedKwargs({}) def get_shared_lib_method(self, args, kwargs): return self.shared_holder @noPosargs @permittedKwargs({}) def get_static_lib_method(self, args, kwargs): return self.static_holder class SharedModuleHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class JarHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class CustomTargetIndexHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) self.methods.update({'full_path': self.full_path_method, }) @FeatureNew('custom_target[i].full_path', '0.54.0') @noPosargs @permittedKwargs({}) def full_path_method(self, args, kwargs): return self.interpreter.backend.get_target_filename_abs(self.held_object) class CustomTargetHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) self.methods.update({'full_path': self.full_path_method, 'to_list': self.to_list_method, }) def __repr__(self): r = '<{} {}: {}>' h = self.held_object return r.format(self.__class__.__name__, h.get_id(), h.command) @noPosargs @permittedKwargs({}) def full_path_method(self, args, kwargs): return self.interpreter.backend.get_target_filename_abs(self.held_object) @FeatureNew('custom_target.to_list', '0.54.0') @noPosargs @permittedKwargs({}) def 
to_list_method(self, args, kwargs): result = [] for i in self.held_object: result.append(CustomTargetIndexHolder(i, self.interpreter)) return result def __getitem__(self, index): return CustomTargetIndexHolder(self.held_object[index], self.interpreter) def __setitem__(self, index, value): # lgtm[py/unexpected-raise-in-special-method] raise InterpreterException('Cannot set a member of a CustomTarget') def __delitem__(self, index): # lgtm[py/unexpected-raise-in-special-method] raise InterpreterException('Cannot delete a member of a CustomTarget') def outdir_include(self): return IncludeDirsHolder(build.IncludeDirs('', [], False, [os.path.join('@BUILD_ROOT@', self.interpreter.backend.get_target_dir(self.held_object))])) class RunTargetHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) def __repr__(self): r = '<{} {}: {}>' h = self.held_object return r.format(self.__class__.__name__, h.get_id(), h.command) class Test(InterpreterObject): def __init__(self, name: str, project: str, suite: T.List[str], exe: build.Executable, depends: T.List[T.Union[build.CustomTarget, build.BuildTarget]], is_parallel: bool, cmd_args: T.List[str], env: build.EnvironmentVariables, should_fail: bool, timeout: int, workdir: T.Optional[str], protocol: str, priority: int): InterpreterObject.__init__(self) self.name = name self.suite = suite self.project_name = project self.exe = exe self.depends = depends self.is_parallel = is_parallel self.cmd_args = cmd_args self.env = env self.should_fail = should_fail self.timeout = timeout self.workdir = workdir self.protocol = TestProtocol.from_str(protocol) self.priority = priority def get_exe(self): return self.exe def get_name(self): return self.name class SubprojectHolder(InterpreterObject, ObjectHolder): def __init__(self, subinterpreter, subproject_dir, name, warnings=0, disabled_feature=None, exception=None): InterpreterObject.__init__(self) ObjectHolder.__init__(self, subinterpreter) self.name = name self.warnings = warnings self.disabled_feature = disabled_feature self.exception = exception self.subproject_dir = subproject_dir self.methods.update({'get_variable': self.get_variable_method, 'found': self.found_method, }) @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): return self.found() def found(self): return self.held_object is not None @permittedKwargs({}) @noArgsFlattening def get_variable_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Get_variable takes one or two arguments.') if not self.found(): raise InterpreterException('Subproject "%s/%s" disabled can\'t get_variable on it.' 
% ( self.subproject_dir, self.name)) varname = args[0] if not isinstance(varname, str): raise InterpreterException('Get_variable first argument must be a string.') try: return self.held_object.variables[varname] except KeyError: pass if len(args) == 2: return args[1] raise InvalidArguments('Requested variable "{0}" not found.'.format(varname)) header_permitted_kwargs = set([ 'required', 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', ]) find_library_permitted_kwargs = set([ 'has_headers', 'required', 'dirs', 'static', ]) find_library_permitted_kwargs |= set(['header_' + k for k in header_permitted_kwargs]) class CompilerHolder(InterpreterObject): def __init__(self, compiler, env, subproject): InterpreterObject.__init__(self) self.compiler = compiler self.environment = env self.subproject = subproject self.methods.update({'compiles': self.compiles_method, 'links': self.links_method, 'get_id': self.get_id_method, 'get_linker_id': self.get_linker_id_method, 'compute_int': self.compute_int_method, 'sizeof': self.sizeof_method, 'get_define': self.get_define_method, 'check_header': self.check_header_method, 'has_header': self.has_header_method, 'has_header_symbol': self.has_header_symbol_method, 'run': self.run_method, 'has_function': self.has_function_method, 'has_member': self.has_member_method, 'has_members': self.has_members_method, 'has_type': self.has_type_method, 'alignment': self.alignment_method, 'version': self.version_method, 'cmd_array': self.cmd_array_method, 'find_library': self.find_library_method, 'has_argument': self.has_argument_method, 'has_function_attribute': self.has_func_attribute_method, 'get_supported_function_attributes': self.get_supported_function_attributes_method, 'has_multi_arguments': self.has_multi_arguments_method, 'get_supported_arguments': self.get_supported_arguments_method, 'first_supported_argument': self.first_supported_argument_method, 'has_link_argument': self.has_link_argument_method, 'has_multi_link_arguments': self.has_multi_link_arguments_method, 'get_supported_link_arguments': self.get_supported_link_arguments_method, 'first_supported_link_argument': self.first_supported_link_argument_method, 'unittest_args': self.unittest_args_method, 'symbols_have_underscore_prefix': self.symbols_have_underscore_prefix_method, 'get_argument_syntax': self.get_argument_syntax_method, }) def _dep_msg(self, deps, endl): msg_single = 'with dependency {}' msg_many = 'with dependencies {}' if not deps: return endl if endl is None: endl = '' tpl = msg_many if len(deps) > 1 else msg_single names = [] for d in deps: if isinstance(d, dependencies.ExternalLibrary): name = '-l' + d.name else: name = d.name names.append(name) return tpl.format(', '.join(names)) + endl @noPosargs @permittedKwargs({}) def version_method(self, args, kwargs): return self.compiler.version @noPosargs @permittedKwargs({}) def cmd_array_method(self, args, kwargs): return self.compiler.exelist def determine_args(self, kwargs, mode='link'): nobuiltins = kwargs.get('no_builtin_args', False) if not isinstance(nobuiltins, bool): raise InterpreterException('Type of no_builtin_args not a boolean.') args = [] incdirs = extract_as_list(kwargs, 'include_directories') for i in incdirs: if not isinstance(i, IncludeDirsHolder): raise InterpreterException('Include directories argument must be an include_directories object.') for idir in i.held_object.get_incdirs(): idir = os.path.join(self.environment.get_source_dir(), i.held_object.get_curdir(), idir) args += 
self.compiler.get_include_args(idir, False) if not nobuiltins: for_machine = Interpreter.machine_from_native_kwarg(kwargs) opts = self.environment.coredata.compiler_options[for_machine][self.compiler.language] args += self.compiler.get_option_compile_args(opts) if mode == 'link': args += self.compiler.get_option_link_args(opts) args += mesonlib.stringlistify(kwargs.get('args', [])) return args def determine_dependencies(self, kwargs, endl=':'): deps = kwargs.get('dependencies', None) if deps is not None: deps = listify(deps) final_deps = [] for d in deps: try: d = d.held_object except Exception: pass if isinstance(d, InternalDependency) or not isinstance(d, Dependency): raise InterpreterException('Dependencies must be external dependencies') final_deps.append(d) deps = final_deps return deps, self._dep_msg(deps, endl) @permittedKwargs({ 'prefix', 'args', 'dependencies', }) def alignment_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Alignment method takes exactly one positional argument.') check_stringlist(args) typename = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of alignment must be a string.') extra_args = mesonlib.stringlistify(kwargs.get('args', [])) deps, msg = self.determine_dependencies(kwargs) result = self.compiler.alignment(typename, prefix, self.environment, extra_args=extra_args, dependencies=deps) mlog.log('Checking for alignment of', mlog.bold(typename, True), msg, result) return result @permittedKwargs({ 'name', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def run_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Run method takes exactly one positional argument.') code = args[0] if isinstance(code, mesonlib.File): code = mesonlib.File.from_absolute_file( code.rel_to_builddir(self.environment.source_dir)) elif not isinstance(code, str): raise InvalidArguments('Argument must be string or file.') testname = kwargs.get('name', '') if not isinstance(testname, str): raise InterpreterException('Testname argument must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs, endl=None) result = self.compiler.run(code, self.environment, extra_args=extra_args, dependencies=deps) if len(testname) > 0: if not result.compiled: h = mlog.red('DID NOT COMPILE') elif result.returncode == 0: h = mlog.green('YES') else: h = mlog.red('NO (%d)' % result.returncode) mlog.log('Checking if', mlog.bold(testname, True), msg, 'runs:', h) return TryRunResultHolder(result) @noPosargs @permittedKwargs({}) def get_id_method(self, args, kwargs): return self.compiler.get_id() @noPosargs @permittedKwargs({}) @FeatureNew('compiler.get_linker_id', '0.53.0') def get_linker_id_method(self, args, kwargs): return self.compiler.get_linker_id() @noPosargs @permittedKwargs({}) def symbols_have_underscore_prefix_method(self, args, kwargs): ''' Check if the compiler prefixes _ (underscore) to global C symbols See: https://en.wikipedia.org/wiki/Name_mangling#C ''' return self.compiler.symbols_have_underscore_prefix(self.environment) @noPosargs @permittedKwargs({}) def unittest_args_method(self, args, kwargs): ''' This function is deprecated and should not be used. It can be removed in a future version of Meson. 
''' if not hasattr(self.compiler, 'get_feature_args'): raise InterpreterException('This {} compiler has no feature arguments.'.format(self.compiler.get_display_language())) build_to_src = os.path.relpath(self.environment.get_source_dir(), self.environment.get_build_dir()) return self.compiler.get_feature_args({'unittest': 'true'}, build_to_src) @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_member_method(self, args, kwargs): if len(args) != 2: raise InterpreterException('Has_member takes exactly two arguments.') check_stringlist(args) typename, membername = args prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_member must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_members(typename, [membername], prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') mlog.log('Checking whether type', mlog.bold(typename, True), 'has member', mlog.bold(membername, True), msg, hadtxt, cached) return had @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_members_method(self, args, kwargs): if len(args) < 2: raise InterpreterException('Has_members needs at least two arguments.') check_stringlist(args) typename, *membernames = args prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_members must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_members(typename, membernames, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') members = mlog.bold(', '.join(['"{}"'.format(m) for m in membernames])) mlog.log('Checking whether type', mlog.bold(typename, True), 'has members', members, msg, hadtxt, cached) return had @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_function_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Has_function takes exactly one argument.') check_stringlist(args) funcname = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_function must be a string.') extra_args = self.determine_args(kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_function(funcname, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') mlog.log('Checking for function', mlog.bold(funcname, True), msg, hadtxt, cached) return had @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_type_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Has_type takes exactly one argument.') check_stringlist(args) typename = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_type must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = 
self.determine_dependencies(kwargs) had, cached = self.compiler.has_type(typename, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') mlog.log('Checking for type', mlog.bold(typename, True), msg, hadtxt, cached) return had @FeatureNew('compiler.compute_int', '0.40.0') @permittedKwargs({ 'prefix', 'low', 'high', 'guess', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def compute_int_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Compute_int takes exactly one argument.') check_stringlist(args) expression = args[0] prefix = kwargs.get('prefix', '') low = kwargs.get('low', None) high = kwargs.get('high', None) guess = kwargs.get('guess', None) if not isinstance(prefix, str): raise InterpreterException('Prefix argument of compute_int must be a string.') if low is not None and not isinstance(low, int): raise InterpreterException('Low argument of compute_int must be an int.') if high is not None and not isinstance(high, int): raise InterpreterException('High argument of compute_int must be an int.') if guess is not None and not isinstance(guess, int): raise InterpreterException('Guess argument of compute_int must be an int.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) res = self.compiler.compute_int(expression, low, high, guess, prefix, self.environment, extra_args=extra_args, dependencies=deps) mlog.log('Computing int of', mlog.bold(expression, True), msg, res) return res @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def sizeof_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Sizeof takes exactly one argument.') check_stringlist(args) element = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of sizeof must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) esize = self.compiler.sizeof(element, prefix, self.environment, extra_args=extra_args, dependencies=deps) mlog.log('Checking for size of', mlog.bold(element, True), msg, esize) return esize @FeatureNew('compiler.get_define', '0.40.0') @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def get_define_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('get_define() takes exactly one argument.') check_stringlist(args) element = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of get_define() must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) value, cached = self.compiler.get_define(element, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' mlog.log('Fetching value of define', mlog.bold(element, True), msg, value, cached) return value @permittedKwargs({ 'name', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def compiles_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('compiles method takes exactly one argument.') code = args[0] if isinstance(code, mesonlib.File): code = mesonlib.File.from_absolute_file( code.rel_to_builddir(self.environment.source_dir)) elif not 
isinstance(code, str): raise InvalidArguments('Argument must be string or file.') testname = kwargs.get('name', '') if not isinstance(testname, str): raise InterpreterException('Testname argument must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs, endl=None) result, cached = self.compiler.compiles(code, self.environment, extra_args=extra_args, dependencies=deps) if len(testname) > 0: if result: h = mlog.green('YES') else: h = mlog.red('NO') cached = mlog.blue('(cached)') if cached else '' mlog.log('Checking if', mlog.bold(testname, True), msg, 'compiles:', h, cached) return result @permittedKwargs({ 'name', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def links_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('links method takes exactly one argument.') code = args[0] if isinstance(code, mesonlib.File): code = mesonlib.File.from_absolute_file( code.rel_to_builddir(self.environment.source_dir)) elif not isinstance(code, str): raise InvalidArguments('Argument must be string or file.') testname = kwargs.get('name', '') if not isinstance(testname, str): raise InterpreterException('Testname argument must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs, endl=None) result, cached = self.compiler.links(code, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if len(testname) > 0: if result: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log('Checking if', mlog.bold(testname, True), msg, 'links:', h, cached) return result @FeatureNew('compiler.check_header', '0.47.0') @FeatureNewKwargs('compiler.check_header', '0.50.0', ['required']) @permittedKwargs(header_permitted_kwargs) def check_header_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('check_header method takes exactly one argument.') check_stringlist(args) hname = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_header must be a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False) if disabled: mlog.log('Check usable header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled') return False extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) haz, cached = self.compiler.check_header(hname, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if required and not haz: raise InterpreterException('{} header {!r} not usable'.format(self.compiler.get_display_language(), hname)) elif haz: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log('Check usable header', mlog.bold(hname, True), msg, h, cached) return haz @FeatureNewKwargs('compiler.has_header', '0.50.0', ['required']) @permittedKwargs(header_permitted_kwargs) def has_header_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('has_header method takes exactly one argument.') check_stringlist(args) hname = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_header must be a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False) if disabled: mlog.log('Has header', mlog.bold(hname, True), 'skipped: feature', 
mlog.bold(feature), 'disabled') return False extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) haz, cached = self.compiler.has_header(hname, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if required and not haz: raise InterpreterException('{} header {!r} not found'.format(self.compiler.get_display_language(), hname)) elif haz: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log('Has header', mlog.bold(hname, True), msg, h, cached) return haz @FeatureNewKwargs('compiler.has_header_symbol', '0.50.0', ['required']) @permittedKwargs(header_permitted_kwargs) def has_header_symbol_method(self, args, kwargs): if len(args) != 2: raise InterpreterException('has_header_symbol method takes exactly two arguments.') check_stringlist(args) hname, symbol = args prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_header_symbol must be a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False) if disabled: mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), 'skipped: feature', mlog.bold(feature), 'disabled') return False extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) haz, cached = self.compiler.has_header_symbol(hname, symbol, prefix, self.environment, extra_args=extra_args, dependencies=deps) if required and not haz: raise InterpreterException('{} symbol {} not found in header {}'.format(self.compiler.get_display_language(), symbol, hname)) elif haz: h = mlog.green('YES') else: h = mlog.red('NO') cached = mlog.blue('(cached)') if cached else '' mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), msg, h, cached) return haz def notfound_library(self, libname): lib = dependencies.ExternalLibrary(libname, None, self.environment, self.compiler.language, silent=True) return ExternalLibraryHolder(lib, self.subproject) @FeatureNewKwargs('compiler.find_library', '0.51.0', ['static']) @FeatureNewKwargs('compiler.find_library', '0.50.0', ['has_headers']) @FeatureNewKwargs('compiler.find_library', '0.49.0', ['disabler']) @disablerIfNotFound @permittedKwargs(find_library_permitted_kwargs) def find_library_method(self, args, kwargs): # TODO add dependencies support? 
if len(args) != 1: raise InterpreterException('find_library method takes one argument.') libname = args[0] if not isinstance(libname, str): raise InterpreterException('Library name not a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: mlog.log('Library', mlog.bold(libname), 'skipped: feature', mlog.bold(feature), 'disabled') return self.notfound_library(libname) has_header_kwargs = {k[7:]: v for k, v in kwargs.items() if k.startswith('header_')} has_header_kwargs['required'] = required headers = mesonlib.stringlistify(kwargs.get('has_headers', [])) for h in headers: if not self.has_header_method([h], has_header_kwargs): return self.notfound_library(libname) search_dirs = extract_search_dirs(kwargs) libtype = mesonlib.LibType.PREFER_SHARED if 'static' in kwargs: if not isinstance(kwargs['static'], bool): raise InterpreterException('static must be a boolean') libtype = mesonlib.LibType.STATIC if kwargs['static'] else mesonlib.LibType.SHARED linkargs = self.compiler.find_library(libname, self.environment, search_dirs, libtype) if required and not linkargs: raise InterpreterException( '{} library {!r} not found'.format(self.compiler.get_display_language(), libname)) lib = dependencies.ExternalLibrary(libname, linkargs, self.environment, self.compiler.language) return ExternalLibraryHolder(lib, self.subproject) @permittedKwargs({}) def has_argument_method(self, args: T.Sequence[str], kwargs) -> bool: args = mesonlib.stringlistify(args) if len(args) != 1: raise InterpreterException('has_argument takes exactly one argument.') return self.has_multi_arguments_method(args, kwargs) @permittedKwargs({}) def has_multi_arguments_method(self, args: T.Sequence[str], kwargs: dict): args = mesonlib.stringlistify(args) result, cached = self.compiler.has_multi_arguments(args, self.environment) if result: h = mlog.green('YES') else: h = mlog.red('NO') cached = mlog.blue('(cached)') if cached else '' mlog.log( 'Compiler for {} supports arguments {}:'.format( self.compiler.get_display_language(), ' '.join(args)), h, cached) return result @FeatureNew('compiler.get_supported_arguments', '0.43.0') @permittedKwargs({}) def get_supported_arguments_method(self, args, kwargs): args = mesonlib.stringlistify(args) supported_args = [] for arg in args: if self.has_argument_method(arg, kwargs): supported_args.append(arg) return supported_args @permittedKwargs({}) def first_supported_argument_method(self, args: T.Sequence[str], kwargs: dict) -> T.List[str]: for arg in mesonlib.stringlistify(args): if self.has_argument_method(arg, kwargs): mlog.log('First supported argument:', mlog.bold(arg)) return [arg] mlog.log('First supported argument:', mlog.red('None')) return [] @FeatureNew('compiler.has_link_argument', '0.46.0') @permittedKwargs({}) def has_link_argument_method(self, args, kwargs): args = mesonlib.stringlistify(args) if len(args) != 1: raise InterpreterException('has_link_argument takes exactly one argument.') return self.has_multi_link_arguments_method(args, kwargs) @FeatureNew('compiler.has_multi_link_argument', '0.46.0') @permittedKwargs({}) def has_multi_link_arguments_method(self, args, kwargs): args = mesonlib.stringlistify(args) result, cached = self.compiler.has_multi_link_arguments(args, self.environment) cached = mlog.blue('(cached)') if cached else '' if result: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log( 'Compiler for {} supports link arguments {}:'.format( self.compiler.get_display_language(), ' '.join(args)), h, cached) return result 
@FeatureNew('compiler.get_supported_link_arguments_method', '0.46.0') @permittedKwargs({}) def get_supported_link_arguments_method(self, args, kwargs): args = mesonlib.stringlistify(args) supported_args = [] for arg in args: if self.has_link_argument_method(arg, kwargs): supported_args.append(arg) return supported_args @FeatureNew('compiler.first_supported_link_argument_method', '0.46.0') @permittedKwargs({}) def first_supported_link_argument_method(self, args, kwargs): for i in mesonlib.stringlistify(args): if self.has_link_argument_method(i, kwargs): mlog.log('First supported link argument:', mlog.bold(i)) return [i] mlog.log('First supported link argument:', mlog.red('None')) return [] @FeatureNew('compiler.has_function_attribute', '0.48.0') @permittedKwargs({}) def has_func_attribute_method(self, args, kwargs): args = mesonlib.stringlistify(args) if len(args) != 1: raise InterpreterException('has_func_attribute takes exactly one argument.') result, cached = self.compiler.has_func_attribute(args[0], self.environment) cached = mlog.blue('(cached)') if cached else '' h = mlog.green('YES') if result else mlog.red('NO') mlog.log('Compiler for {} supports function attribute {}:'.format(self.compiler.get_display_language(), args[0]), h, cached) return result @FeatureNew('compiler.get_supported_function_attributes', '0.48.0') @permittedKwargs({}) def get_supported_function_attributes_method(self, args, kwargs): args = mesonlib.stringlistify(args) return [a for a in args if self.has_func_attribute_method(a, kwargs)] @FeatureNew('compiler.get_argument_syntax_method', '0.49.0') @noPosargs @noKwargs def get_argument_syntax_method(self, args, kwargs): return self.compiler.get_argument_syntax() ModuleState = collections.namedtuple('ModuleState', [ 'source_root', 'build_to_src', 'subproject', 'subdir', 'current_lineno', 'environment', 'project_name', 'project_version', 'backend', 'targets', 'data', 'headers', 'man', 'global_args', 'project_args', 'build_machine', 'host_machine', 'target_machine', 'current_node']) class ModuleHolder(InterpreterObject, ObjectHolder): def __init__(self, modname, module, interpreter): InterpreterObject.__init__(self) ObjectHolder.__init__(self, module) self.modname = modname self.interpreter = interpreter def method_call(self, method_name, args, kwargs): try: fn = getattr(self.held_object, method_name) except AttributeError: raise InvalidArguments('Module %s does not have method %s.' % (self.modname, method_name)) if method_name.startswith('_'): raise InvalidArguments('Function {!r} in module {!r} is private.'.format(method_name, self.modname)) if not getattr(fn, 'no-args-flattening', False): args = flatten(args) # This is not 100% reliable but we can't use hash() # because the Build object contains dicts and lists. 
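        # As a fallback, snapshot the number of build targets before calling into the
        # module; a module that adds targets directly instead of returning them via
        # ModuleReturnValue trips the 'altered internal state' check further down.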
num_targets = len(self.interpreter.build.targets) state = ModuleState( source_root = self.interpreter.environment.get_source_dir(), build_to_src=mesonlib.relpath(self.interpreter.environment.get_source_dir(), self.interpreter.environment.get_build_dir()), subproject=self.interpreter.subproject, subdir=self.interpreter.subdir, current_lineno=self.interpreter.current_lineno, environment=self.interpreter.environment, project_name=self.interpreter.build.project_name, project_version=self.interpreter.build.dep_manifest[self.interpreter.active_projectname], # The backend object is under-used right now, but we will need it: # https://github.com/mesonbuild/meson/issues/1419 backend=self.interpreter.backend, targets=self.interpreter.build.targets, data=self.interpreter.build.data, headers=self.interpreter.build.get_headers(), man=self.interpreter.build.get_man(), #global_args_for_build = self.interpreter.build.global_args.build, global_args = self.interpreter.build.global_args.host, #project_args_for_build = self.interpreter.build.projects_args.build.get(self.interpreter.subproject, {}), project_args = self.interpreter.build.projects_args.host.get(self.interpreter.subproject, {}), build_machine=self.interpreter.builtin['build_machine'].held_object, host_machine=self.interpreter.builtin['host_machine'].held_object, target_machine=self.interpreter.builtin['target_machine'].held_object, current_node=self.current_node ) # Many modules do for example self.interpreter.find_program_impl(), # so we have to ensure they use the current interpreter and not the one # that first imported that module, otherwise it will use outdated # overrides. self.held_object.interpreter = self.interpreter if self.held_object.is_snippet(method_name): value = fn(self.interpreter, state, args, kwargs) return self.interpreter.holderify(value) else: value = fn(state, args, kwargs) if num_targets != len(self.interpreter.build.targets): raise InterpreterException('Extension module altered internal state illegally.') return self.interpreter.module_method_callback(value) class Summary: def __init__(self, project_name, project_version): self.project_name = project_name self.project_version = project_version self.sections = collections.defaultdict(dict) self.max_key_len = 0 def add_section(self, section, values, kwargs): bool_yn = kwargs.get('bool_yn', False) if not isinstance(bool_yn, bool): raise InterpreterException('bool_yn keyword argument must be boolean') list_sep = kwargs.get('list_sep') if list_sep is not None and not isinstance(list_sep, str): raise InterpreterException('list_sep keyword argument must be string') for k, v in values.items(): if k in self.sections[section]: raise InterpreterException('Summary section {!r} already have key {!r}'.format(section, k)) formatted_values = [] for i in listify(v): if not isinstance(i, (str, int)): m = 'Summary value in section {!r}, key {!r}, must be string, integer or boolean' raise InterpreterException(m.format(section, k)) if bool_yn and isinstance(i, bool): formatted_values.append(mlog.green('YES') if i else mlog.red('NO')) else: formatted_values.append(i) self.sections[section][k] = (formatted_values, list_sep) self.max_key_len = max(self.max_key_len, len(k)) def dump(self): mlog.log(self.project_name, mlog.normal_cyan(self.project_version)) for section, values in self.sections.items(): mlog.log('') # newline if section: mlog.log(' ', mlog.bold(section)) for k, v in values.items(): v, list_sep = v indent = self.max_key_len - len(k) + 3 end = ' ' if v else '' mlog.log(' ' * indent, 
k + ':', end=end) if list_sep is None: indent = self.max_key_len + 6 list_sep = '\n' + ' ' * indent mlog.log(*v, sep=list_sep) mlog.log('') # newline class MesonMain(InterpreterObject): def __init__(self, build, interpreter): InterpreterObject.__init__(self) self.build = build self.interpreter = interpreter self._found_source_scripts = {} self.methods.update({'get_compiler': self.get_compiler_method, 'is_cross_build': self.is_cross_build_method, 'has_exe_wrapper': self.has_exe_wrapper_method, 'is_unity': self.is_unity_method, 'is_subproject': self.is_subproject_method, 'current_source_dir': self.current_source_dir_method, 'current_build_dir': self.current_build_dir_method, 'source_root': self.source_root_method, 'build_root': self.build_root_method, 'add_install_script': self.add_install_script_method, 'add_postconf_script': self.add_postconf_script_method, 'add_dist_script': self.add_dist_script_method, 'install_dependency_manifest': self.install_dependency_manifest_method, 'override_dependency': self.override_dependency_method, 'override_find_program': self.override_find_program_method, 'project_version': self.project_version_method, 'project_license': self.project_license_method, 'version': self.version_method, 'project_name': self.project_name_method, 'get_cross_property': self.get_cross_property_method, 'get_external_property': self.get_external_property_method, 'backend': self.backend_method, }) def _find_source_script(self, prog: T.Union[str, ExecutableHolder], args): if isinstance(prog, ExecutableHolder): prog_path = self.interpreter.backend.get_target_filename(prog.held_object) return build.RunScript([prog_path], args) elif isinstance(prog, ExternalProgramHolder): return build.RunScript(prog.get_command(), args) # Prefer scripts in the current source directory search_dir = os.path.join(self.interpreter.environment.source_dir, self.interpreter.subdir) key = (prog, search_dir) if key in self._found_source_scripts: found = self._found_source_scripts[key] else: found = dependencies.ExternalProgram(prog, search_dir=search_dir) if found.found(): self._found_source_scripts[key] = found else: m = 'Script or command {!r} not found or not executable' raise InterpreterException(m.format(prog)) return build.RunScript(found.get_command(), args) def _process_script_args( self, name: str, args: T.List[T.Union[ str, mesonlib.File, CustomTargetHolder, CustomTargetIndexHolder, ConfigureFileHolder, ExternalProgramHolder, ExecutableHolder, ]], allow_built: bool = False) -> T.List[str]: script_args = [] # T.List[str] new = False for a in args: a = unholder(a) if isinstance(a, str): script_args.append(a) elif isinstance(a, mesonlib.File): new = True script_args.append(a.rel_to_builddir(self.interpreter.environment.source_dir)) elif isinstance(a, (build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)): if not allow_built: raise InterpreterException('Arguments to {} cannot be built'.format(name)) new = True script_args.extend([os.path.join(a.get_subdir(), o) for o in a.get_outputs()]) # This feels really hacky, but I'm not sure how else to fix # this without completely rewriting install script handling. # This is complicated by the fact that the install target # depends on all. 
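                # Mark the referenced target as built-by-default so its outputs
                # actually exist by the time the install/dist/postconf script runs
                # (the install target only depends on 'all').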
if isinstance(a, build.CustomTargetIndex): a.target.build_by_default = True else: a.build_by_default = True elif isinstance(a, build.ConfigureFile): new = True script_args.append(os.path.join(a.subdir, a.targetname)) elif isinstance(a, dependencies.ExternalProgram): script_args.extend(a.command) new = True else: raise InterpreterException( 'Arguments to {} must be strings, Files, CustomTargets, ' 'Indexes of CustomTargets, or ConfigureFiles'.format(name)) if new: FeatureNew('Calling "{}" with File, CustomTaget, Index of CustomTarget, ConfigureFile, Executable, or ExternalProgram'.format(name), '0.55.0').use( self.interpreter.subproject) return script_args @permittedKwargs(set()) def add_install_script_method(self, args: 'T.Tuple[T.Union[str, ExecutableHolder], T.Union[str, mesonlib.File, CustomTargetHolder, CustomTargetIndexHolder, ConfigureFileHolder], ...]', kwargs): if len(args) < 1: raise InterpreterException('add_install_script takes one or more arguments') script_args = self._process_script_args('add_install_script', args[1:], allow_built=True) script = self._find_source_script(args[0], script_args) self.build.install_scripts.append(script) @permittedKwargs(set()) def add_postconf_script_method(self, args, kwargs): if len(args) < 1: raise InterpreterException('add_postconf_script takes one or more arguments') script_args = self._process_script_args('add_postconf_script', args[1:], allow_built=True) script = self._find_source_script(args[0], script_args) self.build.postconf_scripts.append(script) @permittedKwargs(set()) def add_dist_script_method(self, args, kwargs): if len(args) < 1: raise InterpreterException('add_dist_script takes one or more arguments') if len(args) > 1: FeatureNew('Calling "add_dist_script" with multiple arguments', '0.49.0').use(self.interpreter.subproject) if self.interpreter.subproject != '': raise InterpreterException('add_dist_script may not be used in a subproject.') script_args = self._process_script_args('add_dist_script', args[1:], allow_built=True) script = self._find_source_script(args[0], script_args) self.build.dist_scripts.append(script) @noPosargs @permittedKwargs({}) def current_source_dir_method(self, args, kwargs): src = self.interpreter.environment.source_dir sub = self.interpreter.subdir if sub == '': return src return os.path.join(src, sub) @noPosargs @permittedKwargs({}) def current_build_dir_method(self, args, kwargs): src = self.interpreter.environment.build_dir sub = self.interpreter.subdir if sub == '': return src return os.path.join(src, sub) @noPosargs @permittedKwargs({}) def backend_method(self, args, kwargs): return self.interpreter.backend.name @noPosargs @permittedKwargs({}) def source_root_method(self, args, kwargs): return self.interpreter.environment.source_dir @noPosargs @permittedKwargs({}) def build_root_method(self, args, kwargs): return self.interpreter.environment.build_dir @noPosargs @permittedKwargs({}) def has_exe_wrapper_method(self, args, kwargs): if self.is_cross_build_method(None, None) and \ self.build.environment.need_exe_wrapper(): if self.build.environment.exe_wrapper is None: return False # We return True when exe_wrap is defined, when it's not needed, and # when we're compiling natively. The last two are semantically confusing. # Need to revisit this. 
return True @noPosargs @permittedKwargs({}) def is_cross_build_method(self, args, kwargs): return self.build.environment.is_cross_build() @permittedKwargs({'native'}) def get_compiler_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('get_compiler_method must have one and only one argument.') cname = args[0] for_machine = Interpreter.machine_from_native_kwarg(kwargs) clist = self.interpreter.coredata.compilers[for_machine] if cname in clist: return CompilerHolder(clist[cname], self.build.environment, self.interpreter.subproject) raise InterpreterException('Tried to access compiler for unspecified language "%s".' % cname) @noPosargs @permittedKwargs({}) def is_unity_method(self, args, kwargs): optval = self.interpreter.environment.coredata.get_builtin_option('unity') if optval == 'on' or (optval == 'subprojects' and self.interpreter.is_subproject()): return True return False @noPosargs @permittedKwargs({}) def is_subproject_method(self, args, kwargs): return self.interpreter.is_subproject() @permittedKwargs({}) def install_dependency_manifest_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Must specify manifest install file name') if not isinstance(args[0], str): raise InterpreterException('Argument must be a string.') self.build.dep_manifest_name = args[0] @FeatureNew('meson.override_find_program', '0.46.0') @permittedKwargs({}) def override_find_program_method(self, args, kwargs): if len(args) != 2: raise InterpreterException('Override needs two arguments') name, exe = args if not isinstance(name, str): raise InterpreterException('First argument must be a string') if hasattr(exe, 'held_object'): exe = exe.held_object if isinstance(exe, mesonlib.File): abspath = exe.absolute_path(self.interpreter.environment.source_dir, self.interpreter.environment.build_dir) if not os.path.exists(abspath): raise InterpreterException('Tried to override %s with a file that does not exist.' 
% name) exe = OverrideProgram(abspath) if not isinstance(exe, (dependencies.ExternalProgram, build.Executable)): raise InterpreterException('Second argument must be an external program or executable.') self.interpreter.add_find_program_override(name, exe) @FeatureNew('meson.override_dependency', '0.54.0') @permittedKwargs({'native'}) def override_dependency_method(self, args, kwargs): if len(args) != 2: raise InterpreterException('Override needs two arguments') name = args[0] dep = args[1] if not isinstance(name, str) or not name: raise InterpreterException('First argument must be a string and cannot be empty') if hasattr(dep, 'held_object'): dep = dep.held_object if not isinstance(dep, dependencies.Dependency): raise InterpreterException('Second argument must be a dependency object') identifier = dependencies.get_dep_identifier(name, kwargs) for_machine = self.interpreter.machine_from_native_kwarg(kwargs) override = self.build.dependency_overrides[for_machine].get(identifier) if override: m = 'Tried to override dependency {!r} which has already been resolved or overridden at {}' location = mlog.get_error_location_string(override.node.filename, override.node.lineno) raise InterpreterException(m.format(name, location)) self.build.dependency_overrides[for_machine][identifier] = \ build.DependencyOverride(dep, self.interpreter.current_node) @noPosargs @permittedKwargs({}) def project_version_method(self, args, kwargs): return self.build.dep_manifest[self.interpreter.active_projectname]['version'] @FeatureNew('meson.project_license()', '0.45.0') @noPosargs @permittedKwargs({}) def project_license_method(self, args, kwargs): return self.build.dep_manifest[self.interpreter.active_projectname]['license'] @noPosargs @permittedKwargs({}) def version_method(self, args, kwargs): return coredata.version @noPosargs @permittedKwargs({}) def project_name_method(self, args, kwargs): return self.interpreter.active_projectname @noArgsFlattening @permittedKwargs({}) def get_cross_property_method(self, args, kwargs) -> str: if len(args) < 1 or len(args) > 2: raise InterpreterException('Must have one or two arguments.') propname = args[0] if not isinstance(propname, str): raise InterpreterException('Property name must be string.') try: props = self.interpreter.environment.properties.host return props[propname] except Exception: if len(args) == 2: return args[1] raise InterpreterException('Unknown cross property: %s.' % propname) @noArgsFlattening @permittedKwargs({'native'}) @FeatureNew('meson.get_external_property', '0.54.0') def get_external_property_method(self, args: T.Sequence[str], kwargs: dict) -> str: if len(args) < 1 or len(args) > 2: raise InterpreterException('Must have one or two positional arguments.') propname = args[0] if not isinstance(propname, str): raise InterpreterException('Property name must be string.') def _get_native() -> str: try: props = self.interpreter.environment.properties.build return props[propname] except Exception: if len(args) == 2: return args[1] raise InterpreterException('Unknown native property: %s.' 
% propname) if 'native' in kwargs: if kwargs['native']: return _get_native() else: return self.get_cross_property_method(args, {}) else: # native: not specified if self.build.environment.is_cross_build(): return self.get_cross_property_method(args, kwargs) else: return _get_native() known_library_kwargs = ( build.known_shlib_kwargs | build.known_stlib_kwargs ) known_build_target_kwargs = ( known_library_kwargs | build.known_exe_kwargs | build.known_jar_kwargs | {'target_type'} ) _base_test_args = {'args', 'depends', 'env', 'should_fail', 'timeout', 'workdir', 'suite', 'priority', 'protocol'} permitted_kwargs = {'add_global_arguments': {'language', 'native'}, 'add_global_link_arguments': {'language', 'native'}, 'add_languages': {'required', 'native'}, 'add_project_link_arguments': {'language', 'native'}, 'add_project_arguments': {'language', 'native'}, 'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env', 'is_default'}, 'benchmark': _base_test_args, 'build_target': known_build_target_kwargs, 'configure_file': {'input', 'output', 'configuration', 'command', 'copy', 'depfile', 'install_dir', 'install_mode', 'capture', 'install', 'format', 'output_format', 'encoding'}, 'custom_target': {'input', 'output', 'command', 'install', 'install_dir', 'install_mode', 'build_always', 'capture', 'depends', 'depend_files', 'depfile', 'build_by_default', 'build_always_stale', 'console'}, 'dependency': {'default_options', 'embed', 'fallback', 'language', 'main', 'method', 'modules', 'components', 'cmake_module_path', 'optional_modules', 'native', 'not_found_message', 'required', 'static', 'version', 'private_headers', 'cmake_args', 'include_type', }, 'declare_dependency': {'include_directories', 'link_with', 'sources', 'dependencies', 'compile_args', 'link_args', 'link_whole', 'version', 'variables', }, 'executable': build.known_exe_kwargs, 'find_program': {'required', 'native', 'version', 'dirs'}, 'generator': {'arguments', 'output', 'depends', 'depfile', 'capture', 'preserve_path_from'}, 'include_directories': {'is_system'}, 'install_data': {'install_dir', 'install_mode', 'rename', 'sources'}, 'install_headers': {'install_dir', 'install_mode', 'subdir'}, 'install_man': {'install_dir', 'install_mode'}, 'install_subdir': {'exclude_files', 'exclude_directories', 'install_dir', 'install_mode', 'strip_directory'}, 'jar': build.known_jar_kwargs, 'project': {'version', 'meson_version', 'default_options', 'license', 'subproject_dir'}, 'run_command': {'check', 'capture', 'env'}, 'run_target': {'command', 'depends'}, 'shared_library': build.known_shlib_kwargs, 'shared_module': build.known_shmod_kwargs, 'static_library': build.known_stlib_kwargs, 'both_libraries': known_library_kwargs, 'library': known_library_kwargs, 'subdir': {'if_found'}, 'subproject': {'version', 'default_options', 'required'}, 'test': set.union(_base_test_args, {'is_parallel'}), 'vcs_tag': {'input', 'output', 'fallback', 'command', 'replace_string'}, } class Interpreter(InterpreterBase): def __init__(self, build, backend=None, subproject='', subdir='', subproject_dir='subprojects', modules = None, default_project_options=None, mock=False, ast=None): super().__init__(build.environment.get_source_dir(), subdir, subproject) self.an_unpicklable_object = mesonlib.an_unpicklable_object self.build = build self.environment = build.environment self.coredata = self.environment.get_coredata() self.backend = backend self.summary = {} if modules is None: self.modules = {} else: self.modules = modules # Subproject directory is usually the name 
of the subproject, but can # be different for dependencies provided by wrap files. self.subproject_directory_name = subdir.split(os.path.sep)[-1] self.subproject_dir = subproject_dir self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt') if not mock and ast is None: self.load_root_meson_file() self.sanity_check_ast() elif ast is not None: self.ast = ast self.sanity_check_ast() self.builtin.update({'meson': MesonMain(build, self)}) self.generators = [] self.visited_subdirs = {} self.project_args_frozen = False self.global_args_frozen = False # implies self.project_args_frozen self.subprojects = {} self.subproject_stack = [] self.configure_file_outputs = {} # Passed from the outside, only used in subprojects. if default_project_options: self.default_project_options = default_project_options.copy() else: self.default_project_options = {} self.project_default_options = {} self.build_func_dict() # build_def_files needs to be defined before parse_project is called self.build_def_files = [os.path.join(self.subdir, environment.build_filename)] if not mock: self.parse_project() self._redetect_machines() def _redetect_machines(self): # Re-initialize machine descriptions. We can do a better job now because we # have the compilers needed to gain more knowledge, so wipe out old # inference and start over. machines = self.build.environment.machines.miss_defaulting() machines.build = environment.detect_machine_info(self.coredata.compilers.build) self.build.environment.machines = machines.default_missing() assert self.build.environment.machines.build.cpu is not None assert self.build.environment.machines.host.cpu is not None assert self.build.environment.machines.target.cpu is not None self.builtin['build_machine'] = \ MachineHolder(self.build.environment.machines.build) self.builtin['host_machine'] = \ MachineHolder(self.build.environment.machines.host) self.builtin['target_machine'] = \ MachineHolder(self.build.environment.machines.target) def get_non_matching_default_options(self): env = self.environment for def_opt_name, def_opt_value in self.project_default_options.items(): for opts in env.coredata.get_all_options(): cur_opt_value = opts.get(def_opt_name) if cur_opt_value is not None: def_opt_value = env.coredata.validate_option_value(def_opt_name, def_opt_value) if def_opt_value != cur_opt_value.value: yield (def_opt_name, def_opt_value, cur_opt_value) def build_func_dict(self): self.funcs.update({'add_global_arguments': self.func_add_global_arguments, 'add_project_arguments': self.func_add_project_arguments, 'add_global_link_arguments': self.func_add_global_link_arguments, 'add_project_link_arguments': self.func_add_project_link_arguments, 'add_test_setup': self.func_add_test_setup, 'add_languages': self.func_add_languages, 'alias_target': self.func_alias_target, 'assert': self.func_assert, 'benchmark': self.func_benchmark, 'build_target': self.func_build_target, 'configuration_data': self.func_configuration_data, 'configure_file': self.func_configure_file, 'custom_target': self.func_custom_target, 'declare_dependency': self.func_declare_dependency, 'dependency': self.func_dependency, 'disabler': self.func_disabler, 'environment': self.func_environment, 'error': self.func_error, 'executable': self.func_executable, 'generator': self.func_generator, 'gettext': self.func_gettext, 'get_option': self.func_get_option, 'get_variable': self.func_get_variable, 'files': self.func_files, 'find_library': self.func_find_library, 'find_program': self.func_find_program, 
'include_directories': self.func_include_directories, 'import': self.func_import, 'install_data': self.func_install_data, 'install_headers': self.func_install_headers, 'install_man': self.func_install_man, 'install_subdir': self.func_install_subdir, 'is_disabler': self.func_is_disabler, 'is_variable': self.func_is_variable, 'jar': self.func_jar, 'join_paths': self.func_join_paths, 'library': self.func_library, 'message': self.func_message, 'warning': self.func_warning, 'option': self.func_option, 'project': self.func_project, 'run_target': self.func_run_target, 'run_command': self.func_run_command, 'set_variable': self.func_set_variable, 'subdir': self.func_subdir, 'subdir_done': self.func_subdir_done, 'subproject': self.func_subproject, 'summary': self.func_summary, 'shared_library': self.func_shared_lib, 'shared_module': self.func_shared_module, 'static_library': self.func_static_lib, 'both_libraries': self.func_both_lib, 'test': self.func_test, 'vcs_tag': self.func_vcs_tag }) if 'MESON_UNIT_TEST' in os.environ: self.funcs.update({'exception': self.func_exception}) def holderify(self, item): if isinstance(item, list): return [self.holderify(x) for x in item] if isinstance(item, dict): return {k: self.holderify(v) for k, v in item.items()} if isinstance(item, build.CustomTarget): return CustomTargetHolder(item, self) elif isinstance(item, (int, str, bool, Disabler)) or item is None: return item elif isinstance(item, build.Executable): return ExecutableHolder(item, self) elif isinstance(item, build.GeneratedList): return GeneratedListHolder(item) elif isinstance(item, build.RunTarget): raise RuntimeError('This is not a pipe.') elif isinstance(item, build.RunScript): raise RuntimeError('Do not do this.') elif isinstance(item, build.Data): return DataHolder(item) elif isinstance(item, dependencies.Dependency): return DependencyHolder(item, self.subproject) elif isinstance(item, dependencies.ExternalProgram): return ExternalProgramHolder(item, self.subproject) elif hasattr(item, 'held_object'): return item else: raise InterpreterException('Module returned a value of unknown type.') def process_new_values(self, invalues): invalues = listify(invalues) for v in invalues: if isinstance(v, (RunTargetHolder, CustomTargetHolder, BuildTargetHolder)): v = v.held_object if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)): self.add_target(v.name, v) elif isinstance(v, list): self.module_method_callback(v) elif isinstance(v, build.GeneratedList): pass elif isinstance(v, build.RunScript): self.build.install_scripts.append(v) elif isinstance(v, build.Data): self.build.data.append(v) elif isinstance(v, dependencies.ExternalProgram): return ExternalProgramHolder(v, self.subproject) elif isinstance(v, dependencies.InternalDependency): # FIXME: This is special cased and not ideal: # The first source is our new VapiTarget, the rest are deps self.process_new_values(v.sources[0]) elif hasattr(v, 'held_object'): pass elif isinstance(v, (int, str, bool, Disabler)): pass else: raise InterpreterException('Module returned a value of unknown type.') def module_method_callback(self, return_object): if not isinstance(return_object, ModuleReturnValue): raise InterpreterException('Bug in module, it returned an invalid object') invalues = return_object.new_objects self.process_new_values(invalues) return self.holderify(return_object.return_value) def get_build_def_files(self): return self.build_def_files def add_build_def_file(self, f): # Use relative path for files within source directory, and 
        # absolute path for system files. Skip files within build directory. Also
        # skip non-regular files (e.g. /dev/stdout). Normalize the path to avoid
        # duplicates; this is especially important to convert '/' to '\' on Windows.
        if isinstance(f, mesonlib.File):
            if f.is_built:
                return
            f = os.path.normpath(f.relative_name())
        elif os.path.isfile(f) and not f.startswith('/dev'):
            srcdir = Path(self.environment.get_source_dir())
            builddir = Path(self.environment.get_build_dir())
            f = Path(f).resolve()
            if builddir in f.parents:
                return
            if srcdir in f.parents:
                f = f.relative_to(srcdir)
            f = str(f)
        else:
            return
        if f not in self.build_def_files:
            self.build_def_files.append(f)

    def get_variables(self):
        return self.variables

    def check_stdlibs(self):
        for for_machine in MachineChoice:
            props = self.build.environment.properties[for_machine]
            for l in self.coredata.compilers[for_machine].keys():
                try:
                    di = mesonlib.stringlistify(props.get_stdlib(l))
                    if len(di) != 2:
                        raise InterpreterException('Stdlib definition for %s should have exactly two elements.' % l)
                    projname, depname = di
                    subproj = self.do_subproject(projname, 'meson', {})
                    self.build.stdlibs.host[l] = subproj.get_variable_method([depname], {})
                except KeyError:
                    pass
                except InvalidArguments:
                    pass

    @stringArgs
    @noKwargs
    def func_import(self, node, args, kwargs):
        if len(args) != 1:
            raise InvalidCode('Import takes one argument.')
        modname = args[0]
        if modname.startswith('unstable-'):
            plainname = modname.split('-', 1)[1]
            mlog.warning('Module %s has no backwards or forwards compatibility and might not exist in future releases.' % modname, location=node)
            modname = 'unstable_' + plainname
        if modname not in self.modules:
            try:
                module = importlib.import_module('mesonbuild.modules.' + modname)
            except ImportError:
                raise InvalidArguments('Module "%s" does not exist' % (modname, ))
            self.modules[modname] = module.initialize(self)
        return ModuleHolder(modname, self.modules[modname], self)

    @stringArgs
    @noKwargs
    def func_files(self, node, args, kwargs):
        return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args]

    @FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole'])
    @FeatureNewKwargs('declare_dependency', '0.54.0', ['variables'])
    @permittedKwargs(permitted_kwargs['declare_dependency'])
    @noPosargs
    def func_declare_dependency(self, node, args, kwargs):
        version = kwargs.get('version', self.project_version)
        if not isinstance(version, str):
            raise InterpreterException('Version must be a string.')
        incs = self.extract_incdirs(kwargs)
        libs = unholder(extract_as_list(kwargs, 'link_with'))
        libs_whole = unholder(extract_as_list(kwargs, 'link_whole'))
        sources = extract_as_list(kwargs, 'sources')
        sources = unholder(listify(self.source_strings_to_files(sources)))
        deps = unholder(extract_as_list(kwargs, 'dependencies'))
        compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))
        link_args = mesonlib.stringlistify(kwargs.get('link_args', []))
        variables = kwargs.get('variables', {})
        if not isinstance(variables, dict):
            raise InterpreterException('variables must be a dict.')
        if not all(isinstance(v, str) for v in variables.values()):
            # Because that is how they will come from pkg-config and cmake
            raise InterpreterException('variables values must be strings.')
        final_deps = []
        for d in deps:
            try:
                d = d.held_object
            except Exception:
                pass
            if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):
                raise InterpreterException('Dependencies must be external deps')
            final_deps.append(d)
        for l in libs:
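            # Entries in 'link_with' must be targets built by this project; external
            # libraries have to be passed via 'dependencies' instead, which is what
            # the error below enforces.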
if isinstance(l, dependencies.Dependency): raise InterpreterException('''Entries in "link_with" may only be self-built targets, external dependencies (including libraries) must go to "dependencies".''') dep = dependencies.InternalDependency(version, incs, compile_args, link_args, libs, libs_whole, sources, final_deps, variables) return DependencyHolder(dep, self.subproject) @noKwargs def func_assert(self, node, args, kwargs): if len(args) == 1: FeatureNew('assert function without message argument', '0.53.0').use(self.subproject) value = args[0] message = None elif len(args) == 2: value, message = args if not isinstance(message, str): raise InterpreterException('Assert message not a string.') else: raise InterpreterException('Assert takes between one and two arguments') if not isinstance(value, bool): raise InterpreterException('Assert value not bool.') if not value: if message is None: from .ast import AstPrinter printer = AstPrinter() node.args.arguments[0].accept(printer) message = printer.result raise InterpreterException('Assert failed: ' + message) def validate_arguments(self, args, argcount, arg_types): if argcount is not None: if argcount != len(args): raise InvalidArguments('Expected %d arguments, got %d.' % (argcount, len(args))) for actual, wanted in zip(args, arg_types): if wanted is not None: if not isinstance(actual, wanted): raise InvalidArguments('Incorrect argument type.') @FeatureNewKwargs('run_command', '0.50.0', ['env']) @FeatureNewKwargs('run_command', '0.47.0', ['check', 'capture']) @permittedKwargs(permitted_kwargs['run_command']) def func_run_command(self, node, args, kwargs): return self.run_command_impl(node, args, kwargs) def run_command_impl(self, node, args, kwargs, in_builddir=False): if len(args) < 1: raise InterpreterException('Not enough arguments') cmd, *cargs = args capture = kwargs.get('capture', True) srcdir = self.environment.get_source_dir() builddir = self.environment.get_build_dir() check = kwargs.get('check', False) if not isinstance(check, bool): raise InterpreterException('Check must be boolean.') env = self.unpack_env_kwarg(kwargs) m = 'must be a string, or the output of find_program(), files() '\ 'or configure_file(), or a compiler object; not {!r}' expanded_args = [] if isinstance(cmd, ExternalProgramHolder): cmd = cmd.held_object if isinstance(cmd, build.Executable): progname = node.args.arguments[0].value msg = 'Program {!r} was overridden with the compiled executable {!r}'\ ' and therefore cannot be used during configuration' raise InterpreterException(msg.format(progname, cmd.description())) if not cmd.found(): raise InterpreterException('command {!r} not found or not executable'.format(cmd.get_name())) elif isinstance(cmd, CompilerHolder): exelist = cmd.compiler.get_exelist() cmd = exelist[0] prog = ExternalProgram(cmd, silent=True) if not prog.found(): raise InterpreterException('Program {!r} not found ' 'or not executable'.format(cmd)) cmd = prog expanded_args = exelist[1:] else: if isinstance(cmd, mesonlib.File): cmd = cmd.absolute_path(srcdir, builddir) elif not isinstance(cmd, str): raise InterpreterException('First argument ' + m.format(cmd)) # Prefer scripts in the current source directory search_dir = os.path.join(srcdir, self.subdir) prog = ExternalProgram(cmd, silent=True, search_dir=search_dir) if not prog.found(): raise InterpreterException('Program or command {!r} not found ' 'or not executable'.format(cmd)) cmd = prog for a in listify(cargs): if isinstance(a, str): expanded_args.append(a) elif isinstance(a, mesonlib.File): 
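                # Each extra argument may be a plain string, a files() object, or a
                # find_program() result; anything else is rejected with the error below.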
expanded_args.append(a.absolute_path(srcdir, builddir)) elif isinstance(a, ExternalProgramHolder): expanded_args.append(a.held_object.get_path()) else: raise InterpreterException('Arguments ' + m.format(a)) # If any file that was used as an argument to the command # changes, we must re-run the configuration step. self.add_build_def_file(cmd.get_path()) for a in expanded_args: if not os.path.isabs(a): a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a) self.add_build_def_file(a) return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir, self.environment.get_build_command() + ['introspect'], in_builddir=in_builddir, check=check, capture=capture) @stringArgs def func_gettext(self, nodes, args, kwargs): raise InterpreterException('Gettext() function has been moved to module i18n. Import it and use i18n.gettext() instead') def func_option(self, nodes, args, kwargs): raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.') @FeatureNewKwargs('subproject', '0.38.0', ['default_options']) @permittedKwargs(permitted_kwargs['subproject']) @stringArgs def func_subproject(self, nodes, args, kwargs): if len(args) != 1: raise InterpreterException('Subproject takes exactly one argument') dirname = args[0] return self.do_subproject(dirname, 'meson', kwargs) def disabled_subproject(self, dirname, disabled_feature=None, exception=None): sub = SubprojectHolder(None, self.subproject_dir, dirname, disabled_feature=disabled_feature, exception=exception) self.subprojects[dirname] = sub return sub def do_subproject(self, dirname: str, method: str, kwargs): disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: mlog.log('Subproject', mlog.bold(dirname), ':', 'skipped: feature', mlog.bold(feature), 'disabled') return self.disabled_subproject(dirname, disabled_feature=feature) default_options = mesonlib.stringlistify(kwargs.get('default_options', [])) default_options = coredata.create_options_dict(default_options) if dirname == '': raise InterpreterException('Subproject dir name must not be empty.') if dirname[0] == '.': raise InterpreterException('Subproject dir name must not start with a period.') if '..' in dirname: raise InterpreterException('Subproject name must not contain a ".." path segment.') if os.path.isabs(dirname): raise InterpreterException('Subproject name must not be an absolute path.') if has_path_sep(dirname): mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.', location=self.current_node) if dirname in self.subproject_stack: fullstack = self.subproject_stack + [dirname] incpath = ' => '.join(fullstack) raise InvalidCode('Recursive include of subprojects: %s.' % incpath) if dirname in self.subprojects: subproject = self.subprojects[dirname] if required and not subproject.found(): raise InterpreterException('Subproject "%s/%s" required but not found.' % ( self.subproject_dir, dirname)) return subproject subproject_dir_abs = os.path.join(self.environment.get_source_dir(), self.subproject_dir) r = wrap.Resolver(subproject_dir_abs, self.coredata.get_builtin_option('wrap_mode')) try: resolved = r.resolve(dirname, method) except wrap.WrapException as e: subprojdir = os.path.join(self.subproject_dir, r.directory) if isinstance(e, wrap.WrapNotFoundException): # if the reason subproject execution failed was because # the directory doesn't exist, try to give some helpful # advice if it's a nested subproject that needs # promotion... 
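                # print_nested_info() (below) tries to point the user at nested copies
                # of this subproject elsewhere in the tree that may need promoting.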
self.print_nested_info(dirname) if not required: mlog.log(e) mlog.log('Subproject ', mlog.bold(subprojdir), 'is buildable:', mlog.red('NO'), '(disabling)') return self.disabled_subproject(dirname, exception=e) raise e subdir = os.path.join(self.subproject_dir, resolved) subdir_abs = os.path.join(subproject_dir_abs, resolved) os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True) self.global_args_frozen = True mlog.log() with mlog.nested(): mlog.log('Executing subproject', mlog.bold(dirname), 'method', mlog.bold(method), '\n') try: if method == 'meson': return self._do_subproject_meson(dirname, subdir, default_options, kwargs) elif method == 'cmake': return self._do_subproject_cmake(dirname, subdir, subdir_abs, default_options, kwargs) else: raise InterpreterException('The method {} is invalid for the subproject {}'.format(method, dirname)) # Invalid code is always an error except InvalidCode: raise except Exception as e: if not required: with mlog.nested(): # Suppress the 'ERROR:' prefix because this exception is not # fatal and VS CI treat any logs with "ERROR:" as fatal. mlog.exception(e, prefix=mlog.yellow('Exception:')) mlog.log('\nSubproject', mlog.bold(dirname), 'is buildable:', mlog.red('NO'), '(disabling)') return self.disabled_subproject(dirname, exception=e) raise e def _do_subproject_meson(self, dirname, subdir, default_options, kwargs, ast=None, build_def_files=None): with mlog.nested(): new_build = self.build.copy() subi = Interpreter(new_build, self.backend, dirname, subdir, self.subproject_dir, self.modules, default_options, ast=ast) subi.subprojects = self.subprojects subi.subproject_stack = self.subproject_stack + [dirname] current_active = self.active_projectname current_warnings_counter = mlog.log_warnings_counter mlog.log_warnings_counter = 0 subi.run() subi_warnings = mlog.log_warnings_counter mlog.log_warnings_counter = current_warnings_counter mlog.log('Subproject', mlog.bold(dirname), 'finished.') mlog.log() if 'version' in kwargs: pv = subi.project_version wanted = kwargs['version'] if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]: raise InterpreterException('Subproject %s version is %s but %s required.' 
% (dirname, pv, wanted)) self.active_projectname = current_active self.subprojects.update(subi.subprojects) self.subprojects[dirname] = SubprojectHolder(subi, self.subproject_dir, dirname, warnings=subi_warnings) # Duplicates are possible when subproject uses files from project root if build_def_files: self.build_def_files = list(set(self.build_def_files + build_def_files)) else: self.build_def_files = list(set(self.build_def_files + subi.build_def_files)) self.build.merge(subi.build) self.build.subprojects[dirname] = subi.project_version self.summary.update(subi.summary) return self.subprojects[dirname] def _do_subproject_cmake(self, dirname, subdir, subdir_abs, default_options, kwargs): with mlog.nested(): new_build = self.build.copy() prefix = self.coredata.builtins['prefix'].value cmake_options = mesonlib.stringlistify(kwargs.get('cmake_options', [])) cm_int = CMakeInterpreter(new_build, subdir, subdir_abs, prefix, new_build.environment, self.backend) cm_int.initialise(cmake_options) cm_int.analyse() # Generate a meson ast and execute it with the normal do_subproject_meson ast = cm_int.pretend_to_be_meson() mlog.log() with mlog.nested(): mlog.log('Processing generated meson AST') # Debug print the generated meson file from .ast import AstIndentationGenerator, AstPrinter printer = AstPrinter() ast.accept(AstIndentationGenerator()) ast.accept(printer) printer.post_process() meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build') with open(meson_filename, "w") as f: f.write(printer.result) mlog.log('Build file:', meson_filename) mlog.cmd_ci_include(meson_filename) mlog.log() result = self._do_subproject_meson(dirname, subdir, default_options, kwargs, ast, cm_int.bs_files) result.cm_interpreter = cm_int mlog.log() return result def get_option_internal(self, optname): raw_optname = optname if self.is_subproject(): optname = self.subproject + ':' + optname for opts in [ self.coredata.base_options, compilers.base_options, self.coredata.builtins, dict(self.coredata.get_prefixed_options_per_machine(self.coredata.builtins_per_machine)), dict(self.coredata.flatten_lang_iterator( self.coredata.get_prefixed_options_per_machine(self.coredata.compiler_options))), ]: v = opts.get(optname) if v is None or v.yielding: v = opts.get(raw_optname) if v is not None: return v try: opt = self.coredata.user_options[optname] if opt.yielding and ':' in optname and raw_optname in self.coredata.user_options: popt = self.coredata.user_options[raw_optname] if type(opt) is type(popt): opt = popt else: # Get class name, then option type as a string opt_type = opt.__class__.__name__[4:][:-6].lower() popt_type = popt.__class__.__name__[4:][:-6].lower() # This is not a hard error to avoid dependency hell, the workaround # when this happens is to simply set the subproject's option directly. mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield ' 'to parent option of type {3!r}, ignoring parent value. ' 'Use -D{2}:{0}=value to set the value for this option manually' '.'.format(raw_optname, opt_type, self.subproject, popt_type), location=self.current_node) return opt except KeyError: pass raise InterpreterException('Tried to access unknown option "%s".' 
% optname) @stringArgs @noKwargs def func_get_option(self, nodes, args, kwargs): if len(args) != 1: raise InterpreterException('Argument required for get_option.') optname = args[0] if ':' in optname: raise InterpreterException('Having a colon in option name is forbidden, ' 'projects are not allowed to directly access ' 'options of other subprojects.') opt = self.get_option_internal(optname) if isinstance(opt, coredata.UserFeatureOption): return FeatureOptionHolder(self.environment, optname, opt) elif isinstance(opt, coredata.UserOption): return opt.value return opt @noKwargs def func_configuration_data(self, node, args, kwargs): if len(args) > 1: raise InterpreterException('configuration_data takes only one optional positional arguments') elif len(args) == 1: FeatureNew('configuration_data dictionary', '0.49.0').use(self.subproject) initial_values = args[0] if not isinstance(initial_values, dict): raise InterpreterException('configuration_data first argument must be a dictionary') else: initial_values = {} return ConfigurationDataHolder(self.subproject, initial_values) def set_backend(self): # The backend is already set when parsing subprojects if self.backend is not None: return backend = self.coredata.get_builtin_option('backend') from .backend import backends self.backend = backends.get_backend_from_name(backend, self.build, self) if self.backend is None: raise InterpreterException('Unknown backend "%s".' % backend) if backend != self.backend.name: if self.backend.name.startswith('vs'): mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name)) self.coredata.set_builtin_option('backend', self.backend.name) # Only init backend options on first invocation otherwise it would # override values previously set from command line. if self.environment.first_invocation: self.coredata.init_backend_options(backend) options = {k: v for k, v in self.environment.cmd_line_options.items() if k.startswith('backend_')} self.coredata.set_options(options) @stringArgs @permittedKwargs(permitted_kwargs['project']) def func_project(self, node, args, kwargs): if len(args) < 1: raise InvalidArguments('Not enough arguments to project(). Needs at least the project name.') proj_name, *proj_langs = args if ':' in proj_name: raise InvalidArguments("Project name {!r} must not contain ':'".format(proj_name)) if 'meson_version' in kwargs: cv = coredata.version pv = kwargs['meson_version'] if not mesonlib.version_compare(cv, pv): raise InterpreterException('Meson version is %s but project requires %s' % (cv, pv)) if os.path.exists(self.option_file): oi = optinterpreter.OptionInterpreter(self.subproject) oi.process(self.option_file) self.coredata.merge_user_options(oi.options) self.add_build_def_file(self.option_file) # Do not set default_options on reconfigure otherwise it would override # values previously set from command line. That means that changing # default_options in a project will trigger a reconfigure but won't # have any effect. 
self.project_default_options = mesonlib.stringlistify(kwargs.get('default_options', [])) self.project_default_options = coredata.create_options_dict(self.project_default_options) if self.environment.first_invocation: default_options = self.project_default_options default_options.update(self.default_project_options) self.coredata.init_builtins(self.subproject) else: default_options = {} self.coredata.set_default_options(default_options, self.subproject, self.environment) if not self.is_subproject(): self.build.project_name = proj_name self.active_projectname = proj_name self.project_version = kwargs.get('version', 'undefined') if self.build.project_version is None: self.build.project_version = self.project_version proj_license = mesonlib.stringlistify(kwargs.get('license', 'unknown')) self.build.dep_manifest[proj_name] = {'version': self.project_version, 'license': proj_license} if self.subproject in self.build.projects: raise InvalidCode('Second call to project().') if not self.is_subproject() and 'subproject_dir' in kwargs: spdirname = kwargs['subproject_dir'] if not isinstance(spdirname, str): raise InterpreterException('Subproject_dir must be a string') if os.path.isabs(spdirname): raise InterpreterException('Subproject_dir must not be an absolute path.') if spdirname.startswith('.'): raise InterpreterException('Subproject_dir must not begin with a period.') if '..' in spdirname: raise InterpreterException('Subproject_dir must not contain a ".." segment.') self.subproject_dir = spdirname self.build.subproject_dir = self.subproject_dir mesonlib.project_meson_versions[self.subproject] = '' if 'meson_version' in kwargs: mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version'] self.build.projects[self.subproject] = proj_name mlog.log('Project name:', mlog.bold(proj_name)) mlog.log('Project version:', mlog.bold(self.project_version)) self.add_languages(proj_langs, True, MachineChoice.BUILD) self.add_languages(proj_langs, True, MachineChoice.HOST) self.set_backend() if not self.is_subproject(): self.check_stdlibs() @FeatureNewKwargs('add_languages', '0.54.0', ['native']) @permittedKwargs(permitted_kwargs['add_languages']) @stringArgs def func_add_languages(self, node, args, kwargs): disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: for lang in sorted(args, key=compilers.sort_clink): mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled') return False if 'native' in kwargs: return self.add_languages(args, required, self.machine_from_native_kwarg(kwargs)) else: # absent 'native' means 'both' for backwards compatibility mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.', location=self.current_node) success = self.add_languages(args, False, MachineChoice.BUILD) success &= self.add_languages(args, required, MachineChoice.HOST) return success def get_message_string_arg(self, arg): if isinstance(arg, list): argstr = stringifyUserArguments(arg) elif isinstance(arg, dict): argstr = stringifyUserArguments(arg) elif isinstance(arg, str): argstr = arg elif isinstance(arg, int): argstr = str(arg) else: raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.') return argstr @noArgsFlattening @noKwargs def func_message(self, node, args, kwargs): if len(args) > 1: FeatureNew('message with more than one argument', '0.54.0').use(self.subproject) args_str = [self.get_message_string_arg(i) for i in args] 
        self.message_impl(args_str)

    def message_impl(self, args):
        mlog.log(mlog.bold('Message:'), *args)

    @noArgsFlattening
    @FeatureNewKwargs('summary', '0.54.0', ['list_sep'])
    @permittedKwargs({'section', 'bool_yn', 'list_sep'})
    @FeatureNew('summary', '0.53.0')
    def func_summary(self, node, args, kwargs):
        if len(args) == 1:
            if not isinstance(args[0], dict):
                raise InterpreterException('Summary first argument must be dictionary.')
            values = args[0]
        elif len(args) == 2:
            if not isinstance(args[0], str):
                raise InterpreterException('Summary first argument must be string.')
            values = {args[0]: args[1]}
        else:
            raise InterpreterException('Summary accepts at most 2 arguments.')
        section = kwargs.get('section', '')
        if not isinstance(section, str):
            raise InterpreterException('Summary\'s section keyword argument must be string.')
        self.summary_impl(section, values, kwargs)

    def summary_impl(self, section, values, kwargs):
        if self.subproject not in self.summary:
            self.summary[self.subproject] = Summary(self.active_projectname, self.project_version)
        self.summary[self.subproject].add_section(section, values, kwargs)

    def _print_summary(self):
        # Add automatic 'Subprojects' section in main project.
        all_subprojects = collections.OrderedDict()
        for name, subp in sorted(self.subprojects.items()):
            value = subp.found()
            if subp.disabled_feature:
                value = [value, 'Feature {!r} disabled'.format(subp.disabled_feature)]
            elif subp.exception:
                value = [value, str(subp.exception)]
            elif subp.warnings > 0:
                value = [value, '{} warnings'.format(subp.warnings)]
            all_subprojects[name] = value
        if all_subprojects:
            self.summary_impl('Subprojects', all_subprojects,
                              {'bool_yn': True,
                               'list_sep': ' ',
                               })
        # Print all summaries, main project last.
        mlog.log('')  # newline
        main_summary = self.summary.pop('', None)
        for _, summary in sorted(self.summary.items()):
            summary.dump()
        if main_summary:
            main_summary.dump()

    @noArgsFlattening
    @FeatureNew('warning', '0.44.0')
    @noKwargs
    def func_warning(self, node, args, kwargs):
        if len(args) > 1:
            FeatureNew('warning with more than one argument', '0.54.0').use(self.subproject)
        args_str = [self.get_message_string_arg(i) for i in args]
        mlog.warning(*args_str, location=node)

    @noKwargs
    def func_error(self, node, args, kwargs):
        self.validate_arguments(args, 1, [str])
        raise InterpreterException('Problem encountered: ' + args[0])

    @noKwargs
    def func_exception(self, node, args, kwargs):
        self.validate_arguments(args, 0, [])
        raise Exception()

    def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool:
        success = self.add_languages_for(args, required, for_machine)
        if not self.coredata.is_cross_build():
            self.coredata.copy_build_options_from_regular_ones()
        self._redetect_machines()
        return success

    def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool:
        if for_machine != MachineChoice.HOST:
            return False
        if not self.environment.is_cross_build():
            return False
        should = self.environment.properties.host.get('skip_sanity_check', False)
        if not isinstance(should, bool):
            raise InterpreterException('Option skip_sanity_check must be a boolean.')
        return should

    def add_languages_for(self, args, required, for_machine: MachineChoice):
        langs = set(self.coredata.compilers[for_machine].keys())
        langs.update(args)
        if 'vala' in langs:
            if 'c' not in langs:
                raise InterpreterException('Compiling Vala requires C. Add C to your project languages and rerun Meson.')
        success = True
        for lang in sorted(args, key=compilers.sort_clink):
            lang = lang.lower()
            clist = self.coredata.compilers[for_machine]
            machine_name = for_machine.get_lower_case_name()
            if lang in clist:
                comp = clist[lang]
            else:
                try:
                    comp = self.environment.detect_compiler_for(lang, for_machine)
                    if comp is None:
                        raise InvalidArguments('Tried to use unknown language "%s".' % lang)
                    if self.should_skip_sanity_check(for_machine):
                        mlog.log_once('Cross compiler sanity tests disabled via the cross file.')
                    else:
                        comp.sanity_check(self.environment.get_scratch_dir(), self.environment)
                except Exception:
                    if not required:
                        mlog.log('Compiler for language', mlog.bold(lang),
                                 'for the', machine_name, 'machine not found.')
                        success = False
                        continue
                    else:
                        raise
            if for_machine == MachineChoice.HOST or self.environment.is_cross_build():
                logger_fun = mlog.log
            else:
                logger_fun = mlog.debug
            logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:',
                       mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string())
            if comp.linker is not None:
                logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:',
                           mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version)
            self.build.ensure_static_linker(comp)
        return success

    def program_from_file_for(self, for_machine, prognames, silent):
        for p in unholder(prognames):
            if isinstance(p, mesonlib.File):
                continue  # Always points to a local (i.e. self generated) file.
            if not isinstance(p, str):
                raise InterpreterException('Executable name must be a string')
            prog = ExternalProgram.from_bin_list(self.environment, for_machine, p)
            if prog.found():
                return ExternalProgramHolder(prog, self.subproject)
        return None

    def program_from_system(self, args, search_dirs, silent=False):
        # Search for scripts relative to current subdir.
        # Do not cache found programs because find_program('foobar')
        # might give different results when run from different source dirs.
        source_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
        for exename in args:
            if isinstance(exename, mesonlib.File):
                if exename.is_built:
                    search_dir = os.path.join(self.environment.get_build_dir(), exename.subdir)
                else:
                    search_dir = os.path.join(self.environment.get_source_dir(), exename.subdir)
                exename = exename.fname
                extra_search_dirs = []
            elif isinstance(exename, str):
                search_dir = source_dir
                extra_search_dirs = search_dirs
            else:
                raise InvalidArguments('find_program only accepts strings and '
                                       'files, not {!r}'.format(exename))
            extprog = dependencies.ExternalProgram(exename, search_dir=search_dir,
                                                   extra_search_dirs=extra_search_dirs,
                                                   silent=silent)
            progobj = ExternalProgramHolder(extprog, self.subproject)
            if progobj.found():
                return progobj

    def program_from_overrides(self, command_names, silent=False):
        for name in command_names:
            if not isinstance(name, str):
                continue
            if name in self.build.find_overrides:
                exe = self.build.find_overrides[name]
                if not silent:
                    mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'),
                             '(overridden: %s)' % exe.description())
                return ExternalProgramHolder(exe, self.subproject, self.backend)
        return None

    def store_name_lookups(self, command_names):
        for name in command_names:
            if isinstance(name, str):
                self.build.searched_programs.add(name)

    def add_find_program_override(self, name, exe):
        if name in self.build.searched_programs:
            raise InterpreterException('Tried to override finding of executable "%s" which has already been found.'
% name) if name in self.build.find_overrides: raise InterpreterException('Tried to override executable "%s" which has already been overridden.' % name) self.build.find_overrides[name] = exe # TODO update modules to always pass `for_machine`. It is bad-form to assume # the host machine. def find_program_impl(self, args, for_machine: MachineChoice = MachineChoice.HOST, required=True, silent=True, wanted='', search_dirs=None): if not isinstance(args, list): args = [args] progobj = self.program_from_overrides(args, silent=silent) if progobj is None: progobj = self.program_from_file_for(for_machine, args, silent=silent) if progobj is None: progobj = self.program_from_system(args, search_dirs, silent=silent) if progobj is None and args[0].endswith('python3'): prog = dependencies.ExternalProgram('python3', mesonlib.python_command, silent=True) progobj = ExternalProgramHolder(prog, self.subproject) if required and (progobj is None or not progobj.found()): raise InvalidArguments('Program(s) {!r} not found or not executable'.format(args)) if progobj is None: return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject) # Only store successful lookups self.store_name_lookups(args) if wanted: version = progobj.get_version(self) is_found, not_found, found = mesonlib.version_compare_many(version, wanted) if not is_found: mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'), 'found {!r} but need:'.format(version), ', '.join(["'{}'".format(e) for e in not_found])) if required: m = 'Invalid version of program, need {!r} {!r} found {!r}.' raise InvalidArguments(m.format(progobj.get_name(), not_found, version)) return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject) return progobj @FeatureNewKwargs('find_program', '0.53.0', ['dirs']) @FeatureNewKwargs('find_program', '0.52.0', ['version']) @FeatureNewKwargs('find_program', '0.49.0', ['disabler']) @disablerIfNotFound @permittedKwargs(permitted_kwargs['find_program']) def func_find_program(self, node, args, kwargs): if not args: raise InterpreterException('No program name specified.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: mlog.log('Program', mlog.bold(' '.join(args)), 'skipped: feature', mlog.bold(feature), 'disabled') return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject) search_dirs = extract_search_dirs(kwargs) wanted = mesonlib.stringlistify(kwargs.get('version', [])) for_machine = self.machine_from_native_kwarg(kwargs) return self.find_program_impl(args, for_machine, required=required, silent=False, wanted=wanted, search_dirs=search_dirs) def func_find_library(self, node, args, kwargs): raise InvalidCode('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n' 'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n' 'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n' ) def _find_cached_dep(self, name, display_name, kwargs): # Check if we want this as a build-time / build machine or runt-time / # host machine dep. 
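        # Added editorial note (not in the original source): the machine is
        # selected from the `native:` keyword of the dependency() call, e.g. a
        # meson.build file might contain (illustrative only):
        #     zdep_build = dependency('zlib', native: true)   # build machine
        #     zdep_host  = dependency('zlib')                 # host machine (default)
        # The actual mapping is done by machine_from_native_kwarg() further below.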
for_machine = self.machine_from_native_kwarg(kwargs) identifier = dependencies.get_dep_identifier(name, kwargs) wanted_vers = mesonlib.stringlistify(kwargs.get('version', [])) override = self.build.dependency_overrides[for_machine].get(identifier) if override: info = [mlog.blue('(overridden)' if override.explicit else '(cached)')] cached_dep = override.dep # We don't implicitly override not-found dependencies, but user could # have explicitly called meson.override_dependency() with a not-found # dep. if not cached_dep.found(): mlog.log('Dependency', mlog.bold(display_name), 'found:', mlog.red('NO'), *info) return identifier, cached_dep found_vers = cached_dep.get_version() if not self.check_version(wanted_vers, found_vers): mlog.log('Dependency', mlog.bold(name), 'found:', mlog.red('NO'), 'found', mlog.normal_cyan(found_vers), 'but need:', mlog.bold(', '.join(["'{}'".format(e) for e in wanted_vers])), *info) return identifier, NotFoundDependency(self.environment) else: info = [mlog.blue('(cached)')] cached_dep = self.coredata.deps[for_machine].get(identifier) if cached_dep: found_vers = cached_dep.get_version() if not self.check_version(wanted_vers, found_vers): return identifier, None if cached_dep: if found_vers: info = [mlog.normal_cyan(found_vers), *info] mlog.log('Dependency', mlog.bold(display_name), 'found:', mlog.green('YES'), *info) return identifier, cached_dep return identifier, None @staticmethod def check_version(wanted, found): if not wanted: return True if found == 'undefined' or not mesonlib.version_compare_many(found, wanted)[0]: return False return True def notfound_dependency(self): return DependencyHolder(NotFoundDependency(self.environment), self.subproject) def verify_fallback_consistency(self, dirname, varname, cached_dep): subi = self.subprojects.get(dirname) if not cached_dep or not varname or not subi or not cached_dep.found(): return dep = subi.get_variable_method([varname], {}) if dep.held_object != cached_dep: m = 'Inconsistency: Subproject has overridden the dependency with another variable than {!r}' raise DependencyException(m.format(varname)) def get_subproject_dep(self, name, display_name, dirname, varname, kwargs): required = kwargs.get('required', True) wanted = mesonlib.stringlistify(kwargs.get('version', [])) subproj_path = os.path.join(self.subproject_dir, dirname) dep = self.notfound_dependency() try: subproject = self.subprojects[dirname] _, cached_dep = self._find_cached_dep(name, display_name, kwargs) if varname is None: # Assuming the subproject overridden the dependency we want if cached_dep: if required and not cached_dep.found(): m = 'Dependency {!r} is not satisfied' raise DependencyException(m.format(display_name)) return DependencyHolder(cached_dep, self.subproject) else: m = 'Subproject {} did not override dependency {}' raise DependencyException(m.format(subproj_path, display_name)) if subproject.found(): self.verify_fallback_consistency(dirname, varname, cached_dep) dep = self.subprojects[dirname].get_variable_method([varname], {}) except InvalidArguments: pass if not isinstance(dep, DependencyHolder): raise InvalidCode('Fetched variable {!r} in the subproject {!r} is ' 'not a dependency object.'.format(varname, dirname)) if not dep.found(): if required: raise DependencyException('Could not find dependency {} in subproject {}' ''.format(varname, dirname)) # If the dependency is not required, don't raise an exception mlog.log('Dependency', mlog.bold(display_name), 'from subproject', mlog.bold(subproj_path), 'found:', mlog.red('NO')) 
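            # Added note (editorial): for an optional dependency the not-found
            # holder is returned instead of raising, so a meson.build file can
            # still branch on the result, for example (illustrative only):
            #     foo_dep = dependency('foo', fallback: ['foo', 'foo_dep'], required: false)
            #     if foo_dep.found()
            #         ...
            #     endif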
            return dep
        found = dep.held_object.get_version()
        if not self.check_version(wanted, found):
            if required:
                raise DependencyException('Version {} of subproject dependency {} already '
                                          'cached, requested incompatible version {} for '
                                          'dep {}'.format(found, dirname, wanted, display_name))
            mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                     mlog.bold(subproj_path), 'found:', mlog.red('NO'),
                     'found', mlog.normal_cyan(found), 'but need:',
                     mlog.bold(', '.join(["'{}'".format(e) for e in wanted])))
            return self.notfound_dependency()
        found = mlog.normal_cyan(found) if found else None
        mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                 mlog.bold(subproj_path), 'found:', mlog.green('YES'), found)
        return dep

    def _handle_featurenew_dependencies(self, name):
        'Do a feature check on dependencies used by this subproject'
        if name == 'mpi':
            FeatureNew('MPI Dependency', '0.42.0').use(self.subproject)
        elif name == 'pcap':
            FeatureNew('Pcap Dependency', '0.42.0').use(self.subproject)
        elif name == 'vulkan':
            FeatureNew('Vulkan Dependency', '0.42.0').use(self.subproject)
        elif name == 'libwmf':
            FeatureNew('LibWMF Dependency', '0.44.0').use(self.subproject)
        elif name == 'openmp':
            FeatureNew('OpenMP Dependency', '0.46.0').use(self.subproject)

    @FeatureNewKwargs('dependency', '0.54.0', ['components'])
    @FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
    @FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
    @FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
    @FeatureNewKwargs('dependency', '0.40.0', ['method'])
    @FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
    @disablerIfNotFound
    @permittedKwargs(permitted_kwargs['dependency'])
    def func_dependency(self, node, args, kwargs):
        self.validate_arguments(args, 1, [str])
        name = args[0]
        display_name = name if name else '(anonymous)'
        mods = extract_as_list(kwargs, 'modules')
        if mods:
            display_name += ' (modules: {})'.format(', '.join(str(i) for i in mods))
        not_found_message = kwargs.get('not_found_message', '')
        if not isinstance(not_found_message, str):
            raise InvalidArguments('The not_found_message must be a string.')
        try:
            d = self.dependency_impl(name, display_name, kwargs)
        except Exception:
            if not_found_message:
                self.message_impl([not_found_message])
            raise
        if not d.found() and not_found_message:
            self.message_impl([not_found_message])
        # Override this dependency to have consistent results in subsequent
        # dependency lookups.
        if name and d.found():
            for_machine = self.machine_from_native_kwarg(kwargs)
            identifier = dependencies.get_dep_identifier(name, kwargs)
            if identifier not in self.build.dependency_overrides[for_machine]:
                self.build.dependency_overrides[for_machine][identifier] = \
                    build.DependencyOverride(d.held_object, node, explicit=False)
        return d

    def dependency_impl(self, name, display_name, kwargs):
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            mlog.log('Dependency', mlog.bold(display_name), 'skipped: feature', mlog.bold(feature), 'disabled')
            return self.notfound_dependency()

        has_fallback = 'fallback' in kwargs
        if 'default_options' in kwargs and not has_fallback:
            mlog.warning('The "default_options" keyword argument does nothing without a "fallback" keyword argument.',
                         location=self.current_node)

        # writing just "dependency('')" is an error, because it can only fail
        if name == '' and required and not has_fallback:
            raise InvalidArguments('Dependency is both required and not-found')

        if '<' in name or '>' in name or '=' in name:
            raise InvalidArguments('Characters <, > and = are forbidden in dependency names. To specify '
                                   'version requirements use the \'version\' keyword argument instead.')

        identifier, cached_dep = self._find_cached_dep(name, display_name, kwargs)
        if cached_dep:
            if has_fallback:
                dirname, varname = self.get_subproject_infos(kwargs)
                self.verify_fallback_consistency(dirname, varname, cached_dep)
            if required and not cached_dep.found():
                m = 'Dependency {!r} was already checked and was not found'
                raise DependencyException(m.format(display_name))
            return DependencyHolder(cached_dep, self.subproject)

        # If the dependency has already been configured, possibly by
        # a higher level project, try to use it first.
        if has_fallback:
            dirname, varname = self.get_subproject_infos(kwargs)
            if dirname in self.subprojects:
                return self.get_subproject_dep(name, display_name, dirname, varname, kwargs)

        wrap_mode = self.coredata.get_builtin_option('wrap_mode')
        forcefallback = wrap_mode == WrapMode.forcefallback and has_fallback
        if name != '' and not forcefallback:
            self._handle_featurenew_dependencies(name)
            kwargs['required'] = required and not has_fallback
            dep = dependencies.find_external_dependency(name, self.environment, kwargs)
            kwargs['required'] = required
            # Only store found-deps in the cache
            # Never add fallback deps to self.coredata.deps since we
            # cannot cache them. They must always be evaluated else
            # we won't actually read all the build files.
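        # Added illustration (editorial, not from this file): wrap_mode is the
        # builtin option consulted above; it can be set at configure time, e.g.
        #     meson setup builddir --wrap-mode=forcefallback
        # which forces every dependency that declares a fallback to use its
        # subproject, while --wrap-mode=nofallback disables fallbacks entirely.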
if dep.found(): for_machine = self.machine_from_native_kwarg(kwargs) self.coredata.deps[for_machine].put(identifier, dep) return DependencyHolder(dep, self.subproject) if has_fallback: return self.dependency_fallback(name, display_name, kwargs) return self.notfound_dependency() @FeatureNew('disabler', '0.44.0') @noKwargs @noPosargs def func_disabler(self, node, args, kwargs): return Disabler() def print_nested_info(self, dependency_name): message = ['Dependency', mlog.bold(dependency_name), 'not found but it is available in a sub-subproject.\n' + 'To use it in the current project, promote it by going in the project source\n' 'root and issuing'] sprojs = mesonlib.detect_subprojects('subprojects', self.source_root) if dependency_name not in sprojs: return found = sprojs[dependency_name] if len(found) > 1: message.append('one of the following commands:') else: message.append('the following command:') command_templ = '\nmeson wrap promote {}' for l in found: message.append(mlog.bold(command_templ.format(l[len(self.source_root) + 1:]))) mlog.warning(*message, location=self.current_node) def get_subproject_infos(self, kwargs): fbinfo = mesonlib.stringlistify(kwargs['fallback']) if len(fbinfo) == 1: FeatureNew('Fallback without variable name', '0.53.0').use(self.subproject) return fbinfo[0], None elif len(fbinfo) != 2: raise InterpreterException('Fallback info must have one or two items.') return fbinfo def dependency_fallback(self, name, display_name, kwargs): required = kwargs.get('required', True) if self.coredata.get_builtin_option('wrap_mode') == WrapMode.nofallback: mlog.log('Not looking for a fallback subproject for the dependency', mlog.bold(display_name), 'because:\nUse of fallback ' 'dependencies is disabled.') if required: m = 'Dependency {!r} not found and fallback is disabled' raise DependencyException(m.format(display_name)) return self.notfound_dependency() elif self.coredata.get_builtin_option('wrap_mode') == WrapMode.forcefallback: mlog.log('Looking for a fallback subproject for the dependency', mlog.bold(display_name), 'because:\nUse of fallback dependencies is forced.') else: mlog.log('Looking for a fallback subproject for the dependency', mlog.bold(display_name)) dirname, varname = self.get_subproject_infos(kwargs) sp_kwargs = { 'default_options': kwargs.get('default_options', []), 'required': required, } self.do_subproject(dirname, 'meson', sp_kwargs) return self.get_subproject_dep(name, display_name, dirname, varname, kwargs) @FeatureNewKwargs('executable', '0.42.0', ['implib']) @permittedKwargs(permitted_kwargs['executable']) def func_executable(self, node, args, kwargs): return self.build_target(node, args, kwargs, ExecutableHolder) @permittedKwargs(permitted_kwargs['static_library']) def func_static_lib(self, node, args, kwargs): return self.build_target(node, args, kwargs, StaticLibraryHolder) @permittedKwargs(permitted_kwargs['shared_library']) def func_shared_lib(self, node, args, kwargs): holder = self.build_target(node, args, kwargs, SharedLibraryHolder) holder.held_object.shared_library_only = True return holder @permittedKwargs(permitted_kwargs['both_libraries']) def func_both_lib(self, node, args, kwargs): return self.build_both_libraries(node, args, kwargs) @FeatureNew('shared_module', '0.37.0') @permittedKwargs(permitted_kwargs['shared_module']) def func_shared_module(self, node, args, kwargs): return self.build_target(node, args, kwargs, SharedModuleHolder) @permittedKwargs(permitted_kwargs['library']) def func_library(self, node, args, kwargs): return 
self.build_library(node, args, kwargs) @permittedKwargs(permitted_kwargs['jar']) def func_jar(self, node, args, kwargs): return self.build_target(node, args, kwargs, JarHolder) @FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options']) @permittedKwargs(permitted_kwargs['build_target']) def func_build_target(self, node, args, kwargs): if 'target_type' not in kwargs: raise InterpreterException('Missing target_type keyword argument') target_type = kwargs.pop('target_type') if target_type == 'executable': return self.build_target(node, args, kwargs, ExecutableHolder) elif target_type == 'shared_library': return self.build_target(node, args, kwargs, SharedLibraryHolder) elif target_type == 'shared_module': FeatureNew('build_target(target_type: \'shared_module\')', '0.51.0').use(self.subproject) return self.build_target(node, args, kwargs, SharedModuleHolder) elif target_type == 'static_library': return self.build_target(node, args, kwargs, StaticLibraryHolder) elif target_type == 'both_libraries': return self.build_both_libraries(node, args, kwargs) elif target_type == 'library': return self.build_library(node, args, kwargs) elif target_type == 'jar': return self.build_target(node, args, kwargs, JarHolder) else: raise InterpreterException('Unknown target_type.') @permittedKwargs(permitted_kwargs['vcs_tag']) def func_vcs_tag(self, node, args, kwargs): if 'input' not in kwargs or 'output' not in kwargs: raise InterpreterException('Keyword arguments input and output must exist') if 'fallback' not in kwargs: FeatureNew('Optional fallback in vcs_tag', '0.41.0').use(self.subproject) fallback = kwargs.pop('fallback', self.project_version) if not isinstance(fallback, str): raise InterpreterException('Keyword argument fallback must be a string.') replace_string = kwargs.pop('replace_string', '@VCS_TAG@') regex_selector = '(.*)' # default regex selector for custom command: use complete output vcs_cmd = kwargs.get('command', None) if vcs_cmd and not isinstance(vcs_cmd, list): vcs_cmd = [vcs_cmd] source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir)) if vcs_cmd: # Is the command an executable in path or maybe a script in the source tree? vcs_cmd[0] = shutil.which(vcs_cmd[0]) or os.path.join(source_dir, vcs_cmd[0]) else: vcs = mesonlib.detect_vcs(source_dir) if vcs: mlog.log('Found %s repository at %s' % (vcs['name'], vcs['wc_dir'])) vcs_cmd = vcs['get_rev'].split() regex_selector = vcs['rev_regex'] else: vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force to use the fallback string # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command... 
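        # Hedged usage sketch for the command assembled below (a typical
        # meson.build call, assumed for illustration; file names are made up):
        #     vcs_tag(input: 'version.h.in', output: 'version.h', fallback: '1.0.0')
        # Any @VCS_TAG@ occurrence in the input is replaced with the detected
        # revision, or with the fallback string when no VCS is found.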
kwargs['command'] = self.environment.get_build_command() + \ ['--internal', 'vcstagger', '@INPUT0@', '@OUTPUT0@', fallback, source_dir, replace_string, regex_selector] + vcs_cmd kwargs.setdefault('build_by_default', True) kwargs.setdefault('build_always_stale', True) return self._func_custom_target_impl(node, [kwargs['output']], kwargs) @FeatureNew('subdir_done', '0.46.0') @stringArgs def func_subdir_done(self, node, args, kwargs): if len(kwargs) > 0: raise InterpreterException('exit does not take named arguments') if len(args) > 0: raise InterpreterException('exit does not take any arguments') raise SubdirDoneRequest() @stringArgs @FeatureNewKwargs('custom_target', '0.48.0', ['console']) @FeatureNewKwargs('custom_target', '0.47.0', ['install_mode', 'build_always_stale']) @FeatureNewKwargs('custom_target', '0.40.0', ['build_by_default']) @permittedKwargs(permitted_kwargs['custom_target']) def func_custom_target(self, node, args, kwargs): if len(args) != 1: raise InterpreterException('custom_target: Only one positional argument is allowed, and it must be a string name') if 'depfile' in kwargs and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']): FeatureNew('substitutions in custom_target depfile', '0.47.0').use(self.subproject) return self._func_custom_target_impl(node, args, kwargs) def _func_custom_target_impl(self, node, args, kwargs): 'Implementation-only, without FeatureNew checks, for internal use' name = args[0] kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) if 'input' in kwargs: try: kwargs['input'] = self.source_strings_to_files(extract_as_list(kwargs, 'input')) except mesonlib.MesonException: mlog.warning('''Custom target input \'%s\' can\'t be converted to File object(s). This will become a hard error in the future.''' % kwargs['input'], location=self.current_node) tg = CustomTargetHolder(build.CustomTarget(name, self.subdir, self.subproject, kwargs, backend=self.backend), self) self.add_target(name, tg.held_object) return tg @permittedKwargs(permitted_kwargs['run_target']) def func_run_target(self, node, args, kwargs): if len(args) > 1: raise InvalidCode('Run_target takes only one positional argument: the target name.') elif len(args) == 1: if 'command' not in kwargs: raise InterpreterException('Missing "command" keyword argument') all_args = extract_as_list(kwargs, 'command') deps = unholder(extract_as_list(kwargs, 'depends')) else: raise InterpreterException('Run_target needs at least one positional argument.') cleaned_args = [] for i in unholder(listify(all_args)): if not isinstance(i, (str, build.BuildTarget, build.CustomTarget, dependencies.ExternalProgram, mesonlib.File)): mlog.debug('Wrong type:', str(i)) raise InterpreterException('Invalid argument to run_target.') if isinstance(i, dependencies.ExternalProgram) and not i.found(): raise InterpreterException('Tried to use non-existing executable {!r}'.format(i.name)) cleaned_args.append(i) name = args[0] if not isinstance(name, str): raise InterpreterException('First argument must be a string.') cleaned_deps = [] for d in deps: if not isinstance(d, (build.BuildTarget, build.CustomTarget)): raise InterpreterException('Depends items must be build targets.') cleaned_deps.append(d) command, *cmd_args = cleaned_args tg = RunTargetHolder(build.RunTarget(name, command, cmd_args, cleaned_deps, self.subdir, self.subproject), self) self.add_target(name, tg.held_object) full_name = (self.subproject, name) assert(full_name not in self.build.run_target_names) 
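        # Added example of the kind of call this function implements
        # (hypothetical script name, shown only for illustration):
        #     run_target('lint', command: ['scripts/lint.sh', meson.current_source_dir()])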
self.build.run_target_names.add(full_name) return tg @FeatureNew('alias_target', '0.52.0') @noKwargs def func_alias_target(self, node, args, kwargs): if len(args) < 2: raise InvalidCode('alias_target takes at least 2 arguments.') name = args[0] if not isinstance(name, str): raise InterpreterException('First argument must be a string.') deps = unholder(listify(args[1:])) for d in deps: if not isinstance(d, (build.BuildTarget, build.CustomTarget)): raise InterpreterException('Depends items must be build targets.') tg = RunTargetHolder(build.AliasTarget(name, deps, self.subdir, self.subproject), self) self.add_target(name, tg.held_object) return tg @permittedKwargs(permitted_kwargs['generator']) def func_generator(self, node, args, kwargs): gen = GeneratorHolder(self, args, kwargs) self.generators.append(gen) return gen @FeatureNewKwargs('benchmark', '0.46.0', ['depends']) @FeatureNewKwargs('benchmark', '0.52.0', ['priority']) @permittedKwargs(permitted_kwargs['benchmark']) def func_benchmark(self, node, args, kwargs): # is_parallel isn't valid here, so make sure it isn't passed if 'is_parallel' in kwargs: del kwargs['is_parallel'] self.add_test(node, args, kwargs, False) @FeatureNewKwargs('test', '0.46.0', ['depends']) @FeatureNewKwargs('test', '0.52.0', ['priority']) @permittedKwargs(permitted_kwargs['test']) def func_test(self, node, args, kwargs): if kwargs.get('protocol') == 'gtest': FeatureNew('"gtest" protocol for tests', '0.55.0').use(self.subproject) self.add_test(node, args, kwargs, True) def unpack_env_kwarg(self, kwargs) -> build.EnvironmentVariables: envlist = kwargs.get('env', EnvironmentVariablesHolder()) if isinstance(envlist, EnvironmentVariablesHolder): env = envlist.held_object elif isinstance(envlist, dict): FeatureNew('environment dictionary', '0.52.0').use(self.subproject) env = EnvironmentVariablesHolder(envlist) env = env.held_object else: envlist = listify(envlist) # Convert from array to environment object env = EnvironmentVariablesHolder(envlist) env = env.held_object return env def add_test(self, node, args, kwargs, is_base_test): if len(args) != 2: raise InterpreterException('test expects 2 arguments, {} given'.format(len(args))) if not isinstance(args[0], str): raise InterpreterException('First argument of test must be a string.') exe = args[1] if not isinstance(exe, (ExecutableHolder, JarHolder, ExternalProgramHolder)): if isinstance(exe, mesonlib.File): exe = self.func_find_program(node, args[1], {}) else: raise InterpreterException('Second argument must be executable.') par = kwargs.get('is_parallel', True) if not isinstance(par, bool): raise InterpreterException('Keyword argument is_parallel must be a boolean.') cmd_args = unholder(extract_as_list(kwargs, 'args')) for i in cmd_args: if not isinstance(i, (str, mesonlib.File, build.Target)): raise InterpreterException('Command line arguments must be strings, files or targets.') env = self.unpack_env_kwarg(kwargs) should_fail = kwargs.get('should_fail', False) if not isinstance(should_fail, bool): raise InterpreterException('Keyword argument should_fail must be a boolean.') timeout = kwargs.get('timeout', 30) if 'workdir' in kwargs: workdir = kwargs['workdir'] if not isinstance(workdir, str): raise InterpreterException('Workdir keyword argument must be a string.') if not os.path.isabs(workdir): raise InterpreterException('Workdir keyword argument must be an absolute path.') else: workdir = None if not isinstance(timeout, int): raise InterpreterException('Timeout must be an integer.') protocol = 
kwargs.get('protocol', 'exitcode') if protocol not in {'exitcode', 'tap', 'gtest'}: raise InterpreterException('Protocol must be "exitcode", "tap", or "gtest".') suite = [] prj = self.subproject if self.is_subproject() else self.build.project_name for s in mesonlib.stringlistify(kwargs.get('suite', '')): if len(s) > 0: s = ':' + s suite.append(prj.replace(' ', '_').replace(':', '_') + s) depends = unholder(extract_as_list(kwargs, 'depends')) for dep in depends: if not isinstance(dep, (build.CustomTarget, build.BuildTarget)): raise InterpreterException('Depends items must be build targets.') priority = kwargs.get('priority', 0) if not isinstance(priority, int): raise InterpreterException('Keyword argument priority must be an integer.') t = Test(args[0], prj, suite, exe.held_object, depends, par, cmd_args, env, should_fail, timeout, workdir, protocol, priority) if is_base_test: self.build.tests.append(t) mlog.debug('Adding test', mlog.bold(args[0], True)) else: self.build.benchmarks.append(t) mlog.debug('Adding benchmark', mlog.bold(args[0], True)) @FeatureNewKwargs('install_headers', '0.47.0', ['install_mode']) @permittedKwargs(permitted_kwargs['install_headers']) def func_install_headers(self, node, args, kwargs): source_files = self.source_strings_to_files(args) kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) h = Headers(source_files, kwargs) self.build.headers.append(h) return h @FeatureNewKwargs('install_man', '0.47.0', ['install_mode']) @permittedKwargs(permitted_kwargs['install_man']) def func_install_man(self, node, args, kwargs): fargs = self.source_strings_to_files(args) kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) m = Man(fargs, kwargs) self.build.man.append(m) return m @FeatureNewKwargs('subdir', '0.44.0', ['if_found']) @permittedKwargs(permitted_kwargs['subdir']) def func_subdir(self, node, args, kwargs): self.validate_arguments(args, 1, [str]) mesonlib.check_direntry_issues(args) if '..' in args[0]: raise InvalidArguments('Subdir contains ..') if self.subdir == '' and args[0] == self.subproject_dir: raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.') if self.subdir == '' and args[0].startswith('meson-'): raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().') for i in mesonlib.extract_as_list(kwargs, 'if_found'): if not hasattr(i, 'found_method'): raise InterpreterException('Object used in if_found does not have a found method.') if not i.found_method([], {}): return prev_subdir = self.subdir subdir = os.path.join(prev_subdir, args[0]) if os.path.isabs(subdir): raise InvalidArguments('Subdir argument must be a relative path.') absdir = os.path.join(self.environment.get_source_dir(), subdir) symlinkless_dir = os.path.realpath(absdir) if symlinkless_dir in self.visited_subdirs: raise InvalidArguments('Tried to enter directory "%s", which has already been visited.' 
% subdir) self.visited_subdirs[symlinkless_dir] = True self.subdir = subdir os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True) buildfilename = os.path.join(self.subdir, environment.build_filename) self.build_def_files.append(buildfilename) absname = os.path.join(self.environment.get_source_dir(), buildfilename) if not os.path.isfile(absname): self.subdir = prev_subdir raise InterpreterException("Non-existent build file '{!s}'".format(buildfilename)) with open(absname, encoding='utf8') as f: code = f.read() assert(isinstance(code, str)) try: codeblock = mparser.Parser(code, absname).parse() except mesonlib.MesonException as me: me.file = absname raise me try: self.evaluate_codeblock(codeblock) except SubdirDoneRequest: pass self.subdir = prev_subdir def _get_kwarg_install_mode(self, kwargs): if kwargs.get('install_mode', None) is None: return None install_mode = [] mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int)) for m in mode: # We skip any arguments that are set to `false` if m is False: m = None install_mode.append(m) if len(install_mode) > 3: raise InvalidArguments('Keyword argument install_mode takes at ' 'most 3 arguments.') if len(install_mode) > 0 and install_mode[0] is not None and \ not isinstance(install_mode[0], str): raise InvalidArguments('Keyword argument install_mode requires the ' 'permissions arg to be a string or false') return FileMode(*install_mode) @FeatureNewKwargs('install_data', '0.46.0', ['rename']) @FeatureNewKwargs('install_data', '0.38.0', ['install_mode']) @permittedKwargs(permitted_kwargs['install_data']) def func_install_data(self, node, args, kwargs): kwsource = mesonlib.stringlistify(kwargs.get('sources', [])) raw_sources = args + kwsource sources = [] source_strings = [] for s in raw_sources: if isinstance(s, mesonlib.File): sources.append(s) elif isinstance(s, str): source_strings.append(s) else: raise InvalidArguments('Argument must be string or file.') sources += self.source_strings_to_files(source_strings) install_dir = kwargs.get('install_dir', None) if not isinstance(install_dir, (str, type(None))): raise InvalidArguments('Keyword argument install_dir not a string.') install_mode = self._get_kwarg_install_mode(kwargs) rename = kwargs.get('rename', None) data = DataHolder(build.Data(sources, install_dir, install_mode, rename)) self.build.data.append(data.held_object) return data @FeatureNewKwargs('install_subdir', '0.42.0', ['exclude_files', 'exclude_directories']) @FeatureNewKwargs('install_subdir', '0.38.0', ['install_mode']) @permittedKwargs(permitted_kwargs['install_subdir']) @stringArgs def func_install_subdir(self, node, args, kwargs): if len(args) != 1: raise InvalidArguments('Install_subdir requires exactly one argument.') subdir = args[0] if 'install_dir' not in kwargs: raise InvalidArguments('Missing keyword argument install_dir') install_dir = kwargs['install_dir'] if not isinstance(install_dir, str): raise InvalidArguments('Keyword argument install_dir not a string.') if 'strip_directory' in kwargs: if not isinstance(kwargs['strip_directory'], bool): raise InterpreterException('"strip_directory" keyword must be a boolean.') strip_directory = kwargs['strip_directory'] else: strip_directory = False if 'exclude_files' in kwargs: exclude = extract_as_list(kwargs, 'exclude_files') for f in exclude: if not isinstance(f, str): raise InvalidArguments('Exclude argument not a string.') elif os.path.isabs(f): raise InvalidArguments('Exclude argument cannot be absolute.') exclude_files = set(exclude) 
else: exclude_files = set() if 'exclude_directories' in kwargs: exclude = extract_as_list(kwargs, 'exclude_directories') for d in exclude: if not isinstance(d, str): raise InvalidArguments('Exclude argument not a string.') elif os.path.isabs(d): raise InvalidArguments('Exclude argument cannot be absolute.') exclude_directories = set(exclude) else: exclude_directories = set() exclude = (exclude_files, exclude_directories) install_mode = self._get_kwarg_install_mode(kwargs) idir = InstallDir(self.subdir, subdir, install_dir, install_mode, exclude, strip_directory) self.build.install_dirs.append(idir) return idir @FeatureNewKwargs('configure_file', '0.47.0', ['copy', 'output_format', 'install_mode', 'encoding']) @FeatureNewKwargs('configure_file', '0.46.0', ['format']) @FeatureNewKwargs('configure_file', '0.41.0', ['capture']) @FeatureNewKwargs('configure_file', '0.50.0', ['install']) @FeatureNewKwargs('configure_file', '0.52.0', ['depfile']) @permittedKwargs(permitted_kwargs['configure_file']) def func_configure_file(self, node, args, kwargs): if len(args) > 0: raise InterpreterException("configure_file takes only keyword arguments.") if 'output' not in kwargs: raise InterpreterException('Required keyword argument "output" not defined.') actions = set(['configuration', 'command', 'copy']).intersection(kwargs.keys()) if len(actions) == 0: raise InterpreterException('Must specify an action with one of these ' 'keyword arguments: \'configuration\', ' '\'command\', or \'copy\'.') elif len(actions) == 2: raise InterpreterException('Must not specify both {!r} and {!r} ' 'keyword arguments since they are ' 'mutually exclusive.'.format(*actions)) elif len(actions) == 3: raise InterpreterException('Must specify one of {!r}, {!r}, and ' '{!r} keyword arguments since they are ' 'mutually exclusive.'.format(*actions)) if 'capture' in kwargs: if not isinstance(kwargs['capture'], bool): raise InterpreterException('"capture" keyword must be a boolean.') if 'command' not in kwargs: raise InterpreterException('"capture" keyword requires "command" keyword.') if 'format' in kwargs: fmt = kwargs['format'] if not isinstance(fmt, str): raise InterpreterException('"format" keyword must be a string.') else: fmt = 'meson' if fmt not in ('meson', 'cmake', 'cmake@'): raise InterpreterException('"format" possible values are "meson", "cmake" or "cmake@".') if 'output_format' in kwargs: output_format = kwargs['output_format'] if not isinstance(output_format, str): raise InterpreterException('"output_format" keyword must be a string.') else: output_format = 'c' if output_format not in ('c', 'nasm'): raise InterpreterException('"format" possible values are "c" or "nasm".') if 'depfile' in kwargs: depfile = kwargs['depfile'] if not isinstance(depfile, str): raise InterpreterException('depfile file name must be a string') else: depfile = None # Validate input inputs = self.source_strings_to_files(extract_as_list(kwargs, 'input')) inputs_abs = [] for f in inputs: if isinstance(f, mesonlib.File): inputs_abs.append(f.absolute_path(self.environment.source_dir, self.environment.build_dir)) self.add_build_def_file(f) else: raise InterpreterException('Inputs can only be strings or file objects') # Validate output output = kwargs['output'] if not isinstance(output, str): raise InterpreterException('Output file name must be a string') if inputs_abs: values = mesonlib.get_filenames_templates_dict(inputs_abs, None) outputs = mesonlib.substitute_values([output], values) output = outputs[0] if depfile: depfile = 
mesonlib.substitute_values([depfile], values)[0] ofile_rpath = os.path.join(self.subdir, output) if ofile_rpath in self.configure_file_outputs: mesonbuildfile = os.path.join(self.subdir, 'meson.build') current_call = "{}:{}".format(mesonbuildfile, self.current_lineno) first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath]) mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call) else: self.configure_file_outputs[ofile_rpath] = self.current_lineno if os.path.dirname(output) != '': raise InterpreterException('Output file name must not contain a subdirectory.') (ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output)) ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname) # Perform the appropriate action if 'configuration' in kwargs: conf = kwargs['configuration'] if isinstance(conf, dict): FeatureNew('configure_file.configuration dictionary', '0.49.0').use(self.subproject) conf = ConfigurationDataHolder(self.subproject, conf) elif not isinstance(conf, ConfigurationDataHolder): raise InterpreterException('Argument "configuration" is not of type configuration_data') mlog.log('Configuring', mlog.bold(output), 'using configuration') if len(inputs) > 1: raise InterpreterException('At most one input file can given in configuration mode') if inputs: os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True) file_encoding = kwargs.setdefault('encoding', 'utf-8') missing_variables, confdata_useless = \ mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf.held_object, fmt, file_encoding) if missing_variables: var_list = ", ".join(map(repr, sorted(missing_variables))) mlog.warning( "The variable(s) %s in the input file '%s' are not " "present in the given configuration data." % ( var_list, inputs[0]), location=node) if confdata_useless: ifbase = os.path.basename(inputs_abs[0]) mlog.warning('Got an empty configuration_data() object and found no ' 'substitutions in the input file {!r}. If you want to ' 'copy a file to the build dir, use the \'copy:\' keyword ' 'argument added in 0.47.0'.format(ifbase), location=node) else: mesonlib.dump_conf_header(ofile_abs, conf.held_object, output_format) conf.mark_used() elif 'command' in kwargs: if len(inputs) > 1: FeatureNew('multiple inputs in configure_file()', '0.52.0').use(self.subproject) # We use absolute paths for input and output here because the cwd # that the command is run from is 'unspecified', so it could change. # Currently it's builddir/subdir for in_builddir else srcdir/subdir. values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs]) if depfile: depfile = os.path.join(self.environment.get_scratch_dir(), depfile) values['@DEPFILE@'] = depfile # Substitute @INPUT@, @OUTPUT@, etc here. 
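            # Added note (editorial): the same placeholder substitution applies
            # here, e.g. a meson.build call such as (hypothetical file names)
            #     configure_file(input: 'foo.c.in', output: 'foo.c',
            #                    command: ['gen.py', '@INPUT@', '@OUTPUT@'])
            # has @INPUT@ / @OUTPUT@ expanded to absolute paths before the
            # command runs, because the working directory is unspecified.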
cmd = mesonlib.substitute_values(kwargs['command'], values) mlog.log('Configuring', mlog.bold(output), 'with command') res = self.run_command_impl(node, cmd, {}, True) if res.returncode != 0: raise InterpreterException('Running configure command failed.\n%s\n%s' % (res.stdout, res.stderr)) if 'capture' in kwargs and kwargs['capture']: dst_tmp = ofile_abs + '~' file_encoding = kwargs.setdefault('encoding', 'utf-8') with open(dst_tmp, 'w', encoding=file_encoding) as f: f.writelines(res.stdout) if inputs_abs: shutil.copymode(inputs_abs[0], dst_tmp) mesonlib.replace_if_different(ofile_abs, dst_tmp) if depfile: mlog.log('Reading depfile:', mlog.bold(depfile)) with open(depfile, 'r') as f: df = DepFile(f.readlines()) deps = df.get_all_dependencies(ofile_fname) for dep in deps: self.add_build_def_file(dep) elif 'copy' in kwargs: if len(inputs_abs) != 1: raise InterpreterException('Exactly one input file must be given in copy mode') os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True) shutil.copyfile(inputs_abs[0], ofile_abs) shutil.copystat(inputs_abs[0], ofile_abs) else: # Not reachable raise AssertionError # Install file if requested, we check for the empty string # for backwards compatibility. That was the behaviour before # 0.45.0 so preserve it. idir = kwargs.get('install_dir', '') if idir is False: idir = '' mlog.deprecation('Please use the new `install:` kwarg instead of passing ' '`false` to `install_dir:`', location=node) if not isinstance(idir, str): if isinstance(idir, list) and len(idir) == 0: mlog.deprecation('install_dir: kwarg must be a string and not an empty array. ' 'Please use the install: kwarg to enable or disable installation. ' 'This will be a hard error in the next release.') else: raise InterpreterException('"install_dir" must be a string') install = kwargs.get('install', idir != '') if not isinstance(install, bool): raise InterpreterException('"install" must be a boolean') if install: if not idir: raise InterpreterException('"install_dir" must be specified ' 'when "install" in a configure_file ' 'is true') cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname) install_mode = self._get_kwarg_install_mode(kwargs) self.build.data.append(build.Data([cfile], idir, install_mode)) return mesonlib.File.from_built_file(self.subdir, output) def extract_incdirs(self, kwargs): prospectives = unholder(extract_as_list(kwargs, 'include_directories')) result = [] for p in prospectives: if isinstance(p, build.IncludeDirs): result.append(p) elif isinstance(p, str): result.append(self.build_incdir_object([p]).held_object) else: raise InterpreterException('Include directory objects can only be created from strings or include directories.') return result @permittedKwargs(permitted_kwargs['include_directories']) @stringArgs def func_include_directories(self, node, args, kwargs): return self.build_incdir_object(args, kwargs.get('is_system', False)) def build_incdir_object(self, incdir_strings, is_system=False): if not isinstance(is_system, bool): raise InvalidArguments('Is_system must be boolean.') src_root = self.environment.get_source_dir() build_root = self.environment.get_build_dir() absbase_src = os.path.join(src_root, self.subdir) absbase_build = os.path.join(build_root, self.subdir) for a in incdir_strings: if a.startswith(src_root): raise InvalidArguments('Tried to form an absolute path to a source dir. ' 'You should not do that but use relative paths instead.' 
''' To get include path to any directory relative to the current dir do incdir = include_directories(dirname) After this incdir will contain both the current source dir as well as the corresponding build dir. It can then be used in any subdirectory and Meson will take care of all the busywork to make paths work. Dirname can even be '.' to mark the current directory. Though you should remember that the current source and build directories are always put in the include directories by default so you only need to do include_directories('.') if you intend to use the result in a different subdirectory. ''') absdir_src = os.path.join(absbase_src, a) absdir_build = os.path.join(absbase_build, a) if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build): raise InvalidArguments('Include dir %s does not exist.' % a) i = IncludeDirsHolder(build.IncludeDirs(self.subdir, incdir_strings, is_system)) return i @permittedKwargs(permitted_kwargs['add_test_setup']) @stringArgs def func_add_test_setup(self, node, args, kwargs): if len(args) != 1: raise InterpreterException('Add_test_setup needs one argument for the setup name.') setup_name = args[0] if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None: raise InterpreterException('Setup name may only contain alphanumeric characters.') if ":" not in setup_name: setup_name = (self.subproject if self.subproject else self.build.project_name) + ":" + setup_name try: inp = unholder(extract_as_list(kwargs, 'exe_wrapper')) exe_wrapper = [] for i in inp: if isinstance(i, str): exe_wrapper.append(i) elif isinstance(i, dependencies.ExternalProgram): if not i.found(): raise InterpreterException('Tried to use non-found executable.') exe_wrapper += i.get_command() else: raise InterpreterException('Exe wrapper can only contain strings or external binaries.') except KeyError: exe_wrapper = None gdb = kwargs.get('gdb', False) if not isinstance(gdb, bool): raise InterpreterException('Gdb option must be a boolean') timeout_multiplier = kwargs.get('timeout_multiplier', 1) if not isinstance(timeout_multiplier, int): raise InterpreterException('Timeout multiplier must be a number.') is_default = kwargs.get('is_default', False) if not isinstance(is_default, bool): raise InterpreterException('is_default option must be a boolean') if is_default: if self.build.test_setup_default_name is not None: raise InterpreterException('\'%s\' is already set as default. 
' 'is_default can be set to true only once' % self.build.test_setup_default_name) self.build.test_setup_default_name = setup_name env = self.unpack_env_kwarg(kwargs) self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, gdb, timeout_multiplier, env) @permittedKwargs(permitted_kwargs['add_global_arguments']) @stringArgs def func_add_global_arguments(self, node, args, kwargs): for_machine = self.machine_from_native_kwarg(kwargs) self.add_global_arguments(node, self.build.global_args[for_machine], args, kwargs) @permittedKwargs(permitted_kwargs['add_global_link_arguments']) @stringArgs def func_add_global_link_arguments(self, node, args, kwargs): for_machine = self.machine_from_native_kwarg(kwargs) self.add_global_arguments(node, self.build.global_link_args[for_machine], args, kwargs) @permittedKwargs(permitted_kwargs['add_project_arguments']) @stringArgs def func_add_project_arguments(self, node, args, kwargs): for_machine = self.machine_from_native_kwarg(kwargs) self.add_project_arguments(node, self.build.projects_args[for_machine], args, kwargs) @permittedKwargs(permitted_kwargs['add_project_link_arguments']) @stringArgs def func_add_project_link_arguments(self, node, args, kwargs): for_machine = self.machine_from_native_kwarg(kwargs) self.add_project_arguments(node, self.build.projects_link_args[for_machine], args, kwargs) def warn_about_builtin_args(self, args): warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra', '-Wpedantic') optargs = ('-O0', '-O2', '-O3', '-Os', '/O1', '/O2', '/Os') for arg in args: if arg in warnargs: mlog.warning('Consider using the built-in warning_level option instead of using "{}".'.format(arg), location=self.current_node) elif arg in optargs: mlog.warning('Consider using the built-in optimization level instead of using "{}".'.format(arg), location=self.current_node) elif arg == '-g': mlog.warning('Consider using the built-in debug option instead of using "{}".'.format(arg), location=self.current_node) elif arg == '-pipe': mlog.warning("You don't need to add -pipe, Meson will use it automatically when it is available.", location=self.current_node) elif arg.startswith('-fsanitize'): mlog.warning('Consider using the built-in option for sanitizers instead of using "{}".'.format(arg), location=self.current_node) elif arg.startswith('-std=') or arg.startswith('/std:'): mlog.warning('Consider using the built-in option for language standard version instead of using "{}".'.format(arg), location=self.current_node) def add_global_arguments(self, node, argsdict, args, kwargs): if self.is_subproject(): msg = 'Function \'{}\' cannot be used in subprojects because ' \ 'there is no way to make that reliable.\nPlease only call ' \ 'this if is_subproject() returns false. Alternatively, ' \ 'define a variable that\ncontains your language-specific ' \ 'arguments and add it to the appropriate *_args kwarg ' \ 'in each target.'.format(node.func_name) raise InvalidCode(msg) frozen = self.project_args_frozen or self.global_args_frozen self.add_arguments(node, argsdict, frozen, args, kwargs) def add_project_arguments(self, node, argsdict, args, kwargs): if self.subproject not in argsdict: argsdict[self.subproject] = {} self.add_arguments(node, argsdict[self.subproject], self.project_args_frozen, args, kwargs) def add_arguments(self, node, argsdict, args_frozen, args, kwargs): if args_frozen: msg = 'Tried to use \'{}\' after a build target has been declared.\n' \ 'This is not permitted. 
Please declare all ' \ 'arguments before your targets.'.format(node.func_name) raise InvalidCode(msg) if 'language' not in kwargs: raise InvalidCode('Missing language definition in {}'.format(node.func_name)) self.warn_about_builtin_args(args) for lang in mesonlib.stringlistify(kwargs['language']): lang = lang.lower() argsdict[lang] = argsdict.get(lang, []) + args @noKwargs @noArgsFlattening def func_environment(self, node, args, kwargs): if len(args) > 1: raise InterpreterException('environment takes only one optional positional arguments') elif len(args) == 1: FeatureNew('environment positional arguments', '0.52.0').use(self.subproject) initial_values = args[0] if not isinstance(initial_values, dict) and not isinstance(initial_values, list): raise InterpreterException('environment first argument must be a dictionary or a list') else: initial_values = {} return EnvironmentVariablesHolder(initial_values) @stringArgs @noKwargs def func_join_paths(self, node, args, kwargs): return self.join_path_strings(args) def run(self): super().run() mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets)))) FeatureNew.report(self.subproject) FeatureDeprecated.report(self.subproject) if not self.is_subproject(): self.print_extra_warnings() if self.subproject == '': self._print_summary() def print_extra_warnings(self): # TODO cross compilation for c in self.coredata.compilers.host.values(): if c.get_id() == 'clang': self.check_clang_asan_lundef() break def check_clang_asan_lundef(self): if 'b_lundef' not in self.coredata.base_options: return if 'b_sanitize' not in self.coredata.base_options: return if (self.coredata.base_options['b_lundef'].value and self.coredata.base_options['b_sanitize'].value != 'none'): mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef. This will probably not work. Try setting b_lundef to false instead.'''.format(self.coredata.base_options['b_sanitize'].value), location=self.current_node) def evaluate_subproject_info(self, path_from_source_root, subproject_dirname): depth = 0 subproj_name = '' segs = PurePath(path_from_source_root).parts segs_spd = PurePath(subproject_dirname).parts while segs and segs[0] == segs_spd[0]: if len(segs_spd) == 1: subproj_name = segs[1] segs = segs[2:] depth += 1 else: segs_spd = segs_spd[1:] segs = segs[1:] return (depth, subproj_name) # Check that the indicated file is within the same subproject # as we currently are. This is to stop people doing # nasty things like: # # f = files('../../master_src/file.c') # # Note that this is validated only when the file # object is generated. The result can be used in a different # subproject than it is defined in (due to e.g. a # declare_dependency). def validate_within_subproject(self, subdir, fname): norm = os.path.normpath(os.path.join(subdir, fname)) if os.path.isabs(norm): if not norm.startswith(self.environment.source_dir): # Grabbing files outside the source tree is ok. # This is for vendor stuff like: # # /opt/vendorsdk/src/file_with_license_restrictions.c return norm = os.path.relpath(norm, self.environment.source_dir) assert(not os.path.isabs(norm)) (num_sps, sproj_name) = self.evaluate_subproject_info(norm, self.subproject_dir) plain_filename = os.path.basename(norm) if num_sps == 0: if not self.is_subproject(): return raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename) if num_sps > 1: raise InterpreterException('Sandbox violation: Tried to grab file %s from a nested subproject.' 
% plain_filename) if sproj_name != self.subproject_directory_name: raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename) def source_strings_to_files(self, sources): results = [] mesonlib.check_direntry_issues(sources) if not isinstance(sources, list): sources = [sources] for s in sources: if isinstance(s, (mesonlib.File, GeneratedListHolder, TargetHolder, CustomTargetIndexHolder, GeneratedObjectsHolder)): pass elif isinstance(s, str): self.validate_within_subproject(self.subdir, s) s = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s) else: raise InterpreterException('Source item is {!r} instead of ' 'string or File-type object'.format(s)) results.append(s) return results def add_target(self, name, tobj): if name == '': raise InterpreterException('Target name must not be empty.') if name.strip() == '': raise InterpreterException('Target name must not consist only of whitespace.') if name.startswith('meson-'): raise InvalidArguments("Target names starting with 'meson-' are reserved " "for Meson's internal use. Please rename.") if name in coredata.forbidden_target_names: raise InvalidArguments("Target name '%s' is reserved for Meson's " "internal use. Please rename." % name) # To permit an executable and a shared library to have the # same name, such as "foo.exe" and "libfoo.a". idname = tobj.get_id() if idname in self.build.targets: raise InvalidCode('Tried to create target "%s", but a target of that name already exists.' % name) self.build.targets[idname] = tobj if idname not in self.coredata.target_guids: self.coredata.target_guids[idname] = str(uuid.uuid4()).upper() @FeatureNew('both_libraries', '0.46.0') def build_both_libraries(self, node, args, kwargs): shared_holder = self.build_target(node, args, kwargs, SharedLibraryHolder) # Check if user forces non-PIC static library. 
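        # Added note (editorial): b_staticpic is the built-in base option
        # consulted below; it can be toggled at setup time, e.g.
        #     meson setup builddir -Db_staticpic=false
        # (command line shown for illustration; it is not part of this function).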
pic = True if 'pic' in kwargs: pic = kwargs['pic'] elif 'b_staticpic' in self.environment.coredata.base_options: pic = self.environment.coredata.base_options['b_staticpic'].value if pic: # Exclude sources from args and kwargs to avoid building them twice static_args = [args[0]] static_kwargs = kwargs.copy() static_kwargs['sources'] = [] static_kwargs['objects'] = shared_holder.held_object.extract_all_objects() else: static_args = args static_kwargs = kwargs static_holder = self.build_target(node, static_args, static_kwargs, StaticLibraryHolder) return BothLibrariesHolder(shared_holder, static_holder, self) def build_library(self, node, args, kwargs): default_library = self.coredata.get_builtin_option('default_library', self.subproject) if default_library == 'shared': return self.build_target(node, args, kwargs, SharedLibraryHolder) elif default_library == 'static': return self.build_target(node, args, kwargs, StaticLibraryHolder) elif default_library == 'both': return self.build_both_libraries(node, args, kwargs) else: raise InterpreterException('Unknown default_library value: %s.', default_library) def build_target(self, node, args, kwargs, targetholder): @FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories']) @FeatureNewKwargs('build target', '0.41.0', ['rust_args']) @FeatureNewKwargs('build target', '0.40.0', ['build_by_default']) @FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility']) def build_target_decorator_caller(self, node, args, kwargs): return True build_target_decorator_caller(self, node, args, kwargs) if not args: raise InterpreterException('Target does not have a name.') name, *sources = args for_machine = self.machine_from_native_kwarg(kwargs) if 'sources' in kwargs: sources += listify(kwargs['sources']) sources = self.source_strings_to_files(sources) objs = extract_as_list(kwargs, 'objects') kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies') kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) if 'extra_files' in kwargs: ef = extract_as_list(kwargs, 'extra_files') kwargs['extra_files'] = self.source_strings_to_files(ef) self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources) if targetholder == ExecutableHolder: targetclass = build.Executable elif targetholder == SharedLibraryHolder: targetclass = build.SharedLibrary elif targetholder == SharedModuleHolder: targetclass = build.SharedModule elif targetholder == StaticLibraryHolder: targetclass = build.StaticLibrary elif targetholder == JarHolder: targetclass = build.Jar else: mlog.debug('Unknown target type:', str(targetholder)) raise RuntimeError('Unreachable code') self.kwarg_strings_to_includedirs(kwargs) # Filter out kwargs from other target types. For example 'soversion' # passed to library() when default_library == 'static'. 
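        # Added clarification: library() forwards every keyword it received, so
        # a hypothetical call like
        #     library('foo', 'foo.c', soversion: '1')
        # must have 'soversion' silently dropped here when default_library is
        # 'static' and the target resolves to a static_library.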
kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs} kwargs['include_directories'] = self.extract_incdirs(kwargs) target = targetclass(name, self.subdir, self.subproject, for_machine, sources, objs, self.environment, kwargs) target.project_version = self.project_version if not self.environment.machines.matches_build_machine(for_machine): self.add_cross_stdlib_info(target) l = targetholder(target, self) self.add_target(name, l.held_object) self.project_args_frozen = True return l def kwarg_strings_to_includedirs(self, kwargs): if 'd_import_dirs' in kwargs: items = mesonlib.extract_as_list(kwargs, 'd_import_dirs') cleaned_items = [] for i in items: if isinstance(i, str): # BW compatibility. This was permitted so we must support it # for a few releases so people can transition to "correct" # path declarations. if os.path.normpath(i).startswith(self.environment.get_source_dir()): mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead. This will become a hard error in the future.''', location=self.current_node) i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir)) i = self.build_incdir_object([i]) cleaned_items.append(i) kwargs['d_import_dirs'] = cleaned_items def get_used_languages(self, target): result = {} for i in target.sources: # TODO other platforms for lang, c in self.coredata.compilers.host.items(): if c.can_compile(i): result[lang] = True break return result def add_cross_stdlib_info(self, target): if target.for_machine != MachineChoice.HOST: return for l in self.get_used_languages(target): props = self.environment.properties.host if props.has_stdlib(l) \ and self.subproject != props.get_stdlib(l)[0]: target.add_deps(self.build.stdlibs.host[l]) def check_sources_exist(self, subdir, sources): for s in sources: if not isinstance(s, str): continue # This means a generated source and they always exist. fname = os.path.join(subdir, s) if not os.path.isfile(fname): raise InterpreterException('Tried to add non-existing source file %s.' % s) # Only permit object extraction from the same subproject def validate_extraction(self, buildtarget: InterpreterObject) -> None: if not self.subdir.startswith(self.subproject_dir): if buildtarget.subdir.startswith(self.subproject_dir): raise InterpreterException('Tried to extract objects from a subproject target.') else: if not buildtarget.subdir.startswith(self.subproject_dir): raise InterpreterException('Tried to extract objects from the main project from a subproject.') if self.subdir.split('/')[1] != buildtarget.subdir.split('/')[1]: raise InterpreterException('Tried to extract objects from a different subproject.') def is_subproject(self): return self.subproject != '' @noKwargs @noArgsFlattening def func_set_variable(self, node, args, kwargs): if len(args) != 2: raise InvalidCode('Set_variable takes two arguments.') varname, value = args self.set_variable(varname, value) @noKwargs @noArgsFlattening def func_get_variable(self, node, args, kwargs): if len(args) < 1 or len(args) > 2: raise InvalidCode('Get_variable takes one or two arguments.') varname = args[0] if isinstance(varname, Disabler): return varname if not isinstance(varname, str): raise InterpreterException('First argument must be a string.') try: return self.variables[varname] except KeyError: pass if len(args) == 2: return args[1] raise InterpreterException('Tried to get unknown variable "%s".' 
% varname) @stringArgs @noKwargs def func_is_variable(self, node, args, kwargs): if len(args) != 1: raise InvalidCode('Is_variable takes one argument.') varname = args[0] return varname in self.variables @staticmethod def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice: native = kwargs.get('native', False) if not isinstance(native, bool): raise InvalidArguments('Argument to "native" must be a boolean.') return MachineChoice.BUILD if native else MachineChoice.HOST @FeatureNew('is_disabler', '0.52.0') @noKwargs def func_is_disabler(self, node, args, kwargs): if len(args) != 1: raise InvalidCode('Is_disabler takes one argument.') varname = args[0] return isinstance(varname, Disabler)
mesonbuild/interpreter.py
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE import pytest # noqa: F401 import numpy as np # noqa: F401 import cupy as cp # noqa: F401 import awkward as ak # noqa: F401 def test_num_1(): content = ak.Array( ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] ).layout bitmask = ak.layout.IndexU8(np.array([40, 34], dtype=np.uint8)) array = ak.Array(ak.layout.BitMaskedArray(bitmask, content, False, 9, False)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_2(): content = ak.Array( ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] ).layout bytemask = ak.layout.Index8(np.array([False, True, False], dtype=np.bool)) array = ak.Array(ak.layout.ByteMaskedArray(bytemask, content, True)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_3(): array = ak.Array(ak.layout.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5]))) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) def test_num_4(): array = ak.Array( ak.layout.NumpyArray(np.array([[0.0, 1.1], [2.2, 3.3], [4.4, 5.5]])) ) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_5(): array = ak.Array(ak.layout.EmptyArray()) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) def test_num_6(): content = ak.layout.NumpyArray( np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 9])) array = ak.Array(ak.layout.ListOffsetArray64(offsets, content)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_7(): content = ak.layout.NumpyArray( np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) offsets = ak.layout.IndexU32(np.array([0, 3, 3, 5, 6, 9])) array = ak.Array(ak.layout.ListOffsetArrayU32(offsets, content)) cuda_array = ak.to_kernels(array, "cuda") assert ak.num(cuda_array, 0) == ak.num(array, 0) assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist() def test_num_8(): content = ak.layout.NumpyArray( np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.10]) ) offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 10, 10])) listoffsetarray = ak.layout.ListOffsetArray64(offsets, content) regulararray = ak.layout.RegularArray(listoffsetarray, 2) starts = ak.layout.Index64(np.array([0, 1])) stops = ak.layout.Index64(np.array([2, 3])) listarray = ak.layout.ListArray64(starts, stops, regulararray) cuda_listoffsetarray = ak.to_kernels(listoffsetarray, "cuda") assert ak.num(cuda_listoffsetarray, 0) == ak.num(ak.Array(listoffsetarray), 0) assert ( ak.num(cuda_listoffsetarray, 1).tolist() == ak.num(ak.Array(listoffsetarray), 1).tolist() ) cuda_regulararray = ak.to_kernels(regulararray, "cuda") assert ak.num(cuda_regulararray, 0) == ak.num(ak.Array(regulararray), 0) assert ( ak.num(cuda_regulararray, 1).tolist() == ak.num(ak.Array(regulararray), 1).tolist() ) cuda_listarray = ak.to_kernels(listarray, "cuda") assert ak.num(cuda_listarray, 0) == ak.num(ak.Array(listarray), 0) assert ak.num(cuda_listarray, 1).tolist() == ak.num(ak.Array(listarray), 
1).tolist() content1 = ak.layout.NumpyArray(np.array([1, 2, 3, 4, 5])) content2 = ak.layout.NumpyArray( np.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) offsets = ak.layout.Index32(np.array([0, 3, 3, 5, 6, 9])) recordarray = ak.Array( ak.layout.RecordArray( [content1, listoffsetarray, content2, content1], keys=["one", "two", "2", "wonky"], ) ) cuda_recordarray = ak.to_kernels(recordarray, "cuda") assert ak.num(cuda_recordarray, 0).tolist() == ak.num(recordarray, 0).tolist() content0 = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]]).layout content = ak.Array( ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] ).layout tags = ak.layout.Index8(np.array([1, 1, 0, 0, 1, 0, 1, 1], dtype=np.int8)) index = ak.layout.Index32(np.array([0, 1, 0, 1, 2, 2, 4, 3], dtype=np.int32)) unionarray = ak.Array(ak.layout.UnionArray8_32(tags, index, [content0, content1])) cuda_unionarray = ak.to_kernels(unionarray, "cuda") assert ak.num(cuda_unionarray, 0) == ak.num(unionarray, 0) def test_num_9(): content = ak.layout.NumpyArray( np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) ) index = ak.layout.Index32(np.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=np.int64)) indexedarray = ak.Array(ak.layout.IndexedArray32(index, content)) cuda_indexedarray = ak.to_kernels(indexedarray, "cuda") assert ak.num(cuda_indexedarray, 0) == ak.num(indexedarray, 0) ioa = ak.Array( ak.layout.IndexedOptionArray32( ak.layout.Index32([-30, 19, 6, 7, -3, 21, 13, 22, 17, 9, -12, 16]), ak.layout.NumpyArray( np.array( [ 5.2, 1.7, 6.7, -0.4, 4.0, 7.8, 3.8, 6.8, 4.2, 0.3, 4.6, 6.2, 6.9, -0.7, 3.9, 1.6, 8.7, -0.7, 3.2, 4.3, 4.0, 5.8, 4.2, 7.0, 5.6, 3.8, ] ) ), ) ) cuda_ioa = ak.to_kernels(ioa, "cuda") ak.to_kernels(cuda_ioa, "cpu") assert ak.num(cuda_ioa, 0) == ak.num(ioa, 0)
tests-cuda/test_0345-cuda-num.py
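Outside the test harness, the core round trip each case above exercises looks roughly like the sketch below; it assumes a CUDA-capable environment and the awkward 1.x API already used in this file.

import awkward as ak

# Build a small jagged array, move it to the GPU kernels, and check that ak.num
# reports the same per-list lengths on both backends.
array = ak.Array([[1.1, 2.2, 3.3], [], [4.4, 5.5]])
cuda_array = ak.to_kernels(array, "cuda")
assert ak.num(cuda_array, 1).tolist() == ak.num(array, 1).tolist()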
from kfp.components import create_component_from_func, InputPath, OutputPath


def keras_convert_hdf5_model_to_tf_saved_model(
    model_path: InputPath('KerasModelHdf5'),
    converted_model_path: OutputPath('TensorflowSavedModel'),
):
    '''Converts Keras HDF5 model to Tensorflow SavedModel format.

    Args:
        model_path: Keras model in HDF5 format.
        converted_model_path: Keras model in Tensorflow SavedModel format.

    Annotations:
        author: Alexey Volkov <[email protected]>
    '''
    from pathlib import Path
    from tensorflow import keras

    model = keras.models.load_model(filepath=model_path)
    keras.models.save_model(model=model, filepath=converted_model_path, save_format='tf')


if __name__ == '__main__':
    keras_convert_hdf5_model_to_tf_saved_model_op = create_component_from_func(
        keras_convert_hdf5_model_to_tf_saved_model,
        base_image='tensorflow/tensorflow:2.3.0',
        packages_to_install=['h5py==2.10.0'],
        output_component_file='component.yaml',
        annotations={
            "author": "Alexey Volkov <[email protected]>",
            "canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.yaml",
        },
    )
components/_converters/KerasModelHdf5/to_TensorflowSavedModel/component.py
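As a hedged illustration only (the file names and the toy model below are placeholders, not part of the component), the conversion function can be smoke-tested locally without Kubeflow, since at run time the annotated paths are plain strings.

from tensorflow import keras

# Build and save a throwaway HDF5 model, then convert it with the function above.
toy_model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
toy_model.save('model.h5', save_format='h5')

keras_convert_hdf5_model_to_tf_saved_model(
    model_path='model.h5',
    converted_model_path='saved_model_dir',
)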
from datetime import timedelta

import pytest
from django.utils import timezone

from electeez_auth.models import User


@pytest.mark.django_db
def test_otp(client):
    user = User.objects.create(email='[email protected]')
    token = user.otp_new(redirect='valid')
    response = client.post(token.path)
    assert response['Location'] == 'valid'

    # can't use the link twice
    response = client.post(token.path)
    assert response['Location'] != 'valid'

    # try expired link
    token = user.otp_new()
    token.otp_expiry = timezone.now() - timedelta(minutes=1)
    token.save()
    response = client.post(token.path)
    assert response['Location'] != 'valid'
electeez_auth/test_otp.py
import sys
from query_common import filter_records, ProjectMixins
from redcap import Project  # note this is from PyCap.redcap
from typing import List

"""
This class of functions is responsible for retrieving relevant data structures from the CNFUN tables
"""


class CNFUN_project(ProjectMixins):
    """
    One baby can have many admissions CaseIDs. One hospital record can have many CaseIDs.
    One baby has only one hospital record number.
    """

    def __init__(
        self,
        Token,
        URL,
        get_all_field=False,
    ):
        """
        Create a project using PyCap
        :param Token:
        :param URL:
        :return:
        """
        # Several key properties we'll use throughout
        self.project = Project(URL, Token)

        # These are very important ID fields from the
        fields_keyid = ["patientID", "cf_p_cnnpatientui"]

        # For now, make sure to only get the data related to these key ids to reduce load time
        self.data = self.get_fields(fields_keyid)

        # if specified, get all the records.
        if get_all_field:
            self.data = self.project.export_records()

    def filter_with_CNNPatientUI(self, CNNPatientUI: str or List[str]):
        """
        Check the list and retain only the records whose PatientID matches.
        :param dataset: CNBPIDs & record ID correspondence list.
        :param CNNPatientUI:
        :return:
        """
        list_filtered = None
        filtered_field = "cf_p_cnnpatientui"

        # Handling when babyIDs is string instead of list (allowing batch function).
        if type(CNNPatientUI) is str:
            CNNPatientUI = [CNNPatientUI]

        list_filtered = filter_records(self.data, filtered_field, CNNPatientUI)

        return list_filtered

    def get_PatientID_with_CNNPatientUI(self, CNNPatientUI: str or List[str]):
        """
        PatientID has 1:1 correspondence with CNNPatientUI, which is the same as
        PatientUI from the CNN Baby table.
        :return:
        """
        # Listify the CNNPatientUI
        if type(CNNPatientUI) is str:
            CNNPatientUI = [CNNPatientUI]

        # Filter with the information
        list_filtered_dict = self.filter_with_CNNPatientUI(CNNPatientUI)

        # Aggregate the list_PatientID
        list_PatientID = []
        for case in list_filtered_dict:
            list_PatientID.append(case["patientid"])

        return list_PatientID

    def get_records_CNFUN(self, PatientID: str or List[str]):
        """
        Retrieve the cases based on their INDEX, which is the PatientID.
        :param cases:
        :return:
        """
        if type(PatientID) is str:
            PatientID = [PatientID]

        cases_data = self.project.export_records(records=PatientID)

        return cases_data
query_CNFUN.py
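A hedged usage sketch of the lookup chain this class provides; the API URL, token, and PatientUI value are placeholders and assume a reachable REDCap instance.

# Hypothetical lookup chain: CNN PatientUI -> CNFUN PatientIDs -> CNFUN records.
TOKEN = "0000000000000000"               # placeholder, not a real API token
URL = "https://redcap.example.org/api/"  # placeholder REDCap endpoint

cnfun = CNFUN_project(TOKEN, URL)
patient_ids = cnfun.get_PatientID_with_CNNPatientUI("12345")
records = cnfun.get_records_CNFUN(patient_ids)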
# coding: utf-8 """ ESP Documentation The Evident Security Platform API (version 2.0) is designed to allow users granular control over their Amazon Web Service security experience by allowing them to review alerts, monitor signatures, and create custom signatures. OpenAPI spec version: v2_sdk Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems from ..extensions.base_object import BaseObject import re class Role(BaseObject): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, id=None, name=None, created_at=None, updated_at=None): """ Role - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'id': 'int', 'name': 'str', 'created_at': 'datetime', 'updated_at': 'datetime' } self.attribute_map = { 'id': 'id', 'name': 'name', 'created_at': 'created_at', 'updated_at': 'updated_at' } self._id = id self._name = name self._created_at = created_at self._updated_at = updated_at @property def id(self): """ Gets the id of this Role. Unique ID :return: The id of this Role. :rtype: int """ return self._id @id.setter def id(self, id): """ Sets the id of this Role. Unique ID :param id: The id of this Role. :type: int """ self._id = id @property def name(self): """ Gets the name of this Role. The name of the role :return: The name of this Role. :rtype: str """ return self._name @name.setter def name(self, name): """ Sets the name of this Role. The name of the role :param name: The name of this Role. :type: str """ self._name = name @property def created_at(self): """ Gets the created_at of this Role. ISO 8601 timestamp when the resource was created :return: The created_at of this Role. :rtype: datetime """ return self._created_at @created_at.setter def created_at(self, created_at): """ Sets the created_at of this Role. ISO 8601 timestamp when the resource was created :param created_at: The created_at of this Role. :type: datetime """ self._created_at = created_at @property def updated_at(self): """ Gets the updated_at of this Role. ISO 8601 timestamp when the resource was updated :return: The updated_at of this Role. :rtype: datetime """ return self._updated_at @updated_at.setter def updated_at(self, updated_at): """ Sets the updated_at of this Role. ISO 8601 timestamp when the resource was updated :param updated_at: The updated_at of this Role. 
:type: datetime """ self._updated_at = updated_at def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, Role): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
esp_sdk/models/role.py
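A short, hedged example of how this generated model behaves; the values are invented and the import assumes the package layout implied by the path above.

from datetime import datetime
from esp_sdk.models.role import Role

role = Role(id=1, name="auditor",
            created_at=datetime(2019, 1, 1), updated_at=datetime(2019, 1, 2))
print(role.to_dict())   # plain dict built from the swagger_types mapping
same = Role(id=1, name="auditor",
            created_at=datetime(2019, 1, 1), updated_at=datetime(2019, 1, 2))
print(role == same)     # True: __eq__ compares the instances' __dict__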
import pandas as pd import numpy as np import wave from scipy.io import wavfile import os import librosa import pydub import ffmpeg from librosa.feature import melspectrogram import warnings from sklearn.utils import shuffle from sklearn.utils import class_weight from PIL import Image import sklearn import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras import layers from tensorflow.keras import Input from tensorflow.keras.models import Model from tensorflow.keras.layers import Dense, Flatten, Dropout, Activation from tensorflow.keras.layers import BatchNormalization, GlobalAveragePooling2D from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping from tensorflow.keras.utils import to_categorical from tensorflow.keras.layers import Dense, Flatten, Dropout, Activation, LSTM, SimpleRNN, Conv1D, Input, BatchNormalization, GlobalAveragePooling2D from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.applications import EfficientNetB0 from keras.models import load_model import boto3 import botocore def model_input(): # Load the trained model model = load_model("best_model.h5") #Access S3 Bucket and Download the audio file BUCKET_NAME = 'thunderstruck-duck' # replace with your bucket name KEY = "sample_mp3.mp3" # replace with your object key s3 = boto3.client('s3', aws_access_key_id='AKIAISITTOGCJRNF46HQ', aws_secret_access_key= 'bq/VRAme7BxDMqf3hgEMLZdrJNVvrtdQ4VmoGAdB', ) BUCKET_NAME = "thunderstruck-duck" try: s3.download_file(BUCKET_NAME, KEY, "sample_mp3.mp3") except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": print("The object does not exist.") # else: # raise #Load the audio data using librosa wave_data, wave_rate = librosa.load("sample_mp3.mp3") wave_data, _ = librosa.effects.trim(wave_data) #only take 5s samples and add them to the dataframe song_sample = [] sample_length = 5*wave_rate #The variable below is chosen mainly to create a 216x216 image N_mels=216 for idx in range(0,len(wave_data),sample_length): song_sample = wave_data[idx:idx+sample_length] if len(song_sample)>=sample_length: mel = melspectrogram(song_sample, n_mels=N_mels) db = librosa.power_to_db(mel) normalised_db = sklearn.preprocessing.minmax_scale(db) filename = "sample_mel.tif" db_array = (np.asarray(normalised_db)*255).astype(np.uint8) db_image = Image.fromarray(np.array([db_array, db_array, db_array]).T) db_image.save("{}{}".format("upload_mel/",filename)) #Create a DF that will take the created Melspectogram directory data_df = pd.DataFrame([{'bird': "sample bird", 'song_sample': f"/app/upload_mel/{filename}"}]) # Users/HyunsooKim/Desktop/Boot_Camp/Homework/BIRD_CALL/upload_mel/{filename}"}]) #Compile the model callbacks = [ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, factor=0.7), EarlyStopping(monitor='val_loss', patience=5), ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)] model.compile(loss="categorical_crossentropy", optimizer='adam') #Since we only have 1 melspectogram passing into the model, set batch size to 1 and the size of that image so the model can take the image file. validation_batch_size_full = 1 target_size = (216,216) train_datagen_full = ImageDataGenerator( rescale=1. / 255 ) #Pass the columns into the model validation_datagen_full = ImageDataGenerator(rescale=1. 
/ 255) validation_generator_full = validation_datagen_full.flow_from_dataframe( dataframe = data_df, x_col='song_sample', y_col='bird', directory='/', target_size=target_size, shuffle=False, batch_size=validation_batch_size_full, class_mode='categorical') #Run the model preds = model.predict_generator(validation_generator_full) #We want to find the "INDEX" of maximum value within the pred, a numpy array. Use np.argmax and index into 0th element. result = np.argmax(preds[0]) #load in the index dataframe, so we can find the name of the bird that matches the index of our result index_df = pd.read_csv('xeno-canto_ca-nv_index.csv') #rename the english_cname to birds for better access and clearity bird_list = pd.DataFrame(index_df.english_cname.unique()) bird_list.columns = ["birds"] #We are almost done. Save the percentage and the name of the bird into a variable and print it out! percentage = preds[0][result] Name_of_bird = bird_list['birds'][result] print(f"This bird is {percentage} likely {Name_of_bird}") final_data = {"likelihood": percentage, "name_of_bird": Name_of_bird} return final_data if __name__ == "__main__": print(model_input())
import_and_model.py
# coding: utf-8 """ Wavefront REST API <p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer &lt;&lt;API-TOKEN&gt;&gt;\" to your HTTP requests.</p> # noqa: E501 OpenAPI spec version: v2 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import wavefront_api_client from wavefront_api_client.models.dashboard import Dashboard # noqa: E501 from wavefront_api_client.rest import ApiException class TestDashboard(unittest.TestCase): """Dashboard unit test stubs""" def setUp(self): pass def tearDown(self): pass def testDashboard(self): """Test Dashboard""" # FIXME: construct object with mandatory attributes with example values # model = wavefront_api_client.models.dashboard.Dashboard() # noqa: E501 pass if __name__ == '__main__': unittest.main()
test/test_dashboard.py
# coding: utf-8 """Webmail tests.""" from __future__ import unicode_literals import os import shutil import tempfile try: import mock except ImportError: from unittest import mock from six import BytesIO from django.core import mail from django.urls import reverse from modoboa.admin import factories as admin_factories from modoboa.core import models as core_models from modoboa.lib.tests import ModoTestCase from . import data as tests_data BODYSTRUCTURE_SAMPLE_WITH_FLAGS = [ (b'19 (UID 19 FLAGS (\\Seen) RFC822.SIZE 100000 BODYSTRUCTURE (("text" "plain" ("charset" "ISO-8859-1" "format" "flowed") NIL NIL "7bit" 2 1 NIL NIL NIL NIL)("message" "rfc822" ("name*" "ISO-8859-1\'\'%5B%49%4E%53%43%52%49%50%54%49%4F%4E%5D%20%52%E9%63%E9%70%74%69%6F%6E%20%64%65%20%76%6F%74%72%65%20%64%6F%73%73%69%65%72%20%64%27%69%6E%73%63%72%69%70%74%69%6F%6E%20%46%72%65%65%20%48%61%75%74%20%44%E9%62%69%74") NIL NIL "8bit" 3632 ("Wed, 13 Dec 2006 20:30:02 +0100" {70}', # noqa b"[INSCRIPTION] R\xe9c\xe9ption de votre dossier d'inscription Free Haut D\xe9bit"), # noqa (b' (("Free Haut Debit" NIL "inscription" "freetelecom.fr")) (("Free Haut Debit" NIL "inscription" "freetelecom.fr")) ((NIL NIL "hautdebit" "freetelecom.fr")) ((NIL NIL "nguyen.antoine" "wanadoo.fr")) NIL NIL NIL "<[email protected]>") ("text" "plain" ("charset" "iso-8859-1") NIL NIL "8bit" 1428 38 NIL ("inline" NIL) NIL NIL) 76 NIL ("inline" ("filename*" "ISO-8859-1\'\'%5B%49%4E%53%43%52%49%50%54%49%4F%4E%5D%20%52%E9%63%E9%70%74%69%6F%6E%20%64%65%20%76%6F%74%72%65%20%64%6F%73%73%69%65%72%20%64%27%69%6E%73%63%72%69%70%74%69%6F%6E%20%46%72%65%65%20%48%61%75%74%20%44%E9%62%69%74")) NIL NIL) "mixed" ("boundary" "------------040706080908000209030901") NIL NIL NIL) BODY[HEADER.FIELDS (DATE FROM TO CC SUBJECT)] {266}', # noqa b'Date: Tue, 19 Dec 2006 19:50:13 +0100\r\nFrom: Antoine Nguyen <[email protected]>\r\nTo: Antoine Nguyen <[email protected]>\r\nSubject: [Fwd: [INSCRIPTION] =?ISO-8859-1?Q?R=E9c=E9ption_de_votre_?=\r\n =?ISO-8859-1?Q?dossier_d=27inscription_Free_Haut_D=E9bit=5D?=\r\n\r\n' ), b')' ] def get_gif(): """Return gif.""" gif = BytesIO( b"GIF87a\x01\x00\x01\x00\x80\x01\x00\x00\x00\x00ccc,\x00" b"\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;") gif.name = "image.gif" return gif class IMAP4Mock(object): """Fake IMAP4 client.""" def __init__(self, *args, **kwargs): self.untagged_responses = {} def _quote(self, data): return data def _simple_command(self, name, *args, **kwargs): if name == "CAPABILITY": self.untagged_responses["CAPABILITY"] = [b""] elif name == "LIST": self.untagged_responses["LIST"] = [b"() \".\" \"INBOX\""] elif name == "NAMESPACE": self.untagged_responses["NAMESPACE"] = [b'(("" "/")) NIL NIL'] return "OK", None def append(self, *args, **kwargs): pass def create(self, name): return "OK", None def delete(self, name): return "OK", None def list(self): return "OK", [b"() \".\" \"INBOX\""] def rename(self, oldname, newname): return "OK", None def uid(self, command, *args): if command == "SORT": return "OK", [b"19"] elif command == "FETCH": uid = int(args[0]) data = BODYSTRUCTURE_SAMPLE_WITH_FLAGS if uid == 46931: if args[1] == "(BODYSTRUCTURE)": data = tests_data.BODYSTRUCTURE_ONLY_4 elif "HEADER.FIELDS" in args[1]: data = tests_data.BODYSTRUCTURE_SAMPLE_4 else: data = tests_data.BODY_PLAIN_4 elif uid == 46932: if args[1] == "(BODYSTRUCTURE)": data = tests_data.BODYSTRUCTURE_ONLY_5 elif "HEADER.FIELDS" in args[1]: data = tests_data.BODYSTRUCTURE_SAMPLE_9 else: data = tests_data.BODYSTRUCTURE_SAMPLE_10 elif uid == 33: if args[1] == 
"(BODYSTRUCTURE)": data = tests_data.BODYSTRUCTURE_EMPTY_MAIL else: data = tests_data.EMPTY_BODY elif uid == 133872: data = tests_data.COMPLETE_MAIL return "OK", data elif command == "STORE": return "OK", [] class WebmailTestCase(ModoTestCase): """Check webmail backend.""" @classmethod def setUpTestData(cls): # noqa """Create some users.""" super(WebmailTestCase, cls).setUpTestData() admin_factories.populate_database() cls.user = core_models.User.objects.get(username="[email protected]") def setUp(self): """Connect with a simpler user.""" patcher = mock.patch("imaplib.IMAP4") self.mock_imap4 = patcher.start() self.mock_imap4.return_value = IMAP4Mock() self.addCleanup(patcher.stop) self.set_global_parameter("imap_port", 1435) self.workdir = tempfile.mkdtemp() os.mkdir("{}/webmail".format(self.workdir)) self.set_global_parameter("update_scheme", False, app="core") url = reverse("core:login") data = { "username": self.user.username, "password": "toto" } self.client.post(url, data) def tearDown(self): """Cleanup.""" shutil.rmtree(self.workdir) def test_listmailbox(self): """Check listmailbox action.""" url = reverse("modoboa_webmail:index") response = self.client.get(url) self.assertEqual(response.status_code, 200) response = self.client.get( "{}?action=listmailbox".format(url), HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEqual(response.status_code, 200) self.assertIn( "[email protected]", response.json()["listing"]) response = self.client.get( "{}?action=listmailbox&pattern=Réception&criteria=Subject" .format(url), HTTP_X_REQUESTED_WITH="XMLHttpRequest" ) self.assertEqual(response.status_code, 200) self.assertIn( "[email protected]", response.json()["listing"]) def test_attachments(self): """Check attachments.""" url = reverse("modoboa_webmail:index") response = self.client.get("{}?action=compose".format(url)) self.assertEqual(response.status_code, 200) self.assertIn("compose_mail", self.client.session) url = reverse("modoboa_webmail:attachment_list") response = self.client.get(url) self.assertEqual(response.status_code, 200) self.set_global_parameters({"max_attachment_size": "10"}) with self.settings(MEDIA_ROOT=self.workdir): response = self.client.post(url, {"attachment": get_gif()}) self.assertContains(response, "Attachment is too big") self.set_global_parameters({"max_attachment_size": "10K"}) with self.settings(MEDIA_ROOT=self.workdir): response = self.client.post(url, {"attachment": get_gif()}) self.assertContains(response, "upload_success") self.assertEqual( len(self.client.session["compose_mail"]["attachments"]), 1) name = self.client.session["compose_mail"]["attachments"][0]["tmpname"] path = "{}/webmail/{}".format(self.workdir, name) self.assertTrue(os.path.exists(path)) url = reverse("modoboa_webmail:attachment_delete") with self.settings(MEDIA_ROOT=self.workdir): self.ajax_get("{}?name={}".format(url, name)) self.assertFalse(os.path.exists(path)) def test_delattachment_errors(self): """Check error cases.""" url = reverse("modoboa_webmail:index") response = self.client.get("{}?action=compose".format(url)) self.assertEqual(response.status_code, 200) self.assertIn("compose_mail", self.client.session) url = reverse("modoboa_webmail:attachment_delete") with self.settings(MEDIA_ROOT=self.workdir): response = self.ajax_get("{}?name=".format(url)) self.assertEqual(response["status"], "ko") self.assertEqual(response["respmsg"], "Bad query") with self.settings(MEDIA_ROOT=self.workdir): response = self.ajax_get("{}?name=test".format(url)) self.assertEqual(response["status"], "ko") 
self.assertEqual(response["respmsg"], "Unknown attachment") def test_send_mail(self): """Check compose form.""" url = "{}?action=compose".format(reverse("modoboa_webmail:index")) response = self.client.get(url) self.assertEqual(response.status_code, 200) response = self.client.post( url, { "from_": self.user.email, "to": "[email protected]", "subject": "test", "body": "Test" } ) self.assertEqual(len(mail.outbox), 1) self.assertEqual( mail.outbox[0].from_email, "[email protected]") # Try to send an email using HTML format self.user.first_name = "Antoine" self.user.last_name = "Nguyen" self.user.parameters.set_value("editor", "html") self.user.save() response = self.client.get(url) self.assertEqual(response.status_code, 200) mail.outbox = [] response = self.client.post( url, { "from_": self.user.email, "to": "[email protected]", "subject": "test", "body": "<p>Test</p>" } ) self.assertEqual(len(mail.outbox), 1) self.assertEqual( mail.outbox[0].from_email, '"Antoine Nguyen" <[email protected]>') def test_signature(self): """Check signature in different formats.""" signature = "Antoine Nguyen" self.user.parameters.set_value("signature", signature) self.user.save() response = self.client.get(reverse("modoboa_webmail:index")) self.assertEqual(response.status_code, 200) url = "{}?action=compose".format(reverse("modoboa_webmail:index")) response = self.ajax_get(url) self.assertIn(signature, response["listing"]) def test_custom_js_in_preferences(self): """Check that custom js is included.""" url = reverse("core:user_index") response = self.client.get(url) self.assertContains(response, "function toggleSignatureEditor()") def test_send_mail_errors(self): """Check error cases.""" url = "{}?action=compose".format(reverse("modoboa_webmail:index")) response = self.client.get(url) self.assertEqual(response.status_code, 200) response = self.ajax_post( url, {"to": "", "subject": "test", "body": "Test"}, 400 ) self.assertEqual(len(mail.outbox), 0) def test_new_folder(self): """Test folder creation.""" url = reverse("modoboa_webmail:folder_add") response = self.client.get(url) self.assertContains(response, "Create a new folder") response = self.ajax_post(url, {"name": "Test"}) self.assertIn("newmb", response) def test_edit_folder(self): """Test folder edition.""" url = reverse("modoboa_webmail:folder_change") response = self.client.get(url) self.assertContains(response, "Invalid request") url = "{}?name=Test".format(url) response = self.client.get(url) self.assertContains(response, "Edit folder") session = self.client.session session["webmail_navparams"] = {"inbox": "Test"} session.save() response = self.ajax_post(url, {"oldname": "Test", "name": "Toto"}) self.assertEqual(response["respmsg"], "Folder updated") def test_delete_folder(self): """Test folder removal.""" url = reverse("modoboa_webmail:folder_delete") self.ajax_get(url, status=400) url = "{}?name=Test".format(url) session = self.client.session session["webmail_navparams"] = {"inbox": "Test"} session.save() self.ajax_get(url) def test_reply_to_email(self): """Test reply form.""" url = "{}?action=reply&mbox=INBOX&mailid=46931".format( reverse("modoboa_webmail:index")) session = self.client.session session["lastaction"] = "compose" session.save() response = self.ajax_get(url) self.assertIn('id="id_origmsgid"', response["listing"]) response = self.client.post( url, { "from_": self.user.email, "to": "[email protected]", "subject": "test", "body": "Test", "origmsgid": "<id@localhost>" } ) self.assertEqual(len(mail.outbox), 1) self.assertEqual( 
mail.outbox[0].from_email, "[email protected]") self.assertIn("References", mail.outbox[0].extra_headers) def test_forward_email(self): """Test forward form.""" url = "{}?action=forward&mbox=INBOX&mailid=46932".format( reverse("modoboa_webmail:index")) session = self.client.session session["lastaction"] = "compose" session.save() with self.settings(MEDIA_ROOT=self.workdir): response = self.client.get( url, HTTP_X_REQUESTED_WITH="XMLHttpRequest") response = response.json() self.assertIn('id="id_origmsgid"', response["listing"]) self.assertEqual( len(self.client.session["compose_mail"]["attachments"]), 1) response = self.client.post( url, { "from_": self.user.email, "to": "[email protected]", "subject": "test", "body": "Test", "origmsgid": "<id@localhost>" } ) self.assertEqual(len(mail.outbox), 1) def test_getmailcontent_empty_mail(self): """Try to display an empty email.""" url = "{}?action=reply&mbox=INBOX&mailid=33".format( reverse("modoboa_webmail:mailcontent_get")) response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_getmailsource(self): """Try to display a message's source.""" url = "{}?mbox=INBOX&mailid=133872".format( reverse("modoboa_webmail:mailsource_get")) response = self.client.get(url) self.assertContains(response, "Message-ID")
modoboa_webmail/tests/test_views.py
# -*- coding: utf-8 -*-

"""
General description
-------------------

This example illustrates the effect of activity_costs.

There are the following components:

    - demand_heat: heat demand (constant, for the sake of simplicity)
    - fireplace: wood firing, burns "for free" if somebody is around
    - boiler: gas firing, consumes (paid) gas

Notice that activity_costs is an attribute to NonConvex. This is because
it relies on the activity status of a component, which is only available
for nonconvex flows.

Installation requirements
-------------------------

This example requires oemof.solph 0.4.x. Install by:

    pip install 'oemof.solph>=0.4,<0.5'

"""

import numpy as np
import pandas as pd

from oemof import solph

try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None

##########################################################################
# Calculate parameters and initialize the energy system
##########################################################################

periods = 24
time = pd.date_range('1/1/2018', periods=periods, freq='H')

demand_heat = np.full(periods, 5)
demand_heat[:4] = 0
demand_heat[4:18] = 4

activity_costs = np.full(periods, 5)
activity_costs[18:] = 0

es = solph.EnergySystem(timeindex=time)

b_heat = solph.Bus(label='b_heat')

es.add(b_heat)

sink_heat = solph.Sink(
    label='demand',
    inputs={b_heat: solph.Flow(fix=demand_heat, nominal_value=1)})

fireplace = solph.Source(
    label='fireplace',
    outputs={b_heat: solph.Flow(nominal_value=3,
                                variable_costs=0,
                                nonconvex=solph.NonConvex(
                                    activity_costs=activity_costs))})

boiler = solph.Source(
    label='boiler',
    outputs={b_heat: solph.Flow(nominal_value=10, variable_costs=1)})

es.add(sink_heat, fireplace, boiler)

##########################################################################
# Optimise the energy system
##########################################################################

# create an optimization problem and solve it
om = solph.Model(es)

# solve model
om.solve(solver='cbc', solve_kwargs={'tee': True})

##########################################################################
# Check and plot the results
##########################################################################

results = solph.processing.results(om)

# plot data
if plt is not None:
    data = solph.views.node(results, 'b_heat')['sequences']
    ax = data.plot(kind='line', drawstyle='steps-post', grid=True, rot=0)
    ax.set_xlabel('Time')
    ax.set_ylabel('Heat (arb. units)')
    plt.show()
oemof_examples/oemof.solph/v0.4.x/activity_costs/activity_costs.py
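Besides the plot, the solved dispatch can be inspected directly; a minimal sketch, assuming the oemof.solph 0.4.x result conventions already used in the example above.

# Total heat delivered by each flow into/out of b_heat over the 24 periods.
flows = solph.views.node(results, 'b_heat')['sequences']
print(flows.sum())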
# Copyright (c) 2019 Graphcore Ltd. All rights reserved. import numpy as np import popart import torch import pytest from op_tester import op_tester def test_and(op_tester): d1 = (np.random.randn(2) > 0).astype(np.bool_) d2 = (np.random.randn(2) > 0).astype(np.bool_) def init_builder(builder): i1 = builder.addInputTensor(d1) i2 = builder.addInputTensor(d2) o = builder.aiOnnx.logical_and([i1, i2]) builder.addOutputTensor(o) return [o] def reference(ref_data): t1 = torch.tensor(d1, dtype=torch.bool) t2 = torch.tensor(d2, dtype=torch.bool) out = t1 & t2 return [out] op_tester.run(init_builder, reference, step_type='infer') def test_broadcast_and(op_tester): d1 = (np.random.randn(2, 2) > 0).astype(np.bool_) d2 = (np.random.randn(2) > 0).astype(np.bool_) def init_builder(builder): i1 = builder.addInputTensor(d1) i2 = builder.addInputTensor(d2) o = builder.aiOnnx.logical_and([i1, i2]) builder.addOutputTensor(o) return [o] def reference(ref_data): t1 = torch.tensor(d1, dtype=torch.bool) t2 = torch.tensor(d2, dtype=torch.bool) out = t1 & t2 return [out] op_tester.run(init_builder, reference, step_type='infer') def test_or(op_tester): d1 = (np.random.randn(2) > 0).astype(np.bool_) d2 = (np.random.randn(2) > 0).astype(np.bool_) def init_builder(builder): i1 = builder.addInputTensor(d1) i2 = builder.addInputTensor(d2) o = builder.aiOnnx.logical_or([i1, i2]) builder.addOutputTensor(o) return [o] def reference(ref_data): t1 = torch.tensor(d1, dtype=torch.bool) t2 = torch.tensor(d2, dtype=torch.bool) out = t1 | t2 return [out] op_tester.run(init_builder, reference, step_type='infer') def test_broadcast_or(op_tester): d1 = (np.random.randn(2, 2) > 0).astype(np.bool_) d2 = (np.random.randn(2) > 0).astype(np.bool_) def init_builder(builder): i1 = builder.addInputTensor(d1) i2 = builder.addInputTensor(d2) o = builder.aiOnnx.logical_or([i1, i2]) print(o) builder.addOutputTensor(o) return [o] def reference(ref_data): t1 = torch.tensor(d1, dtype=torch.bool) t2 = torch.tensor(d2, dtype=torch.bool) out = t1 | t2 return [out] op_tester.run(init_builder, reference, step_type='infer') def test_not(op_tester): d1 = (np.random.randn(2) > 0).astype(np.bool_) print(d1) def init_builder(builder): i1 = builder.addInputTensor(d1) o = builder.aiOnnx.logical_not([i1]) builder.addOutputTensor(o) return [o] def reference(ref_data): return [np.logical_not(d1)] op_tester.run(init_builder, reference, step_type='infer') def test_equal(op_tester): d1 = (np.random.randn(2)).astype(np.float32) d2 = (np.random.randn(2)).astype(np.float32) d2[0] = d1[0] def init_builder(builder): i1 = builder.addInputTensor(d1) i2 = builder.addInputTensor(d2) o = builder.aiOnnx.equal([i1, i2]) builder.addOutputTensor(o) return [o] def reference(ref_data): t1 = torch.tensor(d1) t2 = torch.tensor(d2) out = torch.eq(t1, t2) return [out] op_tester.run(init_builder, reference, step_type='infer') def test_broadcast_equal(op_tester): d1 = (np.random.randn(2, 2)).astype(np.float32) d2 = (np.random.randn(2)).astype(np.float32) # d2[0][0] = d1[0] def init_builder(builder): i1 = builder.addInputTensor(d1) i2 = builder.addInputTensor(d2) o = builder.aiOnnx.equal([i1, i2]) builder.addOutputTensor(o) return [o] def reference(ref_data): t1 = torch.tensor(d1) t2 = torch.tensor(d2) out = torch.eq(t1, t2) return [out] op_tester.run(init_builder, reference, step_type='infer')
tests/integration/operators_test/boolean_test.py
#!/bin/env python """ Module to display weather info on polybar """ # -*- coding: utf-8 -*- import argparse import datetime import logging import os import time import requests import importlib # pylint: disable=redefined-builtin from requests import ConnectionError from requests.exceptions import HTTPError, Timeout from util import color_polybar, color_bash as cb class MyInternetIsShitty(Exception): """ Custom exception """ pass def get_args(): """ Get script argument """ parser = argparse.ArgumentParser(description='Show current weather on polybar') parser.add_argument('log', nargs='?', help='Logging for debugging or not') parser.add_argument('-u', '--unit', default='metric', nargs='?', help='unit: metric or imperial. Default: metric') return parser.parse_args() def set_up_logging(): """ Set some logging parameter """ if importlib.util.find_spec('requests'): # Shut up the request module logger logging.getLogger("requests").setLevel(logging.WARNING) logging.getLogger("urllib3").setLevel(logging.WARNING) logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.DEBUG) def get_day_or_night(): """ return 'day' or 'night' based on current hour """ hour = int(datetime.datetime.now().strftime('%H')) if hour >= 18 or hour <= 5: return 'night' return 'day' def get_weather_icon(weather_id): """ Get weather icon based on weather condition """ day_night_status = get_day_or_night() weather = { 'thunderstorm': 200 <= weather_id <= 232, 'rain': 300 <= weather_id <= 531, 'snow': 600 <= weather_id <= 622, 'atmosphere': 701 <= weather_id <= 781, 'squall': weather_id == 771, 'tornado': weather_id == 781 or weather_id == 900, 'clear_day': weather_id == 800 and day_night_status == 'day', 'clear_night': weather_id == 800 and day_night_status == 'night', 'tropical storm': weather_id == 901, 'hurricane': weather_id == 902, 'cold': weather_id == 903, 'hot': weather_id == 904, 'windy': weather_id == 905, 'cloudy': 801 <= weather_id <= 804, 'hail': weather_id == 906 } if weather['thunderstorm']: return '' elif weather['rain']: return '' elif weather['snow'] or weather['cold']: return '' elif weather['atmosphere'] or weather['windy']: return '' elif (weather['squall'] or weather['tornado'] or weather['tropical storm'] or weather['hurricane']): return '' elif weather['clear_day'] or weather['hot']: return '' elif weather['clear_night']: return '' elif weather['cloudy']: return '' elif weather['hail']: return '' def get_thermo_icon(temp_value, temp_unit): """ Get thermometer icon based on temperature """ if temp_unit == 'F': temp_value = convert_temp_unit(temp_unit, 'C') if temp_value <= -15: return '' elif -15 < temp_value <= 0: return '' elif 0 < temp_value <= 15: return '' elif 15 < temp_value <= 30: return '' elif temp_value > 30: return '' def convert_temp_unit(temp_value, temp_unit): """ Convert current temp_value to temp_unit """ if temp_unit == 'C': return round((temp_value - 32) / 1.8) elif temp_unit == 'F': return round(temp_value * 1.8 + 32) def get_api_key(): """ Get secret api key from a file on filesystem """ paren_dir = os.path.dirname(os.path.realpath(__file__)) api_path = os.path.join(paren_dir, 'weather_api.txt') with open(api_path, 'r') as file: api_key = file.read().replace('\n', '') return api_key def get_city_id(): """ Workaround to get city id based on my schedule """ region_code = { 'TPHCM': 1580578, 'TPHCM2': 1566083, 'Hai Duong': 1581326, 'Tan An': 1567069 } hour = int(datetime.datetime.now().strftime('%H')) weekday = datetime.datetime.now().strftime('%a') # 5pm 
Fri to 5pm Sun: Tan An, else Hai Duong if (hour >= 17 and weekday == 'Fri') or weekday == 'Sat' or (hour < 17 and weekday == 'Sun'): return region_code['Tan An'] return region_code['Hai Duong'] def update_weather(city_id, units, api_key): """ Update weather by using openweather api """ url = 'http://api.openweathermap.org/data/2.5/weather?id={}&appid={}&units={}' temp_unit = 'C' if units == 'metric' else 'K' error_icon = color_polybar('', 'red') try: req = requests.get(url.format(city_id, api_key, units)) try: description = req.json()['weather'][0]['description'].capitalize() except ValueError: print(error_icon, flush=True) raise MyInternetIsShitty temp_value = round(req.json()['main']['temp']) temp = str(temp_value) + '°' + temp_unit thermo_icon = color_polybar(get_thermo_icon(temp_value, units), 'main') weather_id = req.json()['weather'][0]['id'] weather_icon = color_polybar(get_weather_icon(weather_id), 'main') print('{} {} {} {}'.format(weather_icon, description, thermo_icon, temp), flush=True) except (HTTPError, Timeout, ConnectionError): print(error_icon, flush=True) raise MyInternetIsShitty def main(): """ main function """ arg = get_args() if arg.log == 'debug': set_up_logging() units = arg.unit api_key = get_api_key() city_id = get_city_id() while True: try: update_weather(city_id, units, api_key) except MyInternetIsShitty: logging.info(cb('update failed: ', 'red')) time.sleep(3) else: logging.info(cb('update success', 'green')) time.sleep(700) if __name__ == '__main__': main() # vim: nofoldenable
.config/polybar/weather/weather.py
5,497
Custom exception Convert current temp_value to temp_unit Get secret api key from a file on filesystem Get script argument Workaround to get city id based on my schedule return 'day' or 'night' based on current hour Get thermometer icon based on temperature Get weather icon based on weather condition main function Set some logging parameter Update weather by using openweather api Module to display weather info on polybar !/bin/env python -*- coding: utf-8 -*- pylint: disable=redefined-builtin Shut up the request module logger 5pm Fri to 5pm Sun: Tan An, else Hai Duong vim: nofoldenable
604
en
0.648886
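The weather.py record above has two small unit-handling issues worth flagging: get_thermo_icon passes the unit string where convert_temp_unit expects a numeric value, and update_weather labels non-metric temperatures with 'K' although OpenWeather's imperial units report Fahrenheit. The following is a minimal, hedged sketch of a fix, not the author's code; the icon names are placeholders for the original font glyphs and the imperial-means-Fahrenheit reading is an assumption.

# Hedged sketch: convert the value (not the unit string) before thresholding,
# and label imperial readings as Fahrenheit. Icon names below are placeholders
# for the private-use font glyphs used in the original module.
def get_thermo_icon_fixed(temp_value, units):
    """Same Celsius thresholds as above, with the value converted when needed."""
    if units == 'imperial':
        temp_value = round((temp_value - 32) / 1.8)  # F -> C before thresholding
    if temp_value <= -15:
        return 'icon_coldest'
    elif temp_value <= 0:
        return 'icon_cold'
    elif temp_value <= 15:
        return 'icon_mild'
    elif temp_value <= 30:
        return 'icon_warm'
    return 'icon_hot'

# ...and in update_weather the suffix would become:
# temp_unit = 'C' if units == 'metric' else 'F'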
#!/usr/bin/env python3

import sys
import argparse
import time
import socket
from socket import socket as Socket


def main():
    # Command line arguments. Use a server_port > 1024 by default so that we can run
    # server without sudo.
    parser = argparse.ArgumentParser()
    parser.add_argument('--server-port', '-p', default=2081, type=int,
                        help='Server_Port to use')
    parser.add_argument('--run-server', '-s', action='store_true',
                        help='Run a ping server')
    parser.add_argument('server_address', default='localhost',
                        help='Server to ping, no effect if running as a server.')
    args = parser.parse_args()

    if args.run_server:
        return run_server(args.server_port)
    else:
        return run_client(args.server_address, args.server_port,)


def run_server(server_port):
    """Run the UDP pinger server """

    # Create the server socket (to handle UDP requests using ipv4), make sure
    # it is always closed by using with statement.
    with Socket(socket.AF_INET, socket.SOCK_DGRAM) as server_socket:

        # The socket stays connected even after this script ends. So in order
        # to allow the immediate reuse of the socket (so that we can kill and
        # re-run the server while debugging) we set the following option. This
        # is potentially dangerous in real code: in rare cases you may get junk
        # data arriving at the socket.
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        # Set the server port
        server_socket.bind(('', server_port))

        # Start accepting ping requests
        print("Ping server ready on port", server_port)
        while True:
            # Receive message and send one back
            _, client_address = server_socket.recvfrom(1024)
            server_socket.sendto("".encode(), client_address)

    return 0


def run_client(server_address, server_port):
    """Ping a UDP pinger server running at the given address """

    # Fill in the client side code here.
    raise NotImplementedError

    return 0


if __name__ == "__main__":
    sys.exit(main())
ping/ping.py
2,195
Ping a UDP pinger server running at the given address Run the UDP pinger server !/usr/bin/env python3 Command line arguments. Use a server_port > 1024 by default so that we can run server without sudo. Create the server socket (to handle UDP requests using ipv4), make sure it is always closed by using with statement. The socket stays connected even after this script ends. So in order to allow the immediate reuse of the socket (so that we can kill and re-run the server while debugging) we set the following option. This is potentially dangerous in real code: in rare cases you may get junk data arriving at the socket. Set the server port Start accepting ping requests Receive message and send one back Fill in the client side code here.
752
en
0.864424
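The ping/ping.py record above leaves run_client as a NotImplementedError stub. Below is a hedged sketch of one way the missing client could look, using only the standard socket API already imported in the stub; the message format, the ten-ping count, and the one-second timeout are assumptions, since the assignment leaves them open.

# Hedged sketch of the missing client: send a few pings, wait up to one second
# for each reply, and report the round-trip time or a timeout.
import time
import socket
from socket import socket as Socket


def run_client(server_address, server_port, count=10, timeout=1.0):
    """Ping a UDP pinger server running at the given address."""
    with Socket(socket.AF_INET, socket.SOCK_DGRAM) as client_socket:
        client_socket.settimeout(timeout)
        for seq in range(1, count + 1):
            send_time = time.time()
            client_socket.sendto(f"ping {seq} {send_time}".encode(),
                                 (server_address, server_port))
            try:
                _, _ = client_socket.recvfrom(1024)
                rtt = time.time() - send_time
                print(f"seq={seq} rtt={rtt * 1000:.1f} ms")
            except socket.timeout:
                print(f"seq={seq} request timed out")
    return 0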
#!/usr/bin/env python import re import time from tools.multiclass_shared import prepare_data # run with toy data [traindat, label_traindat, testdat, label_testdat] = prepare_data() # run with opt-digits if available #[traindat, label_traindat, testdat, label_testdat] = prepare_data(False) parameter_list = [[traindat,testdat,label_traindat,label_testdat,2.1,1,1e-5]] def classifier_multiclass_ecoc (fm_train_real=traindat,fm_test_real=testdat,label_train_multiclass=label_traindat,label_test_multiclass=label_testdat,lawidth=2.1,C=1,epsilon=1e-5): import shogun from shogun import ECOCStrategy, LinearMulticlassMachine from shogun import MulticlassAccuracy from shogun import MulticlassLabels import shogun as sg def nonabstract_class(name): try: getattr(shogun, name)() except TypeError: return False return True encoders = [x for x in dir(shogun) if re.match(r'ECOC.+Encoder', x) and nonabstract_class(x)] decoders = [x for x in dir(shogun) if re.match(r'ECOC.+Decoder', x) and nonabstract_class(x)] fea_train = sg.features(fm_train_real) fea_test = sg.features(fm_test_real) gnd_train = MulticlassLabels(label_train_multiclass) if label_test_multiclass is None: gnd_test = None else: gnd_test = MulticlassLabels(label_test_multiclass) base_classifier = sg.machine("LibLinear", liblinear_solver_type="L2R_L2LOSS_SVC", use_bias=True) #print('Testing with %d encoders and %d decoders' % (len(encoders), len(decoders))) #print('-' * 70) #format_str = '%%15s + %%-10s %%-10%s %%-10%s %%-10%s' #print((format_str % ('s', 's', 's')) % ('encoder', 'decoder', 'codelen', 'time', 'accuracy')) def run_ecoc(ier, idr): encoder = getattr(shogun, encoders[ier])() decoder = getattr(shogun, decoders[idr])() # whether encoder is data dependent if hasattr(encoder, 'set_labels'): encoder.set_labels(gnd_train) encoder.set_features(fea_train) strategy = ECOCStrategy(encoder, decoder) classifier = LinearMulticlassMachine(strategy, fea_train, base_classifier, gnd_train) classifier.train() label_pred = classifier.apply(fea_test) if gnd_test is not None: evaluator = MulticlassAccuracy() acc = evaluator.evaluate(label_pred, gnd_test) else: acc = None return (classifier.get_num_machines(), acc) for ier in range(len(encoders)): for idr in range(len(decoders)): t_begin = time.clock() (codelen, acc) = run_ecoc(ier, idr) if acc is None: acc_fmt = 's' acc = 'N/A' else: acc_fmt = '.4f' t_elapse = time.clock() - t_begin #print((format_str % ('d', '.3f', acc_fmt)) % # (encoders[ier][4:-7], decoders[idr][4:-7], codelen, t_elapse, acc)) if __name__=='__main__': print('MulticlassECOC') classifier_multiclass_ecoc(*parameter_list[0])
examples/undocumented/python/classifier_multiclass_ecoc.py
2,849
!/usr/bin/env python run with toy data run with opt-digits if available[traindat, label_traindat, testdat, label_testdat] = prepare_data(False)print('Testing with %d encoders and %d decoders' % (len(encoders), len(decoders)))print('-' * 70)format_str = '%%15s + %%-10s %%-10%s %%-10%s %%-10%s'print((format_str % ('s', 's', 's')) % ('encoder', 'decoder', 'codelen', 'time', 'accuracy')) whether encoder is data dependentprint((format_str % ('d', '.3f', acc_fmt)) % (encoders[ier][4:-7], decoders[idr][4:-7], codelen, t_elapse, acc))
540
en
0.115034
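One portability note on the ECOC example above: it times each encoder/decoder pair with time.clock(), which was deprecated in Python 3.3 and removed in Python 3.8, so the script fails on current interpreters. A minimal swap, assuming wall-clock timing is acceptable here:

import time

t_begin = time.perf_counter()   # time.clock() no longer exists in Python 3.8+
# ... the run_ecoc(ier, idr) call would go here ...
t_elapse = time.perf_counter() - t_begin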
# Import dependencies # Math/Torch import numpy as np import torch.nn as nn # Typing from typing import List # Instantiate class class MRR(nn.Module): """Compute MRR metric (Mean reciprocal rank)""" def __init__(self, max_rank = 10): super(MRR, self).__init__() # Set max mrr rank self.max_rank = max_rank def _calculate_reciprocal_rank(self, hypothesis_ids: np.ndarray, reference_id: int) -> float: """Calculate the reciprocal rank for a given hypothesis and reference Params: hypothesis_ids: Iterator of hypothesis ids (as numpy array) ordered by its relevance reference_id: Reference id (as a integer) of the correct id of response Returns: reciprocal rank """ # Assure hypothesis_ids is a numpy array hypothesis_ids = np.asarray(hypothesis_ids) # Calculate rank try: rank = np.where(hypothesis_ids == reference_id)[0][0] + 1 except IndexError: rank = self.max_rank + 1 # Rank grater then max_rank is set to zero if rank > self.max_rank: reciprocal_rank = 0.0 else: # Calculate reciprocal rank reciprocal_rank = 1. / rank return reciprocal_rank def forward(self, batch_hypothesis_ids: List[np.ndarray], batch_reference_id: List[int]) -> float: """Score the mean reciprocal rank for the batch Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank >>> batch_hypothesis_ids = [[1, 0, 2], [0, 2, 1], [1, 0, 2]] >>> batch_reference_id = [2, 2, 1] >>> mrr = MRR() >>> mrr(batch_hypothesis_ids, batch_reference_id) 0.61111111111111105 Args: batch_hypothesis_ids: Batch of hypothesis ids (as numpy array) ordered by its relevance reference_id: Batch of reference id (as a integer) of the correct id of response Returns: Mean reciprocal rank (MRR) """ # Assure batches have same length assert len(batch_hypothesis_ids) == len(batch_reference_id), "Hypothesis batch and reference batch must have same length." # Size of batch batch_size = len(batch_hypothesis_ids) # MRR to be calculated mrr = 0 for hypothesis_ids, reference_id in zip(batch_hypothesis_ids, batch_reference_id): # Calculate reciprocal rank reciprocal_rank = self._calculate_reciprocal_rank(hypothesis_ids, reference_id) # Add to MRR mrr += reciprocal_rank/batch_size return mrr
tasks/retriever/mrr.py
2,758
Compute MRR metric (Mean reciprocal rank) Calculate the reciprocal rank for a given hypothesis and reference Params: hypothesis_ids: Iterator of hypothesis ids (as numpy array) ordered by its relevance reference_id: Reference id (as a integer) of the correct id of response Returns: reciprocal rank Score the mean reciprocal rank for the batch Example from http://en.wikipedia.org/wiki/Mean_reciprocal_rank >>> batch_hypothesis_ids = [[1, 0, 2], [0, 2, 1], [1, 0, 2]] >>> batch_reference_id = [2, 2, 1] >>> mrr = MRR() >>> mrr(batch_hypothesis_ids, batch_reference_id) 0.61111111111111105 Args: batch_hypothesis_ids: Batch of hypothesis ids (as numpy array) ordered by its relevance reference_id: Batch of reference id (as a integer) of the correct id of response Returns: Mean reciprocal rank (MRR) Import dependencies Math/Torch Typing Instantiate class Set max mrr rank Assure hypothesis_ids is a numpy array Calculate rank Rank grater then max_rank is set to zero Calculate reciprocal rank Assure batches have same length Size of batch MRR to be calculated Calculate reciprocal rank Add to MRR
1,131
en
0.808446
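As a quick check of the MRR docstring example above: the reference ids 2, 2, 1 sit at 1-based ranks 3, 2 and 1 in their hypothesis lists, so the mean reciprocal rank works out as shown below.

# Worked check of the docstring example (ranks 3, 2, 1):
print((1/3 + 1/2 + 1/1) / 3)   # 0.6111111111111112, matching the doctest value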
#
# 1573. Number of Ways to Split a String
#
# Q: https://leetcode.com/problems/number-of-ways-to-split-a-string/
# A: https://leetcode.com/problems/number-of-ways-to-split-a-string/discuss/830433/Javascript-Python3-C%2B%2B-solutions
#
class Solution:
    def numWays(self, S: str, MOD = int(1e9 + 7)) -> int:
        N = len(S)
        cnt = len([c for c in S if c == '1'])
        # case 1: all zeros, return the sum of the series for the cardinality of S minus 1
        if not cnt:
            return (N - 2) * (N - 1) // 2 % MOD
        # case 2: cannot evenly divide the ones into 3 equal paritions
        if cnt % 3:
            return 0
        # case 3: return the product of the first and second accumulated "gaps of zeros" between each parition of equal ones
        K = cnt // 3
        first = 0
        second = 0
        ones = 0
        for i in range(N):
            if S[i] == '1':
                ones += 1
            if ones == 1 * K and S[i] == '0':
                first += 1
            if ones == 2 * K and S[i] == '0':
                second += 1
        return (first + 1) * (second + 1) % MOD  # ⭐️ +1 for "gaps of zeros" from i..j inclusive
1573_number_ways_to_split_string.py
1,143
1573. Number of Ways to Split a String Q: https://leetcode.com/problems/number-of-ways-to-split-a-string/ A: https://leetcode.com/problems/number-of-ways-to-split-a-string/discuss/830433/Javascript-Python3-C%2B%2B-solutions case 1: all zeros, return the sum of the series for the cardinality of S minus 1 case 2: cannot evenly divide the ones into 3 equal paritions case 3: return the product of the first and second accumulated "gaps of zeros" between each parition of equal ones ⭐️ +1 for "gaps of zeros" from i..j inclusive
526
en
0.809501
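A quick sanity check of the all-zeros branch in the solution above: with no ones, any two cut positions among the N-1 gaps work, giving C(N-1, 2) = (N-1)*(N-2)/2 splits, which is exactly the expression in case 1.

# For S = "0000" (N = 4) the three splits are 0|0|00, 0|00|0 and 00|0|0.
N = 4
print((N - 2) * (N - 1) // 2)  # 3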
""" WSGI config for kongoauth project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kongoauth.settings") application = get_wsgi_application()
kongoauth/wsgi.py
396
WSGI config for kongoauth project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
216
en
0.741982
# -:- coding:utf8 -:- import base64 import hmac import json import sys import time import urllib import uuid from hashlib import sha1 import requests from flask import current_app from werkzeug.local import LocalProxy DEFAULT_URL = 'https://sms.aliyuncs.com' SMS = LocalProxy(lambda: current_app.extensions['kits_sms']) class SMSSender(object): def __init__(self, app_key, secret_key, url=DEFAULT_URL): self.app_key = app_key self.secret_key = secret_key self.url = url @staticmethod def percent_encode(content): # content = str(content) res = urllib.quote(content, '') res = res.replace('+', '%20') res = res.replace('*', '%2A') res = res.replace('%7E', '~') return res def sign(self, access_key_secret, params): params = sorted(params.items(), key=lambda param: param[0]) canonical_querystring = '' for (k, v) in params: canonical_querystring += '&' + self.percent_encode(k) + '=' + self.percent_encode(v) string_to_sign = 'GET&%2F&' + self.percent_encode(canonical_querystring[1:]) # 使用get请求方法 h = hmac.new(access_key_secret + "&", string_to_sign, sha1) signature = base64.encodestring(h.digest()).strip() return signature def make_url(self, params): timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) parameters = { 'Format': 'JSON', 'Version': '2016-09-27', 'AccessKeyId': self.app_key, 'SignatureVersion': '1.0', 'SignatureMethod': 'HMAC-SHA1', 'SignatureNonce': str(uuid.uuid1()), 'Timestamp': timestamp, } for key in params.keys(): parameters[key] = params[key] signature = self.sign(self.secret_key, parameters) parameters['Signature'] = signature url = self.url + "/?" + urllib.urlencode(parameters) return url def do_request(self, params): url = self.make_url(params) response = requests.get(url) print response.ok, response.content def send(self, template_code, sign_name, receive_num, param): params = { 'Action': 'SingleSendSms', 'SignName': sign_name, 'TemplateCode': template_code, 'RecNum': receive_num, 'ParamString': json.dumps(param) } url = self.make_url(params) response = requests.get(url) if not response.ok: current_app.logger.error(response.content) return response.ok def init_extension(kits, app): url = kits.get_parameter('SMS_URL', default=DEFAULT_URL) app_key = kits.get_parameter("SMS_APP_KEY") secret_key = kits.get_parameter('SMS_SECRET_KEY') app.extensions['kits_sms'] = SMSSender(app_key, secret_key, url) if __name__ == '__main__': sender = SMSSender('LTAIWLcy7iT5v7mr', 'gRL1rtYnyfKMDVZs7b4fhbosX0MAAo ') print sender.send("SMS_49485493", u"testing", "18708140165", param={'code': "123456", 'product': "benjamin"})
flask_kits/sms/__init__.py
3,186
-:- coding:utf8 -:- content = str(content) 使用get请求方法
52
ja
0.480272
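The flask_kits SMS module above is Python 2 only (urllib.quote, base64.encodestring, print statements and str-based hmac). Below is a hedged Python 3 sketch of just the percent-encoding and signing steps, mirroring the rules in the code above; it is an illustration of the same HMAC-SHA1 scheme, not a drop-in replacement for the whole sender.

# Hedged Python 3 sketch of the Aliyun-style signature used above.
import base64
import hmac
import urllib.parse
from hashlib import sha1


def percent_encode(content: str) -> str:
    # Same substitutions as the original percent_encode
    res = urllib.parse.quote(content, safe='')
    return res.replace('+', '%20').replace('*', '%2A').replace('%7E', '~')


def sign(access_key_secret: str, params: dict) -> str:
    items = sorted(params.items())
    canonical = '&'.join(f'{percent_encode(k)}={percent_encode(str(v))}' for k, v in items)
    string_to_sign = 'GET&%2F&' + percent_encode(canonical)
    digest = hmac.new((access_key_secret + '&').encode(), string_to_sign.encode(), sha1).digest()
    return base64.b64encode(digest).decode().strip()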
import matplotlib matplotlib.use('Agg') import os from os.path import join import argparse import torch import numpy as np import pickle import sys import datetime sys.path.append('./utils') from torch import optim from torch import nn from torch import multiprocessing from torch.optim import lr_scheduler from torch.autograd import Variable from torch.utils.data import DataLoader, ConcatDataset from utils.builders import SingleViewDepthTripletBuilder, MultiViewDepthTripletBuilder, MultiViewTripletBuilder, SingleViewTripletBuilder from utils.builder_utils import distance, Logger, ensure_folder, collate_fn, time_stamped from utils.vocabulary import Vocabulary from ipdb import set_trace from sklearn.preprocessing import OneHotEncoder from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence from torchvision import transforms import torchvision.utils as vutils import torchvision.models as models from torchvision import datasets from tensorboardX import SummaryWriter import matplotlib.pyplot as plt from shutil import copy2 import importlib from pyquaternion import Quaternion from models.pose_predictor_euler_crop import define_model from utils.plot_utils import plot_mean from utils.rot_utils_old import create_rot_from_vector, rotationMatrixToEulerAngles, \ isRotationMatrix, eulerAnglesToRotationMatrix, \ norm_sincos, sincos2rotm from utils.network_utils import loss_rotation, loss_euler_reparametrize, loss_axisangle, batch_size, apply,\ loss_quat, loss_quat_single, euler_XYZ_to_reparam, loss_quat_huber from utils.plot_utils import plot_mean os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152 os.environ["CUDA_VISIBLE_DEVICES"]= "1,2,3" IMAGE_SIZE = (299, 299) NUM_VIEWS = 1 SAMPLE_SIZE = 40 VAL_SEQS =5 TRAIN_SEQS_PER_EPOCH = 80 LOSS_FN = loss_euler_reparametrize EXP_ROOT_DIR = '/media/hdd/msieb/data/tcn_data/experiments' sys.path.append(EXP_ROOT_DIR) class Trainer(object): def __init__(self, use_cuda, load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args, multi_gpu=True): self.use_cuda = use_cuda self.load_model = load_model self.model_folder = model_folder self.validation_directory = validation_directory self.train_directory = train_directory self.args = args self.builder = builder self.loss_fn = loss_fn self.logdir = join(model_folder, 'logs') self.writer = SummaryWriter(self.logdir) self.logger = Logger(self.args.log_file) self.itr = 0 # Create Model self.model = self.create_model() if multi_gpu: self.model = torch.nn.DataParallel(self.model, device_ids=range(torch.cuda.device_count())) # Build validation set validation_builder = builder(self.args.n_views, validation_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE) validation_set = [validation_builder.build_set() for i in range(VAL_SEQS)] validation_set = ConcatDataset(validation_set) self.len_validation_set = len(validation_set) del validation_builder self.validation_loader = DataLoader( validation_set, batch_size=8, shuffle=False, pin_memory=self.use_cuda, ) self.validation_calls = 0 # Build Training Set self.triplet_builder = builder(self.args.n_views, \ train_directory, IMAGE_SIZE, self.args, toRot=True, sample_size=SAMPLE_SIZE) self.training_queue = multiprocessing.Queue(1) dataset_builder_process = multiprocessing.Process(target=self.build_set, args=(self.training_queue, self.triplet_builder, self.logger), daemon=True) dataset_builder_process.start() # Get Logger # Model specific setup # self.optimizer = optim.SGD(self.model.parameters(), 
lr=self.args.lr_start, momentum=0.9) self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08) # This will diminish the learning rate at the milestones ///// 0.1, 0.01, 0.001 if not using automized scheduler self.learning_rate_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, 'min') # self.criterion = nn.CrossEntropyLoss() def train(self): trn_losses_ = [] val_losses_= [] val_acc_ = [] trn_acc_ = [] for epoch in range(self.args.start_epoch, self.args.start_epoch + self.args.epochs): print("=" * 20) self.logger.info("Starting epoch: {0} ".format(epoch)) dataset = self.training_queue.get() data_loader = DataLoader( dataset=dataset, batch_size=self.args.minibatch_size, # batch_size(epoch, self.args.max_minibatch_size), shuffle=True, pin_memory=self.use_cuda, ) train_embedding_features_buffer = [] train_images_buffer = [] train_labels = [] correct = 0 for _ in range(0, 1): losses = [] for minibatch in data_loader: if self.use_cuda: anchor_frames = minibatch[0].cuda() #anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix # frames = Variable(minibatch) loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats) losses.append(loss.data.cpu().numpy()) correct += (torch.norm(a_pred - anchor_quats, 2) < 1).data.cpu().numpy().sum() # print(gradcheck(loss_fn, (tcn, minibatch,))) self.optimizer.zero_grad() loss.backward() self.optimizer.step() # Add embeddings train_labels.append(anchor_quats) train_embedding_features_buffer.append(anchor_quats) train_images_buffer.append(anchor_frames) print("logging to {}".format(self.logdir)) self.writer.add_scalar('data/train_loss', np.mean(losses), self.itr) self.writer.add_scalar('data/train_correct', correct / len(data_loader), self.itr) self.itr += 1 trn_losses_.append(np.mean(losses)) self.logger.info('train loss: ', np.mean(losses)) self.logger.info("Training score correct {correct}/{total}".format( correct=correct, total=len(data_loader) )) trn_acc_.append(correct) self.writer.add_image('frame_1', minibatch[0][0], self.itr) # self.writer.add_image('pose1', str(minibatch[1][0].data.detach().cpu().numpy()), self.itr) self.writer.add_image('frame_2', minibatch[0][1], self.itr) # self.writer.add_image('pose_2', str(minibatch[1][1].data.detach().cpu().numpy()), self.itr) self.writer.add_image('frame_3', minibatch[0][2], self.itr) # self.writer.add_image('pose_3', str(minibatch[1][2].data.detach().cpu().numpy()), self.itr) self.writer.add_image('frame_4', minibatch[0][3], self.itr) # self.writer.add_image('pose_4', str(minibatch[1][3].data.detach().cpu().numpy()), self.itr) # Get embeddings features = torch.cat(train_embedding_features_buffer[:30]).squeeze_() labels = torch.cat(train_labels[:30]).squeeze_() # features = train_embedding_features_buffer.view(train_embedding_features_buffer.shape[0]*train_embedding_features_buffer.shape[1], -1) # label = torch.Tensor(np.asarray(label_buffer)) images = torch.cat(train_images_buffer[:30]).squeeze_()#/255.0, [0, 3, 1, 2] self.writer.add_embedding(features, metadata=labels, label_img=images, global_step=epoch) if epoch % 1 == 0: loss, correct = self.validate() self.learning_rate_scheduler.step(loss) val_losses_.append(loss) val_acc_.append(correct) if epoch % self.args.save_every == 0 and epoch != 0: self.logger.info('Saving model.') self.save_model(self.model, self.model_filename(self.args.model_name, epoch), join(self.model_folder, 'weight_files')) print("logging to 
{}".format(self.logdir)) plot_mean(trn_losses_, self.model_folder, 'train_loss') plot_mean(val_losses_, self.model_folder, 'validation_loss') plot_mean(trn_acc_, self.model_folder, 'train_acc') plot_mean(val_acc_, self.model_folder, 'validation_accuracy') # plot_mean(val_acc_no_margin_, self.model_folder, 'validation_accuracy_no_margin') def validate(self): # Run model on validation data and log results correct = 0 losses = [] for minibatch in self.validation_loader: if self.use_cuda: anchor_frames = minibatch[0].cuda() #anchor_euler_reparam = minibatch[1].cuda() # load as 3x3 rotation matrix anchor_quats = minibatch[1].cuda() # load as 3x3 rotation matrix loss, a_pred = self.loss_fn(self.model, anchor_frames, anchor_quats) losses.append(loss.data.cpu().numpy()) correct += (torch.norm(a_pred - anchor_quats, 2) < 0.1).data.cpu().numpy().sum() self.writer.add_scalar('data/valid_loss', np.mean(losses), self.validation_calls) self.writer.add_scalar('data/validation_correct', correct / self.len_validation_set, self.validation_calls) self.validation_calls += 1 loss = np.mean(losses) self.logger.info("Validation score correct {correct}/{total}".format( correct=correct, total=self.len_validation_set )) self.logger.info('val loss: ',loss) return loss, correct def model_filename(self, model_name, epoch): return "{model_name}-epoch-{epoch}.pk".format(model_name=model_name, epoch=epoch) def save_model(self, model, filename, model_folder): ensure_folder(model_folder) model_path = os.path.join(model_folder, filename) torch.save(model.state_dict(), model_path) def build_set(self, queue, triplet_builder, log): while 1: datasets = [] for i in range(TRAIN_SEQS_PER_EPOCH): dataset = triplet_builder.build_set() datasets.append(dataset) dataset = ConcatDataset(datasets) # log.info('Created {0} triplets'.format(len(dataset))) queue.put(dataset) def create_model(self): model = define_model(pretrained=True) # model = PosNet() if self.load_model: model_path = os.path.join( self.model_folder, self.load_model ) # map_location allows us to load models trained on cuda to cpu. 
model.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage)) if self.use_cuda: model = model.cuda() return model def batch_size(self, epoch, max_size): exponent = epoch // 100 return min(max(2 ** (exponent), 2), max_size) def main(args): # module = importlib.import_module(args.exp_name + '.config') # conf = getattr(module, 'Config_Isaac_Server')() # EXP_DIR = conf.EXP_DIR # MODEL_FOLDER = conf.MODEL_FOLDER # GPU Configuration device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') use_cuda = torch.cuda.is_available() # Load model model_folder = join(EXP_ROOT_DIR, args.exp_name, 'trained_models', args.run_name, time_stamped()) if not os.path.exists(model_folder): os.makedirs(model_folder) # Get data loader builder and loss function builder = getattr(importlib.import_module('utils.builders'), args.builder) loss_fn = LOSS_FN # Define train and validation directories train_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/train/') validation_directory = join(EXP_ROOT_DIR, args.exp_name, 'videos/valid/') # Copies of executed config if not os.path.exists('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments'): os.makedirs('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/experiments') copy2('/'.join(os.path.realpath(__file__).split('/')[:-1]) + '/train_tcn_no_captions.py', model_folder) copy2('/'.join(os.path.realpath(__file__).split('/')[:-2]) + '/gps-lfd' + '/config.py', model_folder) # Build training class trainer = Trainer(use_cuda, args.load_model, model_folder, train_directory, validation_directory, builder, loss_fn, args) trainer.train() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--start-epoch', type=int, default=0) parser.add_argument('--epochs', type=int, default=1000) parser.add_argument('--save-every', type=int, default=10) parser.add_argument('--load-model', type=str, required=False) parser.add_argument('--minibatch-size', type=int, default=8) parser.add_argument('--model-name', type=str, default='tcn') parser.add_argument('--log-file', type=str, default='./out.log') parser.add_argument('--lr-start', type=float, default=0.001) parser.add_argument('--n-views', type=int, default=NUM_VIEWS) parser.add_argument('--alpha', type=float, default=0.01, help='weighing factor of language loss to triplet loss') # Model parameters # Path parameters parser.add_argument('--exp-name', type=str, required=True) parser.add_argument('--run-name', type=str, required=True) parser.add_argument('--builder', type=str, required=True) args = parser.parse_args() print(args) main(args)
train_pose_euler_crop.py
14,230
see issue 152 Create Model Build validation set Build Training Set Get Logger Model specific setup self.optimizer = optim.SGD(self.model.parameters(), lr=self.args.lr_start, momentum=0.9) This will diminish the learning rate at the milestones ///// 0.1, 0.01, 0.001 if not using automized scheduler self.criterion = nn.CrossEntropyLoss() batch_size(epoch, self.args.max_minibatch_size),anchor_euler_reparam = minibatch[1].cuda() load as 3x3 rotation matrix load as 3x3 rotation matrix frames = Variable(minibatch) print(gradcheck(loss_fn, (tcn, minibatch,))) Add embeddings self.writer.add_image('pose1', str(minibatch[1][0].data.detach().cpu().numpy()), self.itr) self.writer.add_image('pose_2', str(minibatch[1][1].data.detach().cpu().numpy()), self.itr) self.writer.add_image('pose_3', str(minibatch[1][2].data.detach().cpu().numpy()), self.itr) self.writer.add_image('pose_4', str(minibatch[1][3].data.detach().cpu().numpy()), self.itr) Get embeddings features = train_embedding_features_buffer.view(train_embedding_features_buffer.shape[0]*train_embedding_features_buffer.shape[1], -1) label = torch.Tensor(np.asarray(label_buffer))/255.0, [0, 3, 1, 2] plot_mean(val_acc_no_margin_, self.model_folder, 'validation_accuracy_no_margin') Run model on validation data and log resultsanchor_euler_reparam = minibatch[1].cuda() load as 3x3 rotation matrix load as 3x3 rotation matrix log.info('Created {0} triplets'.format(len(dataset))) model = PosNet() map_location allows us to load models trained on cuda to cpu. module = importlib.import_module(args.exp_name + '.config') conf = getattr(module, 'Config_Isaac_Server')() EXP_DIR = conf.EXP_DIR MODEL_FOLDER = conf.MODEL_FOLDER GPU Configuration Load model Get data loader builder and loss function Define train and validation directories Copies of executed config Build training class Model parameters Path parameters
1,888
en
0.405994
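One detail in the training script above that may not do what the comments suggest: torch.norm(a_pred - anchor_quats, 2) reduces over the whole minibatch, so `correct` grows by at most one per batch while being reported against len(data_loader). A hedged per-sample variant, with hypothetical dummy tensors so the snippet runs on its own:

# Hedged sketch: count correct samples rather than correct batches.
import torch

a_pred = torch.zeros(8, 3)              # hypothetical batch of predicted poses
anchor_quats = torch.ones(8, 3) * 0.1   # hypothetical targets
per_sample_err = torch.norm(a_pred - anchor_quats, dim=1)  # one distance per sample
correct = (per_sample_err < 1.0).sum().item()              # counts samples, not batches
print(correct)  # 8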
#!/usr/bin/env python3 #------------------------------------------------------------- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # #------------------------------------------------------------- from __future__ import print_function import os import sys import platform try: exec(open('systemds/project_info.py').read()) except IOError: print("Could not read project_info.py.", file=sys.stderr) sys.exit() ARTIFACT_NAME = __project_artifact_id__ ARTIFACT_VERSION = __project_version__ ARTIFACT_VERSION_SHORT = ARTIFACT_VERSION.split("-")[0] root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd()))) src_path_prefix = os.path.join(root_dir, 'src', 'main', 'python', 'dist', ARTIFACT_NAME + '-' + ARTIFACT_VERSION_SHORT) src_path = src_path_prefix + '.zip' if platform.system() == "Windows" and os.path.exists( src_path_prefix + '.zip') else src_path_prefix + '.tar.gz' os.rename( src_path, os.path.join(root_dir, 'target', ARTIFACT_NAME + '-' + ARTIFACT_VERSION + '-python.tar.gz')) wheel_name = '-'.join([ARTIFACT_NAME, ARTIFACT_VERSION_SHORT, 'py3', 'none', 'any.whl']) wheel = os.path.join(root_dir, 'src', 'main', 'python', 'dist', wheel_name) os.rename(wheel, os.path.join(root_dir, 'target', wheel_name))
src/main/python/post_setup.py
1,996
!/usr/bin/env python3------------------------------------------------------------- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.-------------------------------------------------------------
896
en
0.80726
import logging import numpy as np import math import psutil import time from autogluon.common.features.types import R_BOOL, R_CATEGORY, R_OBJECT, S_BOOL, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT from autogluon.core.constants import REGRESSION from autogluon.core.utils.exceptions import NotEnoughMemoryError from autogluon.core.models.abstract.model_trial import skip_hpo from autogluon.core.models import AbstractModel from autogluon.core.utils.utils import normalize_pred_probas logger = logging.getLogger(__name__) # TODO: Normalize data! class KNNModel(AbstractModel): """ KNearestNeighbors model (scikit-learn): https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html """ def __init__(self, **kwargs): super().__init__(**kwargs) self._X_unused_index = None # Keeps track of unused training data indices, necessary for LOO OOF generation def _get_model_type(self): if self.params_aux.get('use_daal', True): try: # TODO: Add more granular switch, currently this affects all future KNN models even if they had `use_daal=False` from sklearnex import patch_sklearn patch_sklearn("knn_classifier") patch_sklearn("knn_regressor") # daal backend for KNN seems to be 20-40x+ faster than native sklearn with no downsides. logger.log(15, '\tUsing daal4py KNN backend...') except: pass try: from ._knn_loo_variants import KNeighborsClassifier, KNeighborsRegressor except: from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor logger.warning('WARNING: Leave-one-out variants of KNN failed to import. Falling back to standard KNN implementations.') if self.problem_type == REGRESSION: return KNeighborsRegressor else: return KNeighborsClassifier def _preprocess(self, X, **kwargs): X = super()._preprocess(X, **kwargs) X = X.fillna(0).to_numpy(dtype=np.float32) return X def _set_default_params(self): default_params = { 'weights': 'uniform', 'n_jobs': -1, } for param, val in default_params.items(): self._set_default_param_value(param, val) def _get_default_auxiliary_params(self) -> dict: default_auxiliary_params = super()._get_default_auxiliary_params() extra_auxiliary_params = dict( ignored_type_group_raw=[R_BOOL, R_CATEGORY, R_OBJECT], # TODO: Eventually use category features ignored_type_group_special=[S_BOOL, S_TEXT_NGRAM, S_TEXT_SPECIAL, S_DATETIME_AS_INT], ) default_auxiliary_params.update(extra_auxiliary_params) return default_auxiliary_params @classmethod def _get_default_ag_args(cls) -> dict: default_ag_args = super()._get_default_ag_args() extra_ag_args = {'valid_stacker': False} default_ag_args.update(extra_ag_args) return default_ag_args @classmethod def _get_default_ag_args_ensemble(cls, **kwargs) -> dict: default_ag_args_ensemble = super()._get_default_ag_args_ensemble(**kwargs) extra_ag_args_ensemble = {'use_child_oof': True} default_ag_args_ensemble.update(extra_ag_args_ensemble) return default_ag_args_ensemble # TODO: Enable HPO for KNN def _get_default_searchspace(self): spaces = {} return spaces def _fit(self, X, y, time_limit=None, sample_weight=None, **kwargs): time_start = time.time() X = self.preprocess(X) self._validate_fit_memory_usage(X=X) # TODO: Can incorporate this into samples, can fit on portion of data to satisfy memory instead of raising exception immediately if sample_weight is not None: # TODO: support logger.log(15, "sample_weight not yet supported for KNNModel, this model will ignore them in training.") num_rows_max = len(X) # FIXME: v0.1 Must store final num rows for refit_full or else will use everything! 
Worst case refit_full could train far longer than the original model. if time_limit is None or num_rows_max <= 10000: self.model = self._get_model_type()(**self._get_model_params()).fit(X, y) else: self.model = self._fit_with_samples(X=X, y=y, time_limit=time_limit - (time.time() - time_start)) def _validate_fit_memory_usage(self, X): max_memory_usage_ratio = self.params_aux['max_memory_usage_ratio'] model_size_bytes = 4 * X.shape[0] * X.shape[1] # Assuming float32 types expected_final_model_size_bytes = model_size_bytes * 3.6 # Roughly what can be expected of the final KNN model in memory size if expected_final_model_size_bytes > 10000000: # Only worth checking if expected model size is >10MB available_mem = psutil.virtual_memory().available model_memory_ratio = expected_final_model_size_bytes / available_mem if model_memory_ratio > (0.15 * max_memory_usage_ratio): logger.warning(f'\tWarning: Model is expected to require {round(model_memory_ratio * 100, 2)}% of available memory...') if model_memory_ratio > (0.20 * max_memory_usage_ratio): raise NotEnoughMemoryError # don't train full model to avoid OOM error # TODO: Won't work for RAPIDS without modification # TODO: Technically isn't OOF, but can be used inplace of OOF. Perhaps rename to something more accurate? def get_oof_pred_proba(self, X, normalize=None, **kwargs): """X should be the same X passed to `.fit`""" y_oof_pred_proba = self._get_oof_pred_proba(X=X, **kwargs) if normalize is None: normalize = self.normalize_pred_probas if normalize: y_oof_pred_proba = normalize_pred_probas(y_oof_pred_proba, self.problem_type) y_oof_pred_proba = y_oof_pred_proba.astype(np.float32) return y_oof_pred_proba def _get_oof_pred_proba(self, X, **kwargs): if callable(getattr(self.model, "predict_proba_loo", None)): y_oof_pred_proba = self.model.predict_proba_loo() elif callable(getattr(self.model, "predict_loo", None)): y_oof_pred_proba = self.model.predict_loo() else: raise AssertionError(f'Model class {type(self.model)} does not support out-of-fold prediction generation.') y_oof_pred_proba = self._convert_proba_to_unified_form(y_oof_pred_proba) if X is not None and self._X_unused_index: X_unused = X.iloc[self._X_unused_index] y_pred_proba_new = self.predict_proba(X_unused) X_unused_index = set(self._X_unused_index) num_rows = len(X) X_used_index = [i for i in range(num_rows) if i not in X_unused_index] oof_pred_shape = y_oof_pred_proba.shape if len(oof_pred_shape) == 1: y_oof_tmp = np.zeros(num_rows, dtype=np.float32) y_oof_tmp[X_used_index] = y_oof_pred_proba y_oof_tmp[self._X_unused_index] = y_pred_proba_new else: y_oof_tmp = np.zeros((num_rows, oof_pred_shape[1]), dtype=np.float32) y_oof_tmp[X_used_index, :] = y_oof_pred_proba y_oof_tmp[self._X_unused_index, :] = y_pred_proba_new y_oof_pred_proba = y_oof_tmp return y_oof_pred_proba # TODO: Consider making this fully generic and available to all models def _fit_with_samples(self, X, y, time_limit, start_samples=10000, max_samples=None, sample_growth_factor=2, sample_time_growth_factor=8): """ Fit model with samples of the data repeatedly, gradually increasing the amount of data until time_limit is reached or all data is used. X and y must already be preprocessed. Parameters ---------- X : np.ndarray The training data features (preprocessed). y : Series The training data ground truth labels. time_limit : float, default = None Time limit in seconds to adhere to when fitting model. start_samples : int, default = 10000 Number of samples to start with. 
This will be multiplied by sample_growth_factor after each model fit to determine the next number of samples. For example, if start_samples=10000, sample_growth_factor=2, then the number of samples per model fit would be [10000, 20000, 40000, 80000, ...] max_samples : int, default = None The maximum number of samples to use. If None or greater than the number of rows in X, then it is set equal to the number of rows in X. sample_growth_factor : float, default = 2 The rate of growth in sample size between each model fit. If 2, then the sample size doubles after each fit. sample_time_growth_factor : float, default = 8 The multiplier to the expected fit time of the next model. If `sample_time_growth_factor=8` and a model took 10 seconds to train, the next model fit will be expected to take 80 seconds. If an expected time is greater than the remaining time in `time_limit`, the model will not be trained and the method will return early. """ time_start = time.time() num_rows_samples = [] if max_samples is None: num_rows_max = len(X) else: num_rows_max = min(len(X), max_samples) num_rows_cur = start_samples while True: num_rows_cur = min(num_rows_cur, num_rows_max) num_rows_samples.append(num_rows_cur) if num_rows_cur == num_rows_max: break num_rows_cur *= sample_growth_factor num_rows_cur = math.ceil(num_rows_cur) if num_rows_cur * 1.5 >= num_rows_max: num_rows_cur = num_rows_max def sample_func(chunk, frac): # Guarantee at least 1 sample (otherwise log_loss would crash or model would return different column counts in pred_proba) n = max(math.ceil(len(chunk) * frac), 1) return chunk.sample(n=n, replace=False, random_state=0) if self.problem_type != REGRESSION: y_df = y.to_frame(name='label').reset_index(drop=True) else: y_df = None time_start_sample_loop = time.time() time_limit_left = time_limit - (time_start_sample_loop - time_start) model_type = self._get_model_type() idx = None for i, samples in enumerate(num_rows_samples): if samples != num_rows_max: if self.problem_type == REGRESSION: idx = np.random.choice(num_rows_max, size=samples, replace=False) else: idx = y_df.groupby('label', group_keys=False).apply(sample_func, frac=samples/num_rows_max).index X_samp = X[idx, :] y_samp = y.iloc[idx] else: X_samp = X y_samp = y idx = None self.model = model_type(**self._get_model_params()).fit(X_samp, y_samp) time_limit_left_prior = time_limit_left time_fit_end_sample = time.time() time_limit_left = time_limit - (time_fit_end_sample - time_start) time_fit_sample = time_limit_left_prior - time_limit_left time_required_for_next = time_fit_sample * sample_time_growth_factor logger.log(15, f'\t{round(time_fit_sample, 2)}s \t= Train Time (Using {samples}/{num_rows_max} rows) ({round(time_limit_left, 2)}s remaining time)') if time_required_for_next > time_limit_left and i != len(num_rows_samples) - 1: logger.log(20, f'\tNot enough time to train KNN model on all training rows. Fit {samples}/{num_rows_max} rows. 
(Training KNN model on {num_rows_samples[i+1]} rows is expected to take {round(time_required_for_next, 2)}s)') break if idx is not None: idx = set(idx) self._X_unused_index = [i for i in range(num_rows_max) if i not in idx] return self.model # TODO: Add HPO def _hyperparameter_tune(self, **kwargs): return skip_hpo(self, **kwargs) def _more_tags(self): return {'valid_oof': True} class FAISSModel(KNNModel): def _get_model_type(self): from .knn_utils import FAISSNeighborsClassifier, FAISSNeighborsRegressor if self.problem_type == REGRESSION: return FAISSNeighborsRegressor else: return FAISSNeighborsClassifier def _set_default_params(self): default_params = { 'index_factory_string': 'Flat', } for param, val in default_params.items(): self._set_default_param_value(param, val) super()._set_default_params() @classmethod def _get_default_ag_args_ensemble(cls, **kwargs) -> dict: default_ag_args_ensemble = super()._get_default_ag_args_ensemble(**kwargs) extra_ag_args_ensemble = {'use_child_oof': False} default_ag_args_ensemble.update(extra_ag_args_ensemble) return default_ag_args_ensemble def _more_tags(self): return {'valid_oof': False}
tabular/src/autogluon/tabular/models/knn/knn_model.py
13,614
KNearestNeighbors model (scikit-learn): https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html Fit model with samples of the data repeatedly, gradually increasing the amount of data until time_limit is reached or all data is used. X and y must already be preprocessed. Parameters ---------- X : np.ndarray The training data features (preprocessed). y : Series The training data ground truth labels. time_limit : float, default = None Time limit in seconds to adhere to when fitting model. start_samples : int, default = 10000 Number of samples to start with. This will be multiplied by sample_growth_factor after each model fit to determine the next number of samples. For example, if start_samples=10000, sample_growth_factor=2, then the number of samples per model fit would be [10000, 20000, 40000, 80000, ...] max_samples : int, default = None The maximum number of samples to use. If None or greater than the number of rows in X, then it is set equal to the number of rows in X. sample_growth_factor : float, default = 2 The rate of growth in sample size between each model fit. If 2, then the sample size doubles after each fit. sample_time_growth_factor : float, default = 8 The multiplier to the expected fit time of the next model. If `sample_time_growth_factor=8` and a model took 10 seconds to train, the next model fit will be expected to take 80 seconds. If an expected time is greater than the remaining time in `time_limit`, the model will not be trained and the method will return early. X should be the same X passed to `.fit` TODO: Normalize data! Keeps track of unused training data indices, necessary for LOO OOF generation TODO: Add more granular switch, currently this affects all future KNN models even if they had `use_daal=False` daal backend for KNN seems to be 20-40x+ faster than native sklearn with no downsides. TODO: Eventually use category features TODO: Enable HPO for KNN TODO: Can incorporate this into samples, can fit on portion of data to satisfy memory instead of raising exception immediately TODO: support FIXME: v0.1 Must store final num rows for refit_full or else will use everything! Worst case refit_full could train far longer than the original model. Assuming float32 types Roughly what can be expected of the final KNN model in memory size Only worth checking if expected model size is >10MB don't train full model to avoid OOM error TODO: Won't work for RAPIDS without modification TODO: Technically isn't OOF, but can be used inplace of OOF. Perhaps rename to something more accurate? TODO: Consider making this fully generic and available to all models Guarantee at least 1 sample (otherwise log_loss would crash or model would return different column counts in pred_proba) TODO: Add HPO
2,822
en
0.846049
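To make the sampling schedule in _fit_with_samples above concrete, here is a small worked example of the growth loop with the default start_samples=10000 and sample_growth_factor=2; the 130,000-row training set is an illustrative assumption.

# Worked example of the KNN sample-growth schedule (mirrors the loop above,
# including the "jump to max if within 1.5x" rule).
num_rows_max, num_rows_cur, schedule = 130_000, 10_000, []
while True:
    num_rows_cur = min(num_rows_cur, num_rows_max)
    schedule.append(num_rows_cur)
    if num_rows_cur == num_rows_max:
        break
    num_rows_cur *= 2
    if num_rows_cur * 1.5 >= num_rows_max:
        num_rows_cur = num_rows_max
print(schedule)  # [10000, 20000, 40000, 80000, 130000]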
from utils import * row_units = [cross(r, cols) for r in rows] column_units = [cross(rows, c) for c in cols] square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')] unitlist = row_units + column_units + square_units # TODO: Update the unit list to add the new diagonal units diagonal1 = [['A1', 'B2', 'C3', 'D4', 'E5', 'F6', 'G7', 'H8', 'I9']] diagonal2 = [['A9', 'B8', 'C7', 'D6', 'E5', 'F4', 'G3', 'H2', 'I1']] unitlist = unitlist + diagonal1 + diagonal2 # Must be called after all units (including diagonals) are added to the unitlist units = extract_units(unitlist, boxes) peers = extract_peers(units, boxes) def naked_twins(values): """Eliminate values using the naked twins strategy. The naked twins strategy says that if you have two or more unallocated boxes in a unit and there are only two digits that can go in those two boxes, then those two digits can be eliminated from the possible assignments of all other boxes in the same unit. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict The values dictionary with the naked twins eliminated from peers Notes ----- Your solution can either process all pairs of naked twins from the input once, or it can continue processing pairs of naked twins until there are no such pairs remaining -- the project assistant test suite will accept either convention. However, it will not accept code that does not process all pairs of naked twins from the original input. (For example, if you start processing pairs of twins and eliminate another pair of twins before the second pair is processed then your code will fail the PA test suite.) The first convention is preferred for consistency with the other strategies, and because it is simpler (since the reduce_puzzle function already calls this strategy repeatedly). See Also -------- Pseudocode for this algorithm on github: https://github.com/udacity/artificial-intelligence/blob/master/Projects/1_Sudoku/pseudocode.md """ """ out = values.copy() len_2_boxes = [box for box in values if len(values[box]) == 2] for boxA in len_2_boxes: boxAPeers = peers[boxA] for boxB in boxAPeers: if values[boxA] == values[boxB]: intersect = [val for val in boxAPeers if val in peers[boxB]] for peer in intersect: out[peer] = out[peer].replace(values[boxA], '') return out """ out = values.copy() for boxA in values: boxAPeers = peers[boxA] for boxB in boxAPeers: if values[boxA] == values[boxB] and len(values[boxA]) == 2: intersect = [val for val in boxAPeers if val in peers[boxB]] for peer in intersect: for digit in values[boxA]: out[peer] = out[peer].replace(digit, '') return out def eliminate(values): """Apply the eliminate strategy to a Sudoku puzzle The eliminate strategy says that if a box has a value assigned, then none of the peers of that box can have the same value. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict The values dictionary with the assigned values eliminated from peers """ solved_values = [box for box in values.keys() if len(values[box]) == 1] for box in solved_values: digit = values[box] for peer in peers[box]: values[peer] = values[peer].replace(digit,'') return values def only_choice(values): """Apply the only choice strategy to a Sudoku puzzle The only choice strategy says that if only one box in a unit allows a certain digit, then that box must be assigned that digit. 
Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict The values dictionary with all single-valued boxes assigned Notes ----- You should be able to complete this function by copying your code from the classroom """ for unit in unitlist: for digit in '123456789': dplaces = [box for box in unit if digit in values[box]] if len(dplaces) == 1: values[dplaces[0]] = digit return values def reduce_puzzle(values): """Reduce a Sudoku puzzle by repeatedly applying all constraint strategies Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict or False The values dictionary after continued application of the constraint strategies no longer produces any changes, or False if the puzzle is unsolvable """ solved_values = [box for box in values.keys() if len(values[box]) == 1] stalled = False while not stalled: solved_values_before = len([box for box in values.keys() if len(values[box]) == 1]) values = eliminate(values) values = only_choice(values) values = naked_twins(values) solved_values_after = len([box for box in values.keys() if len(values[box]) == 1]) stalled = solved_values_before == solved_values_after if len([box for box in values.keys() if len(values[box]) == 0]): return False return values def search(values): """Apply depth first search to solve Sudoku puzzles in order to solve puzzles that cannot be solved by repeated reduction alone. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict or False The values dictionary with all boxes assigned or False Notes ----- You should be able to complete this function by copying your code from the classroom and extending it to call the naked twins strategy. """ "Using depth-first search and propagation, try all possible values." # First, reduce the puzzle using the previous function values = reduce_puzzle(values) if values is False: return False ## Failed earlier if all(len(values[s]) == 1 for s in boxes): return values ## Solved! # Choose one of the unfilled squares with the fewest possibilities n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1) # Now use recurrence to solve each one of the resulting sudokus, and for value in values[s]: new_sudoku = values.copy() new_sudoku[s] = value attempt = search(new_sudoku) if attempt: return attempt def solve(grid): """Find the solution to a Sudoku puzzle using search and constraint propagation Parameters ---------- grid(string) a string representing a sudoku grid. Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3' Returns ------- dict or False The dictionary representation of the final sudoku grid or False if no solution exists. """ values = grid2values(grid) values = search(values) return values if __name__ == "__main__": diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3' display(grid2values(diag_sudoku_grid)) result = solve(diag_sudoku_grid) display(result) try: import PySudoku PySudoku.play(grid2values(diag_sudoku_grid), result, history) except SystemExit: pass except: print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
Projects/1_Sudoku/solution.py
7,761
Apply the eliminate strategy to a Sudoku puzzle The eliminate strategy says that if a box has a value assigned, then none of the peers of that box can have the same value. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict The values dictionary with the assigned values eliminated from peers Eliminate values using the naked twins strategy. The naked twins strategy says that if you have two or more unallocated boxes in a unit and there are only two digits that can go in those two boxes, then those two digits can be eliminated from the possible assignments of all other boxes in the same unit. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict The values dictionary with the naked twins eliminated from peers Notes ----- Your solution can either process all pairs of naked twins from the input once, or it can continue processing pairs of naked twins until there are no such pairs remaining -- the project assistant test suite will accept either convention. However, it will not accept code that does not process all pairs of naked twins from the original input. (For example, if you start processing pairs of twins and eliminate another pair of twins before the second pair is processed then your code will fail the PA test suite.) The first convention is preferred for consistency with the other strategies, and because it is simpler (since the reduce_puzzle function already calls this strategy repeatedly). See Also -------- Pseudocode for this algorithm on github: https://github.com/udacity/artificial-intelligence/blob/master/Projects/1_Sudoku/pseudocode.md Apply the only choice strategy to a Sudoku puzzle The only choice strategy says that if only one box in a unit allows a certain digit, then that box must be assigned that digit. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict The values dictionary with all single-valued boxes assigned Notes ----- You should be able to complete this function by copying your code from the classroom Reduce a Sudoku puzzle by repeatedly applying all constraint strategies Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict or False The values dictionary after continued application of the constraint strategies no longer produces any changes, or False if the puzzle is unsolvable Apply depth first search to solve Sudoku puzzles in order to solve puzzles that cannot be solved by repeated reduction alone. Parameters ---------- values(dict) a dictionary of the form {'box_name': '123456789', ...} Returns ------- dict or False The values dictionary with all boxes assigned or False Notes ----- You should be able to complete this function by copying your code from the classroom and extending it to call the naked twins strategy. Find the solution to a Sudoku puzzle using search and constraint propagation Parameters ---------- grid(string) a string representing a sudoku grid. Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3' Returns ------- dict or False The dictionary representation of the final sudoku grid or False if no solution exists. TODO: Update the unit list to add the new diagonal units Must be called after all units (including diagonals) are added to the unitlist First, reduce the puzzle using the previous function Failed earlier Solved! 
Choose one of the unfilled squares with the fewest possibilities Now use recurrence to solve each one of the resulting sudokus, and
3,710
en
0.768712
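As a toy illustration of the naked-twins rule described in the Sudoku solution above: if two boxes in a unit allow only the same two digits, those digits can be removed from every other box in that unit. The snippet below works on a single hypothetical 4-box unit with exactly one twin pair, not on the project's full 9x9 peer structure.

# Toy naked-twins elimination on one hypothetical unit.
unit = ['A1', 'A2', 'A3', 'A4']
values = {'A1': '23', 'A2': '23', 'A3': '1234', 'A4': '234'}
twins = [b for b in unit
         if len(values[b]) == 2 and sum(values[x] == values[b] for x in unit) == 2]
for box in unit:
    if box not in twins:
        for digit in values[twins[0]]:
            values[box] = values[box].replace(digit, '')
print(values)  # {'A1': '23', 'A2': '23', 'A3': '14', 'A4': '4'}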
""" Module: 'uheapq' on micropython-v1.16-esp32 """ # MCU: {'ver': 'v1.16', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.16.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.16.0', 'machine': 'ESP32 module (spiram) with ESP32', 'build': '', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'} # Stubber: 1.5.4 from typing import Any def heapify(*args, **kwargs) -> Any: ... def heappop(*args, **kwargs) -> Any: ... def heappush(*args, **kwargs) -> Any: ...
stubs/micropython-v1_16-esp32/uheapq.py
522
Module: 'uheapq' on micropython-v1.16-esp32 MCU: {'ver': 'v1.16', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.16.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.16.0', 'machine': 'ESP32 module (spiram) with ESP32', 'build': '', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'} Stubber: 1.5.4
346
en
0.057214
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module that contains base class for Melange Expando models.
"""

__authors__ = [
  '"Lennard de Rijk" <[email protected]>',
]

from google.appengine.ext import db

from soc.logic import dicts


class ExpandoBase(db.Expando):
  """Expando Base model.

  This might later on contain general functionalities like the
  ModelWithFieldAttributes model.
  """
  toDict = dicts.toDict
src/melange/src/soc/models/expando_base.py
997
Expando Base model. This might later on contain general functionalities like the ModelWithFieldAttributes model. Module that contains base class for Melange Expando models. !/usr/bin/env python2.5 Copyright 2009 the Melange authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
756
en
0.833272
from sqlalchemy.orm.exc import NoResultFound from zeeguu_core.model import User, Language, UserWord, Text, Bookmark def own_or_crowdsourced_translation(user, word: str, from_lang_code: str, context: str): own_past_translation = get_own_past_translation(user, word, from_lang_code, context) if own_past_translation: translations = [{'translation': own_past_translation, 'service_name': 'Own Last Translation', 'quality': 100}] return translations others_past_translation = get_others_past_translation(word, from_lang_code, context) if others_past_translation: translations = [{'translation': others_past_translation, 'service_name': 'Contributed Translation', 'quality': 100}] return translations return None def get_others_past_translation(word: str, from_lang_code: str, context: str): return _get_past_translation(word, from_lang_code, context) def get_own_past_translation(user, word: str, from_lang_code: str, context: str): return _get_past_translation(word, from_lang_code, context, user) def _get_past_translation(word: str, from_lang_code: str, context: str, user: User = None): try: from_language = Language.find(from_lang_code) origin_word = UserWord.find(word, from_language) text = Text.query.filter_by(content=context).one() query = Bookmark.query.filter_by(origin_id=origin_word.id, text_id=text.id) if user: query = query.filter_by(user_id=user.id) # prioritize older users (order_by returns a new query, so reassign it) query = query.order_by(Bookmark.user_id.asc()) return query.first().translation.word except Exception as e: print(e) return None
zeeguu_core/crowd_translations/__init__.py
1,791
prioritize older users
22
en
0.678888
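A hedged usage sketch of the lookup above, assuming a configured zeeguu_core database session and an existing user object; the word, language code and context are hypothetical values:

translations = own_or_crowdsourced_translation(
    user,
    word="Haus",                      # hypothetical word
    from_lang_code="de",              # hypothetical source language code
    context="Das Haus ist gross.",    # hypothetical sentence context
)
if translations:
    first = translations[0]
    print(first["translation"], "via", first["service_name"])
else:
    print("No past translation found; fall back to a translation service.")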
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved import argparse import logging import os import time import detectron2.utils.comm as comm import torch from d2go.config import ( CfgNode as CN, auto_scale_world_size, reroute_config_path, temp_defrost, ) from d2go.distributed import get_local_rank, get_num_processes_per_machine from d2go.runner import GeneralizedRCNNRunner, create_runner from d2go.utils.launch_environment import get_launch_environment from detectron2.utils.collect_env import collect_env_info from detectron2.utils.logger import setup_logger from detectron2.utils.serialize import PicklableWrapper from d2go.utils.helper import run_once from detectron2.utils.file_io import PathManager from mobile_cv.common.misc.py import FolderLock, MultiprocessingPdb, post_mortem_if_fail logger = logging.getLogger(__name__) def basic_argument_parser( distributed=True, requires_config_file=True, requires_output_dir=True, ): """ Basic cli tool parser for Detectron2Go binaries """ parser = argparse.ArgumentParser(description="PyTorch Object Detection Training") parser.add_argument( "--runner", type=str, default="d2go.runner.GeneralizedRCNNRunner", help="Full class name, i.e. (package.)module.class", ) parser.add_argument( "--config-file", help="path to config file", default="", required=requires_config_file, metavar="FILE", ) parser.add_argument( "--output-dir", help="When given, this will override the OUTPUT_DIR in the config-file", required=requires_output_dir, default=None, type=str, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) if distributed: parser.add_argument( "--num-processes", type=int, default=1, help="number of gpus per machine" ) parser.add_argument("--num-machines", type=int, default=1) parser.add_argument( "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)", ) parser.add_argument( "--dist-url", default="file:///tmp/d2go_dist_file_{}".format(time.time()) ) parser.add_argument("--dist-backend", type=str, default="NCCL") if not requires_config_file: # NOTE if not passing yaml file, user should explicitly set the # following args, and use `opts` for non-common usecase. parser.add_argument( "--datasets", type=str, nargs="+", required=True, help="cfg.DATASETS.TEST", ) parser.add_argument( "--min_size", type=int, required=True, help="cfg.INPUT.MIN_SIZE_TEST", ) parser.add_argument( "--max_size", type=int, required=True, help="cfg.INPUT.MAX_SIZE_TEST", ) return parser return parser def create_cfg_from_cli_args(args, default_cfg): """ Instead of loading from defaults.py, this binary only includes necessary configs building from scratch, and overrides them from args. There're two levels of config: _C: the config system used by this binary, which is a sub-set of training config, override by configurable_cfg. It can also be override by args.opts for convinience. configurable_cfg: common configs that user should explicitly specify in the args. 
""" _C = CN() _C.INPUT = default_cfg.INPUT _C.DATASETS = default_cfg.DATASETS _C.DATALOADER = default_cfg.DATALOADER _C.TEST = default_cfg.TEST if hasattr(default_cfg, "D2GO_DATA"): _C.D2GO_DATA = default_cfg.D2GO_DATA if hasattr(default_cfg, "TENSORBOARD"): _C.TENSORBOARD = default_cfg.TENSORBOARD # NOTE configs below might not be necessary, but must add to make code work _C.MODEL = CN() _C.MODEL.META_ARCHITECTURE = default_cfg.MODEL.META_ARCHITECTURE _C.MODEL.MASK_ON = default_cfg.MODEL.MASK_ON _C.MODEL.KEYPOINT_ON = default_cfg.MODEL.KEYPOINT_ON _C.MODEL.LOAD_PROPOSALS = default_cfg.MODEL.LOAD_PROPOSALS assert _C.MODEL.LOAD_PROPOSALS is False, "caffe2 model doesn't support" _C.OUTPUT_DIR = args.output_dir configurable_cfg = [ "DATASETS.TEST", args.datasets, "INPUT.MIN_SIZE_TEST", args.min_size, "INPUT.MAX_SIZE_TEST", args.max_size, ] cfg = _C.clone() cfg.merge_from_list(configurable_cfg) cfg.merge_from_list(args.opts) return cfg def prepare_for_launch(args): """ Load config, figure out working directory, create runner. - when args.config_file is empty, returned cfg will be the default one - returned output_dir will always be non empty, args.output_dir has higher priority than cfg.OUTPUT_DIR. """ print(args) runner = create_runner(args.runner) cfg = runner.get_default_cfg() if args.config_file: with PathManager.open(reroute_config_path(args.config_file), "r") as f: print("Loaded config file {}:\n{}".format(args.config_file, f.read())) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) else: cfg = create_cfg_from_cli_args(args, default_cfg=cfg) cfg.freeze() assert args.output_dir or args.config_file output_dir = args.output_dir or cfg.OUTPUT_DIR return cfg, output_dir, runner def setup_after_launch(cfg, output_dir, runner): """ Set things up after entering DDP, including - creating working directory - setting up logger - logging environment - initializing runner """ create_dir_on_global_main_process(output_dir) comm.synchronize() setup_loggers(output_dir) cfg.freeze() if cfg.OUTPUT_DIR != output_dir: with temp_defrost(cfg): logger.warning( "Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format( cfg.OUTPUT_DIR, output_dir ) ) cfg.OUTPUT_DIR = output_dir logger.info("Initializing runner ...") runner = initialize_runner(runner, cfg) log_info(cfg, runner) dump_cfg(cfg, os.path.join(output_dir, "config.yaml")) auto_scale_world_size(cfg, new_world_size=comm.get_world_size()) @run_once() def setup_loggers(output_dir, color=None): if not color: color = get_launch_environment() == "local" d2_logger = setup_logger( output_dir, distributed_rank=comm.get_rank(), color=color, name="detectron2", abbrev_name="d2", ) fvcore_logger = setup_logger( output_dir, distributed_rank=comm.get_rank(), color=color, name="fvcore", ) d2go_logger = setup_logger( output_dir, distributed_rank=comm.get_rank(), color=color, name="d2go", abbrev_name="d2go", ) mobile_cv_logger = setup_logger( output_dir, distributed_rank=comm.get_rank(), color=color, name="mobile_cv", abbrev_name="mobile_cv", ) # NOTE: all above loggers have FileHandler pointing to the same file as d2_logger. # Those files are opened upon creation, but it seems fine in 'a' mode. # NOTE: the root logger might has been configured by other applications, # since this already sub-top level, just don't propagate to root. 
d2_logger.propagate = False fvcore_logger.propagate = False d2go_logger.propagate = False mobile_cv_logger.propagate = False def log_info(cfg, runner): num_processes = get_num_processes_per_machine() logger.info( "Using {} processes per machine. Rank of current process: {}".format( num_processes, comm.get_rank() ) ) logger.info("Environment info:\n" + collect_env_info()) logger.info("Running with full config:\n{}".format(cfg)) logger.info("Running with runner: {}".format(runner)) def dump_cfg(cfg, path): if comm.is_main_process(): with PathManager.open(path, "w") as f: f.write(cfg.dump()) logger.info("Full config saved to {}".format(path)) def create_dir_on_local_main_process(dir): if get_local_rank() == 0 and dir: PathManager.mkdirs(dir) def create_dir_on_global_main_process(dir): if comm.get_rank() == 0 and dir: PathManager.mkdirs(dir) def initialize_runner(runner, cfg): runner = runner or GeneralizedRCNNRunner() runner._initialize(cfg) return runner def caffe2_global_init(logging_print_net_summary=0, num_threads=None): if num_threads is None: if get_num_processes_per_machine() > 1: # by default use single thread when DDP with multiple processes num_threads = 1 else: # GlobalInit will clean PyTorch's num_threads and set it to 1, # thus keep PyTorch's default value to make it truly default. num_threads = torch.get_num_threads() if not get_local_rank() == 0: logging_print_net_summary = 0 # only enable for local main process from caffe2.python import workspace workspace.GlobalInit( [ "caffe2", "--caffe2_log_level=2", "--caffe2_logging_print_net_summary={}".format(logging_print_net_summary), "--caffe2_omp_num_threads={}".format(num_threads), "--caffe2_mkl_num_threads={}".format(num_threads), ] ) logger.info("Using {} threads after GlobalInit".format(torch.get_num_threads())) def post_mortem_if_fail_for_main(main_func): def new_main_func(cfg, output_dir, *args, **kwargs): pdb_ = ( MultiprocessingPdb(FolderLock(output_dir)) if comm.get_world_size() > 1 else None # fallback to use normal pdb for single process ) return post_mortem_if_fail(pdb_)(main_func)(cfg, output_dir, *args, **kwargs) return PicklableWrapper(new_main_func)
d2go/setup.py
10,315
Basic cli tool parser for Detectron2Go binaries Instead of loading from defaults.py, this binary only includes necessary configs building from scratch, and overrides them from args. There're two levels of config: _C: the config system used by this binary, which is a sub-set of training config, override by configurable_cfg. It can also be override by args.opts for convinience. configurable_cfg: common configs that user should explicitly specify in the args. Load config, figure out working directory, create runner. - when args.config_file is empty, returned cfg will be the default one - returned output_dir will always be non empty, args.output_dir has higher priority than cfg.OUTPUT_DIR. Set things up after entering DDP, including - creating working directory - setting up logger - logging environment - initializing runner !/usr/bin/env python3 Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved NOTE if not passing yaml file, user should explicitly set the following args, and use `opts` for non-common usecase. NOTE configs below might not be necessary, but must add to make code work NOTE: all above loggers have FileHandler pointing to the same file as d2_logger. Those files are opened upon creation, but it seems fine in 'a' mode. NOTE: the root logger might has been configured by other applications, since this already sub-top level, just don't propagate to root. by default use single thread when DDP with multiple processes GlobalInit will clean PyTorch's num_threads and set it to 1, thus keep PyTorch's default value to make it truly default. only enable for local main process fallback to use normal pdb for single process
1,728
en
0.860506
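A minimal sketch of how the helpers above are intended to chain together for a single-process run; the config file path is hypothetical and this is not the full D2Go launch flow:

parser = basic_argument_parser()
args = parser.parse_args([
    "--config-file", "configs/faster_rcnn.yaml",   # hypothetical config path
    "--output-dir", "/tmp/d2go_run",
])
cfg, output_dir, runner = prepare_for_launch(args)
setup_after_launch(cfg, output_dir, runner)        # creates the output dir, loggers and runner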
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from ax.exceptions.model import ModelError from ax.models.torch.utils import ( _generate_sobol_points, is_noiseless, normalize_indices, subset_model, tensor_callable_to_array_callable, ) from ax.utils.common.testutils import TestCase from botorch.models import HeteroskedasticSingleTaskGP, ModelListGP, SingleTaskGP from torch import Tensor class TorchUtilsTest(TestCase): def test_is_noiseless(self): x = torch.zeros(1, 1) y = torch.zeros(1, 1) se = torch.zeros(1, 1) model = SingleTaskGP(x, y) self.assertTrue(is_noiseless(model)) model = HeteroskedasticSingleTaskGP(x, y, se) self.assertFalse(is_noiseless(model)) with self.assertRaises(ModelError): is_noiseless(ModelListGP()) def testNormalizeIndices(self): indices = [0, 2] nlzd_indices = normalize_indices(indices, 3) self.assertEqual(nlzd_indices, indices) nlzd_indices = normalize_indices(indices, 4) self.assertEqual(nlzd_indices, indices) indices = [0, -1] nlzd_indices = normalize_indices(indices, 3) self.assertEqual(nlzd_indices, [0, 2]) with self.assertRaises(ValueError): nlzd_indices = normalize_indices([3], 3) with self.assertRaises(ValueError): nlzd_indices = normalize_indices([-4], 3) def testSubsetModel(self): x = torch.zeros(1, 1) y = torch.rand(1, 2) obj_t = torch.rand(2) model = SingleTaskGP(x, y) self.assertEqual(model.num_outputs, 2) # basic test, can subset obj_weights = torch.tensor([1.0, 0.0]) subset_model_results = subset_model(model, obj_weights) model_sub = subset_model_results.model obj_weights_sub = subset_model_results.objective_weights ocs_sub = subset_model_results.outcome_constraints obj_t_sub = subset_model_results.objective_thresholds self.assertIsNone(ocs_sub) self.assertIsNone(obj_t_sub) self.assertEqual(model_sub.num_outputs, 1) self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0]))) # basic test, cannot subset obj_weights = torch.tensor([1.0, 2.0]) subset_model_results = subset_model(model, obj_weights) model_sub = subset_model_results.model obj_weights_sub = subset_model_results.objective_weights ocs_sub = subset_model_results.outcome_constraints obj_t_sub = subset_model_results.objective_thresholds self.assertIsNone(ocs_sub) self.assertIsNone(obj_t_sub) self.assertIs(model_sub, model) # check identity self.assertIs(obj_weights_sub, obj_weights) # check identity self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1]))) # test w/ outcome constraints, can subset obj_weights = torch.tensor([1.0, 0.0]) ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0])) subset_model_results = subset_model(model, obj_weights, ocs) model_sub = subset_model_results.model obj_weights_sub = subset_model_results.objective_weights ocs_sub = subset_model_results.outcome_constraints obj_t_sub = subset_model_results.objective_thresholds self.assertEqual(model_sub.num_outputs, 1) self.assertIsNone(obj_t_sub) self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0]))) self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]]))) self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0]))) self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0]))) # test w/ outcome constraints, cannot subset obj_weights = torch.tensor([1.0, 0.0]) ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0])) subset_model_results = subset_model(model, obj_weights, ocs) 
model_sub = subset_model_results.model obj_weights_sub = subset_model_results.objective_weights ocs_sub = subset_model_results.outcome_constraints obj_t_sub = subset_model_results.objective_thresholds self.assertIs(model_sub, model) # check identity self.assertIsNone(obj_t_sub) self.assertIs(obj_weights_sub, obj_weights) # check identity self.assertIs(ocs_sub, ocs) # check identity self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1]))) # test w/ objective thresholds, cannot subset obj_weights = torch.tensor([1.0, 0.0]) ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0])) subset_model_results = subset_model(model, obj_weights, ocs, obj_t) model_sub = subset_model_results.model obj_weights_sub = subset_model_results.objective_weights ocs_sub = subset_model_results.outcome_constraints obj_t_sub = subset_model_results.objective_thresholds self.assertIs(model_sub, model) # check identity self.assertIs(obj_t, obj_t_sub) self.assertIs(obj_weights_sub, obj_weights) # check identity self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1]))) self.assertIs(ocs_sub, ocs) # check identity # test w/ objective thresholds, can subset obj_weights = torch.tensor([1.0, 0.0]) ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0])) subset_model_results = subset_model(model, obj_weights, ocs, obj_t) model_sub = subset_model_results.model obj_weights_sub = subset_model_results.objective_weights ocs_sub = subset_model_results.outcome_constraints obj_t_sub = subset_model_results.objective_thresholds self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0]))) self.assertEqual(model_sub.num_outputs, 1) self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0]))) self.assertTrue(torch.equal(obj_t_sub, obj_t[:1])) self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]]))) self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0]))) # test unsupported yvar = torch.ones(1, 2) model = HeteroskedasticSingleTaskGP(x, y, yvar) subset_model_results = subset_model(model, obj_weights) model_sub = subset_model_results.model obj_weights_sub = subset_model_results.objective_weights ocs_sub = subset_model_results.outcome_constraints obj_t_sub = subset_model_results.objective_thresholds self.assertIsNone(ocs_sub) self.assertIs(model_sub, model) # check identity self.assertIs(obj_weights_sub, obj_weights) # check identity self.assertTrue(torch.equal(subset_model_results.indices, torch.tensor([0, 1]))) # test error on size inconsistency obj_weights = torch.ones(3) with self.assertRaises(RuntimeError): subset_model(model, obj_weights) def testGenerateSobolPoints(self): bounds = [(0.0, 1.0) for _ in range(3)] linear_constraints = ( torch.tensor([[1, -1, 0]], dtype=torch.double), torch.tensor([[0]], dtype=torch.double), ) def test_rounding_func(x: Tensor) -> Tensor: return x gen_sobol = _generate_sobol_points( n_sobol=100, bounds=bounds, device=torch.device("cpu"), linear_constraints=linear_constraints, rounding_func=test_rounding_func, ) self.assertEqual(len(gen_sobol), 100) self.assertIsInstance(gen_sobol, Tensor) def testTensorCallableToArrayCallable(self): def tensor_func(x: Tensor) -> Tensor: return np.exp(x) new_func = tensor_callable_to_array_callable( tensor_func=tensor_func, device=torch.device("cpu") ) self.assertTrue(callable(new_func)) self.assertIsInstance(new_func(np.array([1.0, 2.0])), np.ndarray)
ax/models/tests/test_torch_model_utils.py
8,207
!/usr/bin/env python3 Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. basic test, can subset basic test, cannot subset check identity check identity test w/ outcome constraints, can subset test w/ outcome constraints, cannot subset check identity check identity check identity test w/ objective thresholds, cannot subset check identity check identity check identity test w/ objective thresholds, can subset test unsupported check identity check identity test error on size inconsistency
607
en
0.81579
#!/usr/bin/env python # -*- coding: utf-8 -*- """ .. py:currentmodule:: trim.montecarlo.source .. moduleauthor:: Hendrix Demers <[email protected]> """ # Copyright 2019 Hendrix Demers # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Standard library modules. # Third party modules. # Local modules. # Project modules. from trim.montecarlo.math import Point # Globals and constants variables. GROUP_SOURCE = "source" GROUP_POSITIONS = "position (nm)" GROUP_DIRECTION = "direction" ATTRIBUTE_KINETIC_ENERGY = "kinetic energy (keV)" ATTRIBUTE_MASS = "mass (amu)" ATTRIBUTE_ATOMIC_NUMBER = "atomic number" class Source: def __init__(self): # Default to Ar at 6 keV self.position_nm = Point(0.0, 0.0, 0.0) self.direction = Point(0.0, 0.0, -1.0) self.kinetic_energy_keV = 6.0 self.mass_amu = 39.962 self.atomic_number = 18 def write(self, parent): group = parent.require_group(GROUP_SOURCE) position_group = group.require_group(GROUP_POSITIONS) self.position_nm.write(position_group) direction_group = group.require_group(GROUP_DIRECTION) self.direction.write(direction_group) group.attrs[ATTRIBUTE_KINETIC_ENERGY] = self.kinetic_energy_keV group.attrs[ATTRIBUTE_MASS] = self.mass_amu group.attrs[ATTRIBUTE_ATOMIC_NUMBER] = self.atomic_number def read(self, parent): group = parent.require_group(GROUP_SOURCE) position_group = group.require_group(GROUP_POSITIONS) self.position_nm.read(position_group) direction_group = group.require_group(GROUP_DIRECTION) self.direction.read(direction_group) self.kinetic_energy_keV = group.attrs[ATTRIBUTE_KINETIC_ENERGY] self.mass_amu = group.attrs[ATTRIBUTE_MASS] self.atomic_number = group.attrs[ATTRIBUTE_ATOMIC_NUMBER]
trim/montecarlo/options/source.py
2,373
.. py:currentmodule:: trim.montecarlo.source .. moduleauthor:: Hendrix Demers <[email protected]> !/usr/bin/env python -*- coding: utf-8 -*- Copyright 2019 Hendrix Demers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Standard library modules. Third party modules. Local modules. Project modules. Globals and constants variables. Default to Ar at 6 keV
841
en
0.695874
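The write and read methods above expect an h5py-style parent that provides require_group and attrs. A hedged round-trip sketch; the import path is inferred from the file location shown above and the output filename is hypothetical:

import h5py
from trim.montecarlo.options.source import Source   # import path inferred from this record

source = Source()
source.kinetic_energy_keV = 10.0                     # override the default 6 keV Ar source

with h5py.File("options.h5", "w") as parent:
    source.write(parent)                             # creates the "source" group and its attributes

with h5py.File("options.h5", "r") as parent:
    restored = Source()
    restored.read(parent)
    print(restored.kinetic_energy_keV)               # 10.0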
#!/usr/bin/env python3 import utils utils.check_version((3,7)) # make sure we are running at least Python 3.7 utils.clear() # clear the screen print('Greetings!') color = '' while (color != 'red'): color = input("What is my favorite color? ") color = color.lower().strip() if (color == 'red'): print('Correct!') elif (color == 'pink'): print('Close!') else: print('Sorry, try again.')
main8.py
482
!/usr/bin/env python3 make sure we are running at least Python 3.7 clear the screen
83
en
0.821779
from qtpy.QtWidgets import QDialog, QLineEdit, QPushButton, QLabel, QVBoxLayout from brainrender_gui.style import style, update_css class AddRegionsWindow(QDialog): left = 250 top = 250 width = 400 height = 300 label_msg = ( "Write the acronyms of brainregions " + "you wish to add.\n[as 'space' separated strings (e.g.: STN TH)]" ) def __init__(self, main_window, palette): """ Creates a new window for user to input which regions to add to scene. Arguments: ---------- main_window: reference to the App's main window palette: main_window's palette, used to style widgets """ super().__init__() self.setWindowTitle("Add brain regions") self.ui() self.main_window = main_window self.setStyleSheet(update_css(style, palette)) def ui(self): """ Define UI's elements """ self.setGeometry(self.left, self.top, self.width, self.height) layout = QVBoxLayout() # Regions label = QLabel(self) label.setObjectName("PopupLabel") label.setText(self.label_msg) self.textbox = QLineEdit(self) # Alpha alpha_label = QLabel(self) alpha_label.setObjectName("PopupLabel") alpha_label.setText("Alpha") self.alpha_textbox = QLineEdit(self) self.alpha_textbox.setText(str(1.0)) # Color color_label = QLabel(self) color_label.setObjectName("PopupLabel") color_label.setText("Color") self.color_textbox = QLineEdit(self) self.color_textbox.setText("atlas") # Create a button in the window self.button = QPushButton("Add regions", self) self.button.clicked.connect(self.on_click) self.button.setObjectName("RegionsButton") layout.addWidget(label) layout.addWidget(self.textbox) layout.addWidget(alpha_label) layout.addWidget(self.alpha_textbox) layout.addWidget(color_label) layout.addWidget(self.color_textbox) layout.addWidget(self.button) self.setLayout(layout) self.show() def on_click(self): """ On click or 'Enter' get the regions from the input and call the add_regions method of the main window """ regions = self.textbox.text().split(" ") self.main_window.add_regions( regions, self.alpha_textbox.text(), self.color_textbox.text() ) self.close()
brainrender_gui/widgets/add_regions.py
2,615
Creates a new window for user to input which regions to add to scene. Arguments: ---------- main_window: reference to the App's main window palette: main_window's palette, used to style widgets On click or 'Enter' get the regions from the input and call the add_regions method of the main window Define UI's elements Regions Alpha Color Create a button in the window
370
en
0.71245
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import distro import logging import platform from pathlib import Path from typing import ( Union, ) from mozphab import environment from .bmo import BMOAPIError from .config import config from .environment import MOZPHAB_VERSION from .helpers import prompt from .logger import logger from .user import user_data class Telemetry: def __init__(self): """Initiate Glean, load pings and metrics.""" import glean logging.getLogger("glean").setLevel(logging.DEBUG) logger.debug("Initializing Glean...") glean.Glean.initialize( application_id="MozPhab", application_version=MOZPHAB_VERSION, upload_enabled=True, configuration=glean.Configuration(), data_dir=Path(environment.MOZBUILD_PATH) / "telemetry-data", ) self._pings = glean.load_pings(environment.MOZPHAB_MAIN_DIR / "pings.yaml") self._metrics = glean.load_metrics( environment.MOZPHAB_MAIN_DIR / "metrics.yaml" ) @property def environment(self): return self._metrics.mozphab.environment @property def usage(self): return self._metrics.mozphab.usage @property def user(self): return self._metrics.mozphab.user @property def submission(self): return self._metrics.mozphab.submission def _set_os(self): """Collect human readable information about the OS version. For Linux it is setting a distribution name and version. """ system, node, release, version, machine, processor = platform.uname() if system == "Linux": distribution_name, distribution_number, _ = distro.linux_distribution( full_distribution_name=False ) distribution_version = " ".join([distribution_name, distribution_number]) elif system == "Windows": _release, distribution_version, _csd, _ptype = platform.win32_ver() elif system == "Darwin": distribution_version, _versioninfo, _machine = platform.mac_ver() else: distribution_version = release self.environment.distribution_version.set(distribution_version) def _set_python(self): self.environment.python_version.set(platform.python_version()) def set_vcs(self, repo): self.environment.vcs.name.set(repo.vcs) self.environment.vcs.version.set(repo.vcs_version) def submit(self): self._pings.usage.submit() logger.debug("Telemetry submit called.") def set_metrics(self, args): """Sets metrics common to all commands.""" self.usage.command.set(args.command) self._set_os() self._set_python() self.usage.override_switch.set( getattr(args, "force_vcs", False) or getattr(args, "force", False) ) self.usage.command_time.start() self.user.installation.set(user_data.installation_id) self.user.id.set(user_data.user_code) class TelemetryDisabled: """Dummy class that does nothing.""" def __init__(*args, **kwargs): pass def __call__(self, *args, **kwargs): return self def __getattr__(self, *args, **kwargs): return self def update_user_data(): """Update user_data to enable or disable Telemetry. If employment data has been changed Telemetry might be switched on automatically. The opt-in decision is taken for the new employee. Non employees will have an option to enable data collection. 
""" is_employee_changed = user_data.set_user_data() if not is_employee_changed: return # Switch on Telemetry for employee or ask to opt-in for non-employee if user_data.is_employee: logger.warning( "Enabled collecting MozPhab usage data.\n" "See https://moz-conduit.readthedocs.io/en/latest" "/mozphab-data-collection.html" ) config.telemetry_enabled = True else: # user is new or no longer employee opt_in = ( prompt( "Would you like to allow MozPhab to collect usage data?", ["Yes", "No"], ) == "Yes" ) if opt_in: config.telemetry_enabled = True else: logger.info( "MozPhab usage data collection disabled.\n" "See https://moz-conduit.readthedocs.io/en/latest" "/mozphab-data-collection.html" ) config.telemetry_enabled = False config.write() def configure_telemetry(args): if args.command == "install-certificate": # Collecting data without a certificate is not possible. _Globals.telemetry = TelemetryDisabled() return if args.command == "self-update": # Avoid locking issues on Windows by not loading Glean when we're updating _Globals.telemetry = TelemetryDisabled() return # `user_data` file will remain empty until user calls MozPhab with a command # requiring existence of the Repository. if args.needs_repo: try: update_user_data() except BMOAPIError as err: # Error in retrieving user status. # We quietly allow to work without enabling Telemetry. logger.debug("BMOAPIErrori: %s", err) _Globals.telemetry = TelemetryDisabled() return # We can't call telemetry if user data was never collected. if not config.telemetry_enabled or not user_data.is_data_collected: _Globals.telemetry = TelemetryDisabled() return # Enable telemetry by swapping the telemetry global with a Glean backed object. _Globals.telemetry = Telemetry() telemetry().set_metrics(args) def telemetry(): return _Globals.telemetry class _Globals: """Container for holding globals in a way that can be easily replaced.""" telemetry: Union[Telemetry, TelemetryDisabled] = TelemetryDisabled()
mozphab/telemetry.py
6,235
Dummy class that does nothing. Container for holding globals in a way that can be easily replaced. Initiate Glean, load pings and metrics. Collect human readable information about the OS version. For Linux it is setting a distribution name and version. Sets metrics common to all commands. Update user_data to enable or disable Telemetry. If employment data has been changed Telemetry might be switched on automatically. The opt-in decision is taken for the new employee. Non employees will have an option to enable data collection. This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. Switch on Telemetry for employee or ask to opt-in for non-employee user is new or no longer employee Collecting data without a certificate is not possible. Avoid locking issues on Windows by not loading Glean when we're updating `user_data` file will remain empty until user calls MozPhab with a command requiring existence of the Repository. Error in retrieving user status. We quietly allow to work without enabling Telemetry. We can't call telemetry if user data was never collected. Enable telemetry by swapping the telemetry global with a Glean backed object.
1,294
en
0.897786
import os import numpy as np import pickle import pathlib from random import shuffle, choice def get_info_dataset(dataset_path, update=False): # TODO: Implements some checks to verify edits to the dataset from last pickle.dump(data) storing_data_path = dataset_path + "/info.txt" if update and os.path.exists(dataset_path + "/info.txt"): os.remove(dataset_path + "/info.txt") if os.path.isfile(storing_data_path): with open(storing_data_path, 'rb') as filehandle: data = pickle.load(filehandle) class_info = data['class_info'] ds_info = data['ds_info'] # CHECKS if the paths stored match the DB # TODO: This check just pick 3 elements and check existence, can be improved if not os.path.exists(choice(ds_info['train_paths'])) or not os.path.exists(choice(ds_info['val_paths'])) \ or not os.path.exists(choice(ds_info['test_paths'])): print(f"Dataset paths seem incorrect, " f"you should update the dataset info running '-m DATA -d {dataset_path}") exit() # Shuffle elements else: shuffle(ds_info['train_paths']) shuffle(ds_info['val_paths']) shuffle(ds_info['final_training_paths']) shuffle(ds_info['test_paths']) else: # Create dataset filepaths train_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/train") for file in f if ".png" in file or ".jpg" in file] val_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training/val") for file in f if ".png" in file or ".jpg" in file] final_training_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/training") for file in f if ".png" in file or ".jpg" in file] test_paths = [os.path.join(r, file) for r, d, f in os.walk(dataset_path + "/test") for file in f if ".png" in file or ".jpg" in file] ds_info = {'ds_type': 'images', 'train_paths': train_paths, 'val_paths': val_paths, 'test_paths': test_paths, 'final_training_paths': final_training_paths} temp_class_names = np.array([item.name for item in pathlib.Path(dataset_path + "/training/train").glob('*')]) # Sort class_names to keep same order, which influence training in one-hot encore, over different machines class_names = np.sort(temp_class_names, axis=-1) nclasses = len(class_names) class_info = {"class_names": class_names, "n_classes": nclasses} # GENERAL STATS size_train = len(train_paths) size_val = len(val_paths) size_test = len(test_paths) class_info.update({"train_size": size_train, "val_size": size_val, "test_size": size_test, 'info': {}}) for name in class_names: size_trainf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/train/{}".format(name))]) size_valf = sum([len(files) for r, d, files in os.walk(dataset_path + "/training/val/{}".format(name))]) size_testf = sum([len(files) for r, d, files in os.walk(dataset_path + "/test/{}".format(name))]) class_info['info']["{}".format(name)] = {} class_info['info']["{}".format(name)]['TRAIN'] = size_trainf class_info['info']["{}".format(name)]['VAL'] = size_valf class_info['info']["{}".format(name)]['TEST'] = size_testf class_info['info']["{}".format(name)]['TOT'] = size_testf + size_valf + size_trainf with open(storing_data_path, 'wb') as filehandle: data = {'ds_info': ds_info, 'class_info': class_info} pickle.dump(data, filehandle) return class_info, ds_info
utils/preprocessing_data.py
3,907
TODO: Implements some checks to verify edits to the dataset from last pickle.dump(data) CHECKS if the paths stored match the DB TODO: This check just pick 3 elements and check existence, can be improved Shuffle elements Create dataset filepaths Sort class_names to keep same order, which influence training in one-hot encore, over different machines GENERAL STATS
363
en
0.755581
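A hedged call sketch for get_info_dataset, assuming a hypothetical dataset root laid out as training/train, training/val and test sub-folders of class-named directories:

class_info, ds_info = get_info_dataset("/data/my_dataset", update=False)  # hypothetical path

print(class_info["n_classes"], "classes:", class_info["class_names"])
print("train/val/test sizes:",
      class_info["train_size"], class_info["val_size"], class_info["test_size"])
first_paths = ds_info["train_paths"][:32]   # shuffled when reloaded from the cached info.txt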
import json import logging import sys from typing import Any, Callable, Dict, List from dhis2.core.http import BaseHttpRequest from dhis2.core.inventory import HostResolved, Inventory, resolve_one from fhir.resources.bundle import Bundle from .models.svcm import CodeList, SVCMConfig from .svcm_resources import build_bundle log = logging.getLogger(__name__) def get_source(config: SVCMConfig, inventory: Inventory) -> Callable[[Any], Any]: host = resolve_one(config.source.id, inventory) if "dhis2" not in host.type: log.error("Only 'dhis2' source type is currently supported") sys.exit(-1) log.info(f"Creating source from '{host.key}' with base url '{host.baseUrl}'") def fn(): filters = [] # https://docs.dhis2.org/2.35/en/developer/html/webapi_metadata_object_filter.html if config.source.lastUpdated: filters.append(f"lastUpdated:ge:{config.source.lastUpdated}") option_sets_filter = list(map(lambda x: f"id:eq:{x}", config.source.filters.optionSets)) option_sets_filter.extend(filters) option_sets = BaseHttpRequest(host).get( "api/optionSets", params={ "fields": "id,code,name,version,translations,options[id,code,name,translations]", "rootJunction": "OR", "filter": option_sets_filter, "paging": False, }, ) categories_filter = list(map(lambda x: f"id:eq:{x}", config.source.filters.categories)) categories_filter.extend(filters) categories = BaseHttpRequest(host).get( "api/categories", params={ "fields": "id,code,name,translations,categoryOptions::rename(options)[id,code,name,translations]", "rootJunction": "OR", "filter": categories_filter, "paging": False, }, ) data = { "optionSets": option_sets.get("optionSets", []), "categories": categories.get("categories", []), } return ( host, data, ) return fn def get_target(config: SVCMConfig, inventory: Inventory) -> Callable[[Any], Any]: id = config.target.id if "log://" == id: log.info("Creating 'log://' target") def target_log(data: Any): log.info("Writing result to stdout") print(json.dumps(data[1].as_json(), indent=2)) return target_log elif "null://" == id: log.info("Creating 'null://' target") def target_null(data: Any): log.info("Doing nothing with result") return target_null host = resolve_one(id, inventory) if "dhis2" in host.type: log.error("'dhis2' target type is not currently supported") sys.exit(-1) log.info(f"Creating target from '{host.key}' with base url '{host.baseUrl}'") def target_push(data: Any): payload: Bundle = data[1] return BaseHttpRequest(host).post("", data=payload.as_json()) return target_push def transform(config: SVCMConfig, data: Any): host: HostResolved = data[0] payload: Dict[str, Any] = data[1] code_lists: List[CodeList] = [] option_sets = payload.get("optionSets", []) categories = payload.get("categories", []) for option_set in option_sets: code_lists.append(CodeList(**option_set)) for category in categories: code_lists.append(CodeList(**category, type="categories")) return ( host, build_bundle(code_lists, host.baseUrl), ) def run(config: SVCMConfig, inventory: Inventory): log.info(f"SVCM job '{config.id}'' starting") source = get_source(config, inventory) target = get_target(config, inventory) data = source() data = transform(config, data) data = target(data) if data: log.info(f"Got response from target system {data}") log.info(f"SVCM job '{config.id}' finished")
dhis2_core/src/dhis2/code_list/svcm.py
4,005
https://docs.dhis2.org/2.35/en/developer/html/webapi_metadata_object_filter.html
80
en
0.513464
#!/usr/bin/env python """ This is a crude script for detecting reference leaks in the C-based cbor2 implementation. It is by no means fool-proof and won't pick up all possible ref leaks, but it is a reasonable "confidence test" that things aren't horribly wrong. The script assumes you're in an environment with objgraph and cbor2 installed. The script outputs a nicely formatted table of the tests run, and the number of "extra" objects that existed after the tests (indicating a ref-leak), or "-" if no extra objects existed. The ideal output is obviously "-" in all rows. """ import sys import objgraph import tracemalloc from datetime import datetime, timezone, timedelta from fractions import Fraction from decimal import Decimal from collections import namedtuple, OrderedDict def import_cbor2(): # Similar hack to that used in tests/conftest to get separate C and Python # implementations import cbor2 import cbor2.types import cbor2.encoder import cbor2.decoder class Module(object): # Mock module class pass py_cbor2 = Module() for source in (cbor2.types, cbor2.encoder, cbor2.decoder): for name in dir(source): setattr(py_cbor2, name, getattr(source, name)) return cbor2, py_cbor2 c_cbor2, py_cbor2 = import_cbor2() UTC = timezone.utc TEST_VALUES = [ # label, kwargs, value ('None', {}, None), ('10e0', {}, 1), ('10e12', {}, 1000000000000), ('10e29', {}, 100000000000000000000000000000), ('-10e0', {}, -1), ('-10e12', {}, -1000000000000), ('-10e29', {}, -100000000000000000000000000000), ('float1', {}, 1.0), ('float2', {}, 3.8), ('str', {}, 'foo'), ('bigstr', {}, 'foobarbaz ' * 1000), ('bytes', {}, b'foo'), ('bigbytes', {}, b'foobarbaz\x00' * 1000), ('datetime', {'timezone': UTC}, datetime(2019, 5, 9, 22, 4, 5, 123456)), ('decimal', {}, Decimal('1.1')), ('fraction', {}, Fraction(1, 5)), ('intlist', {}, [1, 2, 3]), ('bigintlist', {}, [1, 2, 3] * 1000), ('strlist', {}, ['foo', 'bar', 'baz']), ('bigstrlist', {}, ['foo', 'bar', 'baz'] * 1000), ('dict', {}, {'a': 1, 'b': 2, 'c': 3}), ('bigdict', {}, {'a' * i: i for i in range(1000)}), ('set', {}, {1, 2, 3}), ('bigset', {}, set(range(1000))), ('bigdictlist', {}, [{'a' * i: i for i in range(100)}] * 100), ('objectdict', {'timezone': UTC}, {'name': 'Foo', 'species': 'cat', 'dob': datetime(2013, 5, 20), 'weight': 4.1}), ('objectdictlist', {'timezone': UTC}, [{'name': 'Foo', 'species': 'cat', 'dob': datetime(2013, 5, 20), 'weight': 4.1}] * 100), ] Leaks = namedtuple('Leaks', ('count', 'comparison')) Tests = namedtuple('Test', ('objgraph', 'malloc')) Result = namedtuple('Result', ('encoding', 'decoding', 'roundtrip')) peak = {} def growth(): return objgraph.growth(limit=None, peak_stats=peak) def test_malloc(op): count = 0 start = datetime.now() # NOTE: Filter pointing to the op() line in the loop below, because we're # only interested in memory allocated by that line. Naturally, if this file # is edited, the lineno parameter below must be adjusted! only_op = tracemalloc.Filter(True, __file__, lineno=102, all_frames=True) tracemalloc.start(10) try: # Perform a pre-run of op so that any one-time memory allocation # (module imports, etc.) 
don't affect the later diffs op() before = tracemalloc.take_snapshot().filter_traces([only_op]) while True: count += 1 op() if datetime.now() - start > timedelta(seconds=0.2): break after = tracemalloc.take_snapshot().filter_traces([only_op]) diff = after.compare_to(before, 'traceback') diff = [entry for entry in diff if entry.size_diff > 0] return count, diff finally: tracemalloc.stop() def test_objgraph(op): count = 0 start = datetime.now() # See notes above op() growth() while True: count += 1 op() if datetime.now() - start > timedelta(seconds=0.2): break return count, growth() def test(op): return Tests(Leaks(*test_objgraph(op)), Leaks(*test_malloc(op))) def format_leaks(result): if result.objgraph.comparison: return '%d objs (/%d)' % ( sum(leak[-1] for leak in result.objgraph.comparison), result.objgraph.count) elif result.malloc.comparison and ( result.malloc.count < result.malloc.comparison[0].size_diff): # Running the loop always results in *some* memory allocation, but as # long as the bytes allocated are less than the number of loops it's # unlikely to be an actual leak return '%d bytes (/%d)' % ( result.malloc.comparison[0].size_diff, result.malloc.count) else: return '-' def output_table(results): # Build table content head = ('Test', 'Encoding', 'Decoding', 'Round-trip') rows = [head] + [ ( label, format_leaks(result.encoding), format_leaks(result.decoding), format_leaks(result.roundtrip), ) for label, result in results.items() ] # Format table output cols = zip(*rows) col_widths = [max(len(row) for row in col) for col in cols] sep = ''.join(( '+-', '-+-'.join('-' * width for width in col_widths), '-+', )) print(sep) print(''.join(( '| ', ' | '.join( '{value:<{width}}'.format(value=value, width=width) for value, width in zip(head, col_widths) ), ' |', ))) print(sep) for row in rows[1:]: print(''.join(( '| ', ' | '.join( '{value:<{width}}'.format(value=value, width=width) for value, width in zip(row, col_widths) ), ' |', ))) print(sep) print() print("""\ There *will* be false positives in the table above. Ignore leaks involving a tiny number of objects (e.g. 1) or a small number of bytes (e.g. < 8Kb) as such allocations are quite normal. In the case of a ref-leak of an object that can reference others (lists, sets, dicts, or anything with a __dict__), expect to see 100s or 1000s of "objs" leaked. In the case of a ref-leak of a simple object (int, str, bytes, etc.), expect to see a few hundred Kb allocated. If leaks occur across the board, it's likely to be in something universal like dump/load. If it's restricted to a type, check the encoding and decoding methods for that type. """) def main(): results = OrderedDict() sys.stderr.write("Testing") sys.stderr.flush() for name, kwargs, value in TEST_VALUES: encoded = py_cbor2.dumps(value, **kwargs) results[name] = Result( encoding=test(lambda: c_cbor2.dumps(value, **kwargs)), decoding=test(lambda: c_cbor2.loads(encoded)), roundtrip=test(lambda: c_cbor2.loads(c_cbor2.dumps(value, **kwargs))), ) sys.stderr.write(".") sys.stderr.flush() sys.stderr.write("\n") sys.stderr.write("\n") output_table(results) sys.stderr.write("\n") if __name__ == '__main__': main()
scripts/ref_leak_test.py
7,625
This is a crude script for detecting reference leaks in the C-based cbor2 implementation. It is by no means fool-proof and won't pick up all possible ref leaks, but it is a reasonable "confidence test" that things aren't horribly wrong. The script assumes you're in an environment with objgraph and cbor2 installed. The script outputs a nicely formatted table of the tests run, and the number of "extra" objects that existed after the tests (indicating a ref-leak), or "-" if no extra objects existed. The ideal output is obviously "-" in all rows. !/usr/bin/env python Similar hack to that used in tests/conftest to get separate C and Python implementations Mock module class label, kwargs, value NOTE: Filter pointing to the op() line in the loop below, because we're only interested in memory allocated by that line. Naturally, if this file is edited, the lineno parameter below must be adjusted! Perform a pre-run of op so that any one-time memory allocation (module imports, etc.) don't affect the later diffs See notes above Running the loop always results in *some* memory allocation, but as long as the bytes allocated are less than the number of loops it's unlikely to be an actual leak Build table content Format table output
1,248
en
0.908048
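The core trick in the script above is differencing tracemalloc snapshots taken around a tight loop. A stripped-down sketch of that idea (it omits the script's line-level filtering and the objgraph cross-check):

import tracemalloc

def bytes_retained_by(op, repeats=1000):
    # Warm-up call so one-time allocations (imports, caches) are not counted.
    op()
    tracemalloc.start()
    before = tracemalloc.take_snapshot()
    for _ in range(repeats):
        op()
    after = tracemalloc.take_snapshot()
    tracemalloc.stop()
    growth = after.compare_to(before, 'lineno')
    return sum(stat.size_diff for stat in growth if stat.size_diff > 0)

# e.g. bytes_retained_by(lambda: c_cbor2.loads(c_cbor2.dumps([1, 2, 3])))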
# # Copyright 2018 Analytics Zoo Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np import tempfile import os import torch from zoo.chronos.model.forecast.lstm_forecaster import LSTMForecaster from zoo.orca import init_orca_context, stop_orca_context from unittest import TestCase import pytest def create_data(): num_train_samples = 1000 num_val_samples = 400 num_test_samples = 400 input_time_steps = 24 input_feature_dim = 2 output_time_steps = 1 output_feature_dim = 2 def get_x_y(num_samples): x = np.random.rand(num_samples, input_time_steps, input_feature_dim).astype(np.float32) y = x[:, -output_time_steps:, :]*2 + \ np.random.rand(num_samples, output_time_steps, output_feature_dim).astype(np.float32) return x, y train_data = get_x_y(num_train_samples) val_data = get_x_y(num_val_samples) test_data = get_x_y(num_test_samples) return train_data, val_data, test_data class TestChronosModelLSTMForecaster(TestCase): def setUp(self): pass def tearDown(self): pass def test_tcn_forecaster_fit_eva_pred(self): train_data, val_data, test_data = create_data() forecaster = LSTMForecaster(past_seq_len=24, input_feature_num=2, output_feature_num=2, loss="mae", lr=0.01) train_loss = forecaster.fit(train_data, epochs=2) test_pred = forecaster.predict(test_data[0]) assert test_pred.shape == test_data[1].shape test_mse = forecaster.evaluate(test_data) def test_tcn_forecaster_onnx_methods(self): train_data, val_data, test_data = create_data() forecaster = LSTMForecaster(past_seq_len=24, input_feature_num=2, output_feature_num=2, loss="mae", lr=0.01) forecaster.fit(train_data, epochs=2) try: import onnx import onnxruntime pred = forecaster.predict(test_data[0]) pred_onnx = forecaster.predict_with_onnx(test_data[0]) np.testing.assert_almost_equal(pred, pred_onnx, decimal=5) mse = forecaster.evaluate(test_data, multioutput="raw_values") mse_onnx = forecaster.evaluate_with_onnx(test_data, multioutput="raw_values") np.testing.assert_almost_equal(mse, mse_onnx, decimal=5) mse = forecaster.evaluate(test_data) mse_onnx = forecaster.evaluate_with_onnx(test_data) np.testing.assert_almost_equal(mse, mse_onnx, decimal=5) except ImportError: pass def test_tcn_forecaster_save_load(self): train_data, val_data, test_data = create_data() forecaster = LSTMForecaster(past_seq_len=24, input_feature_num=2, output_feature_num=2, loss="mae", lr=0.01) train_mse = forecaster.fit(train_data, epochs=2) with tempfile.TemporaryDirectory() as tmp_dir_name: ckpt_name = os.path.join(tmp_dir_name, "ckpt") test_pred_save = forecaster.predict(test_data[0]) forecaster.save(ckpt_name) forecaster.load(ckpt_name) test_pred_load = forecaster.predict(test_data[0]) np.testing.assert_almost_equal(test_pred_save, test_pred_load) def test_tcn_forecaster_runtime_error(self): train_data, val_data, test_data = create_data() forecaster = LSTMForecaster(past_seq_len=24, input_feature_num=2, output_feature_num=2, loss="mae", lr=0.01) with pytest.raises(RuntimeError): with tempfile.TemporaryDirectory() as tmp_dir_name: ckpt_name = 
os.path.join(tmp_dir_name, "ckpt") forecaster.save(ckpt_name) with pytest.raises(RuntimeError): forecaster.predict(test_data[0]) with pytest.raises(RuntimeError): forecaster.evaluate(test_data) def test_tcn_forecaster_shape_error(self): train_data, val_data, test_data = create_data() forecaster = LSTMForecaster(past_seq_len=24, input_feature_num=2, output_feature_num=1, loss="mae", lr=0.01) with pytest.raises(AssertionError): forecaster.fit(train_data, epochs=2) def test_tcn_forecaster_xshard_input(self): train_data, val_data, test_data = create_data() print("original", train_data[0].dtype) init_orca_context(cores=4, memory="2g") from zoo.orca.data import XShards def transform_to_dict(data): return {'x': data[0], 'y': data[1]} def transform_to_dict_x(data): return {'x': data[0]} train_data = XShards.partition(train_data).transform_shard(transform_to_dict) val_data = XShards.partition(val_data).transform_shard(transform_to_dict) test_data = XShards.partition(test_data).transform_shard(transform_to_dict_x) for distributed in [True, False]: forecaster = LSTMForecaster(past_seq_len=24, input_feature_num=2, output_feature_num=2, loss="mae", lr=0.01, distributed=distributed) forecaster.fit(train_data, epochs=2) distributed_pred = forecaster.predict(test_data) distributed_eval = forecaster.evaluate(val_data) stop_orca_context() def test_tcn_forecaster_distributed(self): train_data, val_data, test_data = create_data() init_orca_context(cores=4, memory="2g") forecaster = LSTMForecaster(past_seq_len=24, input_feature_num=2, output_feature_num=2, loss="mae", lr=0.01, distributed=True) forecaster.fit(train_data, epochs=2) distributed_pred = forecaster.predict(test_data[0]) distributed_eval = forecaster.evaluate(val_data) model = forecaster.get_model() assert isinstance(model, torch.nn.Module) forecaster.to_local() local_pred = forecaster.predict(test_data[0]) local_eval = forecaster.evaluate(val_data) np.testing.assert_almost_equal(distributed_pred, local_pred, decimal=5) try: import onnx import onnxruntime local_pred_onnx = forecaster.predict_with_onnx(test_data[0]) local_eval_onnx = forecaster.evaluate_with_onnx(val_data) np.testing.assert_almost_equal(distributed_pred, local_pred_onnx, decimal=5) except ImportError: pass model = forecaster.get_model() assert isinstance(model, torch.nn.Module) stop_orca_context()
pyzoo/test/zoo/chronos/model/forecast/test_lstm_forecaster.py
8,045
Copyright 2018 Analytics Zoo Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
559
en
0.86347
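A hedged stand-alone sketch distilled from the tests above, using the same constructor arguments and the numpy (x, y) tuple data format; the random arrays are purely illustrative:

import numpy as np
from zoo.chronos.model.forecast.lstm_forecaster import LSTMForecaster

x = np.random.rand(1000, 24, 2).astype(np.float32)   # (samples, past_seq_len, input features)
y = np.random.rand(1000, 1, 2).astype(np.float32)    # (samples, horizon, output features)

forecaster = LSTMForecaster(past_seq_len=24,
                            input_feature_num=2,
                            output_feature_num=2,
                            loss="mae",
                            lr=0.01)
forecaster.fit((x, y), epochs=2)
pred = forecaster.predict(x)        # same shape as y: (1000, 1, 2)
mse = forecaster.evaluate((x, y))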
from pypy.objspace.std.iterobject import W_SeqIterObject from pypy.interpreter.error import OperationError class TestW_IterObject: def body3(self, w_iter): w = self.space.wrap assert self.space.eq_w(self.space.next(w_iter), w(5)) assert self.space.eq_w(self.space.next(w_iter), w(3)) assert self.space.eq_w(self.space.next(w_iter), w(99)) self.body0(w_iter) def body0(self, w_iter): raises(OperationError, self.space.next, w_iter) raises(OperationError, self.space.next, w_iter) def test_iter(self): w = self.space.wrap w_tuple = self.space.newtuple([w(5), w(3), w(99)]) w_iter = W_SeqIterObject(w_tuple) self.body3(w_iter) def test_iter_builtin(self): w = self.space.wrap w_tuple = self.space.newtuple([w(5), w(3), w(99)]) w_iter = self.space.iter(w_tuple) self.body3(w_iter) def test_emptyiter(self): w_list = self.space.newlist([]) w_iter = W_SeqIterObject(w_list) self.body0(w_iter) def test_emptyiter_builtin(self): w_list = self.space.newlist([]) w_iter = self.space.iter(w_list) self.body0(w_iter) class AppTestW_IterObjectApp: def test_user_iter(self): class C(object): def next(self): raise StopIteration def __iter__(self): return self assert list(C()) == [] def test_iter_getitem(self): class C(object): def __getitem__(self, i): return range(2)[i] assert list(C()) == range(2) def test_iter_fail_noseq(self): class C(object): pass raises(TypeError, iter, C()) class AppTest_IterObject(object): def test_no_len_on_list_iter(self): iterable = [1,2,3,4] raises(TypeError, len, iter(iterable)) def test_no_len_on_tuple_iter(self): iterable = (1,2,3,4) raises(TypeError, len, iter(iterable)) def test_no_len_on_deque_iter(self): from _collections import deque iterable = deque([1,2,3,4]) raises(TypeError, len, iter(iterable)) def test_no_len_on_reversed(self): it = reversed("foobar") raises(TypeError, len, it) def test_no_len_on_reversed_seqiter(self): # this one fails on CPython. 
See http://bugs.python.org/issue3689 it = reversed([5,6,7]) raises(TypeError, len, it) def test_no_len_on_UserList_iter_reversed(self): import sys, _abcoll sys.modules['collections'] = _abcoll from UserList import UserList iterable = UserList([1,2,3,4]) raises(TypeError, len, iter(iterable)) raises(TypeError, len, reversed(iterable)) del sys.modules['collections'] def test_reversed_frees_empty(self): import gc for typ in list, unicode: free = [False] class U(typ): def __del__(self): free[0] = True r = reversed(U()) raises(StopIteration, next, r) gc.collect(); gc.collect(); gc.collect() assert free[0] def test_reversed_mutation(self): n = 10 d = range(n) it = reversed(d) next(it) next(it) assert it.__length_hint__() == n-2 d.append(n) assert it.__length_hint__() == n-2 d[1:] = [] assert it.__length_hint__() == 0 assert list(it) == [] d.extend(xrange(20)) assert it.__length_hint__() == 0 def test_no_len_on_set_iter(self): iterable = set([1,2,3,4]) raises(TypeError, len, iter(iterable)) def test_no_len_on_xrange(self): iterable = xrange(10) raises(TypeError, len, iter(iterable)) def test_contains(self): logger = [] class Foo(object): def __init__(self, value, name=None): self.value = value self.name = name or value def __repr__(self): return '<Foo %s>' % self.name def __eq__(self, other): logger.append((self, other)) return self.value == other.value foo1, foo2, foo3 = Foo(1), Foo(2), Foo(3) foo42 = Foo(42) foo_list = [foo1, foo2, foo3] foo42 in (x for x in foo_list) logger_copy = logger[:] # prevent re-evaluation during pytest error print assert logger_copy == [(foo42, foo1), (foo42, foo2), (foo42, foo3)] del logger[:] foo2_bis = Foo(2, '2 bis') foo2_bis in (x for x in foo_list) logger_copy = logger[:] # prevent re-evaluation during pytest error print assert logger_copy == [(foo2_bis, foo1), (foo2_bis, foo2)]
idea2/pypyjs-3/deps/pypy/pypy/objspace/std/test/test_iterobject.py
# -*- coding: utf-8 -*-
"""
"""
import argparse
import os
import sys


if __name__ == '__main__':
    pass
audy/mix/noise.py
import numpy as np

# Thinning morphological operation applied using lookup tables.
# We convert the 3x3 neighbourhood surrounding a pixel to an index
# used to lookup the output in a lookup table.

# Bit masks for each neighbour
#   1   2   4
#   8  16  32
#  64 128 256
NEIGH_MASK_EAST = 32
NEIGH_MASK_NORTH_EAST = 4
NEIGH_MASK_NORTH = 2
NEIGH_MASK_NORTH_WEST = 1
NEIGH_MASK_WEST = 8
NEIGH_MASK_SOUTH_WEST = 64
NEIGH_MASK_SOUTH = 128
NEIGH_MASK_SOUTH_EAST = 256
NEIGH_MASK_CENTRE = 16

# Masks in a list
# MASKS[0] = centre
# MASKS[1..8] = start from east, counter-clockwise
MASKS = [NEIGH_MASK_CENTRE,
         NEIGH_MASK_EAST, NEIGH_MASK_NORTH_EAST, NEIGH_MASK_NORTH, NEIGH_MASK_NORTH_WEST,
         NEIGH_MASK_WEST, NEIGH_MASK_SOUTH_WEST, NEIGH_MASK_SOUTH, NEIGH_MASK_SOUTH_EAST,
         ]

# Constant listing all indices
_LUT_INDS = np.arange(512)


def binary_image_to_lut_indices(x):
    """
    Convert a binary image to an index image that can be used with a lookup table
    to perform morphological operations. Non-zero elements in the image are interpreted
    as 1, zero elements as 0
    :param x: a 2D NumPy array.
    :return: a 2D NumPy array, same shape as x
    """
    if x.ndim != 2:
        raise ValueError('x should have 2 dimensions, not {}'.format(x.ndim))

    # If the dtype of x is not bool, convert
    if x.dtype != np.bool:
        x = x != 0

    # Add a 1-pixel border of padding
    x = np.pad(x, [(1, 1), (1, 1)], mode='constant')

    # Convert to LUT indices
    lut_indices = x[:-2, :-2] * NEIGH_MASK_NORTH_WEST + \
                  x[:-2, 1:-1] * NEIGH_MASK_NORTH + \
                  x[:-2, 2:] * NEIGH_MASK_NORTH_EAST + \
                  x[1:-1, :-2] * NEIGH_MASK_WEST + \
                  x[1:-1, 1:-1] * NEIGH_MASK_CENTRE + \
                  x[1:-1, 2:] * NEIGH_MASK_EAST + \
                  x[2:, :-2] * NEIGH_MASK_SOUTH_WEST + \
                  x[2:, 1:-1] * NEIGH_MASK_SOUTH + \
                  x[2:, 2:] * NEIGH_MASK_SOUTH_EAST

    return lut_indices.astype(np.int32)


def apply_lut(x, lut):
    """
    Perform a morphological operation on the binary image x using the supplied lookup table
    :param x:
    :param lut:
    :return:
    """
    if lut.ndim != 1:
        raise ValueError('lut should have 1 dimension, not {}'.format(lut.ndim))

    if lut.shape[0] != 512:
        raise ValueError('lut should have 512 entries, not {}'.format(lut.shape[0]))

    lut_indices = binary_image_to_lut_indices(x)
    return lut[lut_indices]


def identity_lut():
    """
    Create identity lookup table
    :return:
    """
    lut = np.zeros((512,), dtype=bool)
    inds = np.arange(512)
    lut[(inds & NEIGH_MASK_CENTRE) != 0] = True
    return lut


def _lut_mutate_mask(lut):
    """
    Get a mask that shows which neighbourhood shapes result in changes to the image
    :param lut: lookup table
    :return: mask indicating which lookup indices result in changes
    """
    return lut != identity_lut()


def lut_masks_zero(neigh):
    """
    Create a LUT index mask for which the specified neighbour is 0
    :param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour
    :return: a LUT index mask
    """
    if neigh > 8:
        neigh -= 8
    return (_LUT_INDS & MASKS[neigh]) == 0


def lut_masks_one(neigh):
    """
    Create a LUT index mask for which the specified neighbour is 1
    :param neigh: neighbour index; counter-clockwise from 1 starting at the eastern neighbour
    :return: a LUT index mask
    """
    if neigh > 8:
        neigh -= 8
    return (_LUT_INDS & MASKS[neigh]) != 0


def _thin_cond_g1():
    """
    Thinning morphological operation; condition G1
    :return: a LUT index mask
    """
    b = np.zeros(512, dtype=int)
    for i in range(1, 5):
        b += lut_masks_zero(2 * i - 1) & (lut_masks_one(2 * i) | lut_masks_one(2 * i + 1))
    return b == 1


def _thin_cond_g2():
    """
    Thinning morphological operation; condition G2
    :return: a LUT index mask
    """
    n1 = np.zeros(512, dtype=int)
    n2 = np.zeros(512, dtype=int)
    for k in range(1, 5):
        n1 += (lut_masks_one(2 * k - 1) | lut_masks_one(2 * k))
        n2 += (lut_masks_one(2 * k) | lut_masks_one(2 * k + 1))
    m = np.minimum(n1, n2)
    return (m >= 2) & (m <= 3)


def _thin_cond_g3():
    """
    Thinning morphological operation; condition G3
    :return: a LUT index mask
    """
    return ((lut_masks_one(2) | lut_masks_one(3) | lut_masks_zero(8)) & lut_masks_one(1)) == 0


def _thin_cond_g3_prime():
    """
    Thinning morphological operation; condition G3'
    :return: a LUT index mask
    """
    return ((lut_masks_one(6) | lut_masks_one(7) | lut_masks_zero(4)) & lut_masks_one(5)) == 0


def _thin_iter_1_lut():
    """
    Thinning morphological operation; lookup table for iteration 1
    :return: lookup table
    """
    lut = identity_lut()
    cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3()
    lut[cond] = False
    return lut


def _thin_iter_2_lut():
    """
    Thinning morphological operation; lookup table for iteration 2
    :return: lookup table
    """
    lut = identity_lut()
    cond = _thin_cond_g1() & _thin_cond_g2() & _thin_cond_g3_prime()
    lut[cond] = False
    return lut


def binary_thin(x, max_iter=None):
    """
    Binary thinning morphological operation
    :param x: a binary image, or an image that is to be converted to a binary image
    :param max_iter: maximum number of iterations; default is `None` that results in an
        infinite number of iterations (note that `binary_thin` will automatically terminate
        when no more changes occur)
    :return:
    """
    thin1 = _thin_iter_1_lut()
    thin2 = _thin_iter_2_lut()
    thin1_mut = _lut_mutate_mask(thin1)
    thin2_mut = _lut_mutate_mask(thin2)

    iter_count = 0
    while max_iter is None or iter_count < max_iter:
        # Iter 1
        lut_indices = binary_image_to_lut_indices(x)
        x_mut = thin1_mut[lut_indices]
        if x_mut.sum() == 0:
            break
        x = thin1[lut_indices]

        # Iter 2
        lut_indices = binary_image_to_lut_indices(x)
        x_mut = thin2_mut[lut_indices]
        if x_mut.sum() == 0:
            break
        x = thin2[lut_indices]

        iter_count += 1

    return x
Benchmarking/bsds500/bsds/thin.py
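For orientation, here is a minimal sketch of how this LUT-based thinning routine might be driven; the import path, the toy input image and the iteration cap are illustrative assumptions rather than part of the original module.

import numpy as np
from bsds.thin import binary_thin  # import path assumed from the file location above

# A small binary image containing a 3-pixel-thick horizontal bar.
img = np.zeros((7, 9), dtype=bool)
img[2:5, 1:8] = True

# Thin until no pixel changes (max_iter=None), then inspect the skeleton.
skeleton = binary_thin(img, max_iter=None)
print(skeleton.astype(int))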
#!/usr/bin/env python3
"""
Usage: program | ./memcheck.py
"""
import fileinput

with fileinput.input() as f:
    data = "".join(f)

# Track allocations by the text after the last colon (the address):
# a "malloc:" line adds an entry, the matching "free:" line removes it.
s = {}
for l in data.splitlines():
    if "malloc:" in l:
        c = l.split(":")
        s[c[-1].strip()] = l
        # print("malloc:%s" % c[-1].strip())
    if "free:" in l:
        c = l.split(":")
        del s[c[-1].strip()]
        # print("free:%s" % c[-1].strip())

# print("size: %d" % len(s))
# Note: the original used a for/else here, which printed the "nothing to
# handle" message unconditionally; an if/else matches the apparent intent.
if s:
    print("The following allocations may not have been freed, please check:")
    for l in s:
        print(s[l])
else:
    print("Nothing needs to be handled.")
py/memcheck.py
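The script only relies on lines containing "malloc:" or "free:" and keys them by the text after the last colon, so the log layout sketched below is a guess used purely for illustration.

# A hypothetical log of the kind this filter could consume:
sample_log = """\
engine: malloc: 64 bytes at:0x55aa10
engine: malloc: 128 bytes at:0x55aa80
engine: free:0x55aa10
"""
# Piped through the script (e.g. `program | ./memcheck.py`), only the
# 0x55aa80 allocation would be reported as possibly unfreed.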
import random class Card: def __init__(self, suit, rank): self.suit = suit self.rank = rank def __str__(self): return f"{self.suit} {self.rank}: {BlackJack.values[self.rank]}" class Hand: def __init__(self): self.cards = [] # start with empty list self.value = 0 self.aces = 0 def adjust_for_ace(self): while self.value > 21 and self.aces: self.value -= 10 self.aces -= 1 def add_card(self, card): self.cards.append(card) self.value += BlackJack.values[card.rank] if card.rank == 'Ace': self.aces += 1 def __str__(self): return f"Current Hand:{self.cards}\nCurrent Value:{self.value}\nCurrent Aces:{self.aces}\n" class Deck: def __init__(self, card_game): self.game = card_game # create deck with all 52 cards self.cards = list() for suit in self.game.suits: for rank in self.game.ranks: self.cards.append(Card(suit, rank)) def shuffle(self): random.shuffle(self.cards) def deal_card(self): return self.cards.pop() def __str__(self): return f"{[x for x in self.cards]}" class Chips: def __init__(self, total=100): self.total = total self.bet = 0 def win_bet(self): self.total += self.bet self.bet = 0 def lose_bet(self): self.total -= self.bet self.bet = 0 def make_bet(self, bet): if bet <= self.total: self.bet = bet else: raise ValueError(f"The bet ({bet}) exceeds available chips ({self.total})") def __str__(self): return f"Total: {self.total}\nCurrent Bet:{self.bet}\n" class Player: def __init__(self, name): self.name = name self.wins = 0 self.lost_games = 0 self.chips = Chips() def __str__(self): return f"{self.name}:\n{self.wins} wins\n{self.lost_games} losses\nChips:{self.chips}\n" class BlackJack: suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs') ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace') values = {'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10, 'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': 11} def __init__(self, player): self.player = player self.deck = Deck(self) self.playing = False def greeting(self): print("WELCOME TO BLACKJACK!") def take_bet(self): while True: try: # Ask the Player for their bet bet = int(input("Please put your bet: ")) # Make sure that the Player's bet does not exceed their available chips self.player.chips.make_bet(bet) break except TypeError: print("Invalid input. Please try again") except ValueError as exc: print(f"{exc} Please try again") def hit(self, hand): cd = self.deck.deal_card() # print(f"Deal Card: {cd}") hand.add_card(cd) hand.adjust_for_ace() def hit_or_stand(self, hand): while True: print(f"{self.player.name}: current {hand.value}") action = input("Hit or Stand? Enter 'h' or 's': ") if action[0].lower() == 's': print("STAY\n") self.playing = False elif action[0].lower() == 'h': print("HIT\n") self.hit(hand) else: print(f"Sorry, I do not understand your choice '{action}'. Please try again") continue break def player_busts(self, p_hand, d_hand): print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} BUSTED!") self.player.chips.lose_bet() self.player.lost_games += 1 def player_wins(self, p_hand, d_hand): print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS! 
") self.player.chips.win_bet() self.player.wins += 1 def dealer_busts(self, p_hand, d_hand): print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS - Dealer BUSTED!") self.player.chips.win_bet() self.player.wins += 1 def dealer_wins(self, p_hand, d_hand): print(f"[P={p_hand.value},D={d_hand.value}]: Dealer WINS") self.player.chips.lose_bet() self.player.lost_games += 1 def push(self, p_hand, d_hand): print(f"[P={p_hand.value},D={d_hand.value}]: Dealer and {self.player.name} tie - PUSH!") def show_some(self, p_hand, d_hand): # Show only one of the Dealer's cards, the other remains hidden print(f"Dealer's card (one hidden): {d_hand.cards[0]}") # Show both of the Player's cards print(f"{self.player.name}'s Cards:") for card in p_hand.cards: print(card) print(f"total= {p_hand.value}") def show_all_cards(self, p_hand, d_hand): # Show both of the Player's cards print(f"{self.player.name}'s Cards:") for card in p_hand.cards: print(card) print(f"total= {p_hand.value}") # Show both of the Player's cards print(f"Dealer's Cards:") for card in d_hand.cards: print(card) print(f"total= {d_hand.value}") def play(self): """ # 1. Create a deck of 52 cards # 2. Shuffle the deck # 3. Ask the Player for their bet # 4. Make sure that the Player's bet does not exceed their available chips # 5. Deal two cards to the Dealer and two cards to the Player # 6. Show only one of the Dealer's cards, the other remains hidden # 7. Show both of the Player's cards # 8. Ask the Player if they wish to Hit, and take another card # 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again. # 10. If a Player Stands, play the Dealer's hand. # The dealer will always Hit until the Dealer's value meets or exceeds 17 # 11. Determine the winner and adjust the Player's chips accordingly # 12. Ask the Player if they'd like to play again """ print("--NEW GAME---") self.playing = True self.deck.shuffle() dealer_hand = Hand() player_hand = Hand() # Deal two cards to the Dealer and two cards to the Player player_hand.add_card(self.deck.deal_card()) dealer_hand.add_card(self.deck.deal_card()) player_hand.add_card(self.deck.deal_card()) dealer_hand.add_card(self.deck.deal_card()) self.take_bet() # show cards, but keep one dealer card hidden self.show_some(player_hand, dealer_hand) while self.playing: # Ask the Player if they wish to Hit, and take another card # If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again. 
self.hit_or_stand(player_hand) self.show_some(player_hand, dealer_hand) if player_hand.value > 21: # player busts - lost his bet self.player_busts(player_hand, dealer_hand) break # If Player has not busted if player_hand.value <= 21: # The dealer will always Hit until the Dealer's value meets or exceeds 17 while dealer_hand.value < 17: self.hit(dealer_hand) # Determine for the winner - show all cards self.show_all_cards(player_hand, dealer_hand) # Determine the winner and adjust the Player's chips accordingly if dealer_hand.value > 21: self.dealer_busts(player_hand, dealer_hand) elif player_hand.value > dealer_hand.value: self.player_wins(player_hand, dealer_hand) elif player_hand.value < dealer_hand.value: self.dealer_wins(player_hand, dealer_hand) else: self.push(player_hand, dealer_hand) if __name__ == "__main__": game_on = True # Play a new game of BlackJack with Player Daniela player = Player('Daniela') game = BlackJack(player) game.greeting() while game_on: game.play() print(f"GAME DONE.\nGame Stats:\n\n{player}") # Ask the Player if they'd like to play again if input("Would you like another game? y/n: ") != 'y': game_on = False
BlackJack.py
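To show how the supporting classes compose outside the interactive loop, here is a hedged snippet that assumes it runs in the same namespace as the classes above; the specific cards and chip amounts are arbitrary examples.

hand = Hand()
hand.add_card(Card('Hearts', 'Ace'))
hand.add_card(Card('Spades', 'King'))   # Ace + King -> value 21
hand.add_card(Card('Clubs', 'Seven'))   # 28, over 21 ...
hand.adjust_for_ace()                   # ... so the Ace counts as 1 and the value drops to 18
print(hand.value)                       # 18

chips = Chips(total=100)
chips.make_bet(25)
chips.win_bet()
print(chips.total)                      # 125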
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import io from queue import Queue import time import unittest import unittest.mock as mock from ossdbtoolsservice.hosting.json_rpc_server import ( JSONRPCServer, IncomingMessageConfiguration, NotificationContext, RequestContext ) from ossdbtoolsservice.hosting.json_message import JSONRPCMessage, JSONRPCMessageType from ossdbtoolsservice.hosting.json_reader import JSONRPCReader from ossdbtoolsservice.hosting.json_writer import JSONRPCWriter import tests.utils as utils class JSONRPCServerTests(unittest.TestCase): def test_handler_init(self): # If: I create a Handler class handler = JSONRPCServer.Handler('class', 'handler') # Then: The values should be available self.assertEqual(handler.class_, 'class') self.assertEqual(handler.handler, 'handler') def test_server_init(self): # Setup: Create objects to init the server with input_stream = io.BytesIO() output_stream = io.BytesIO() logger = utils.get_mock_logger() # If: I create a server server = JSONRPCServer(input_stream, output_stream, logger=logger) # Then: The state should be initialized as defined self.assertIsInstance(server.writer, JSONRPCWriter) self.assertIsInstance(server.reader, JSONRPCReader) self.assertIs(server._logger, logger) self.assertEqual(server._version, '0') self.assertFalse(server._stop_requested) # ... The output queue should be empty self.assertIsInstance(server._output_queue, Queue) self.assertTrue(server._output_queue.all_tasks_done) self.assertDictEqual(server._notification_handlers, {}) self.assertListEqual(server._shutdown_handlers, []) # ... The threads shouldn't be assigned yet self.assertIsNone(server._output_consumer) self.assertIsNone(server._input_consumer) # ... 
The built-in handlers should be assigned self.assertTrue('echo' in server._request_handlers) self.assertIsNotNone(server._request_handlers['echo']) self.assertTrue('version' in server._request_handlers) self.assertIsNotNone(server._request_handlers['version'].handler) self.assertTrue('shutdown' in server._request_handlers) self.assertIsNotNone(server._request_handlers['shutdown'].handler) self.assertTrue('exit' in server._request_handlers) self.assertIsNotNone(server._request_handlers['exit'].handler) def test_add_shutdown_handler(self): # If: I add a shutdown handler handler = mock.MagicMock() server = JSONRPCServer(None, None) server.add_shutdown_handler(handler) # Then: The shutdown handlers should contain the handler self.assertTrue(handler in server._shutdown_handlers) def test_set_request_handler(self): # If: I add a request handler params = IncomingMessageConfiguration('test/test', int) handler = mock.MagicMock() server = JSONRPCServer(None, None) server.set_request_handler(params, handler) # Then: The request handler should contain the handler self.assertTrue(params.method in server._request_handlers) self.assertIsNotNone(server._request_handlers[params.method]) self.assertIs(server._request_handlers[params.method].class_, int) self.assertIs(server._request_handlers[params.method].handler, handler) def test_set_notification_handler(self): # If: I add a notification handler params = IncomingMessageConfiguration('test/test', int) handler = mock.MagicMock() server = JSONRPCServer(None, None) server.set_notification_handler(params, handler) # Then: The request handler should contain the handler self.assertTrue(params.method in server._notification_handlers) self.assertIsNotNone(server._notification_handlers[params.method]) self.assertIs(server._notification_handlers[params.method].class_, int) self.assertIs(server._notification_handlers[params.method].handler, handler) # BUILT-IN HANDLER TESTS ############################################### @staticmethod def test_echo_request(): # If: I send a request for an echo rc = utils.MockRequestContext() params = {} JSONRPCServer._handle_echo_request(rc, params) # Then: The params should have been echoed back rc.send_response.assert_called_once_with(params) rc.send_notification.assert_not_called() rc.send_error.assert_not_called() @staticmethod def test_version_request(): # If: I send a request for the version rc = utils.MockRequestContext() server = JSONRPCServer(None, None) server._handle_version_request(rc, None) # Then: I should get a response rc.send_response.assert_called_once_with(server._version) rc.send_error.assert_not_called() rc.send_notification.assert_not_called() def test_shutdown_request(self): # If: I send a request for the service to shutdown rc = utils.MockRequestContext() handler = mock.MagicMock() server = JSONRPCServer(None, None, logger=utils.get_mock_logger()) server.add_shutdown_handler(handler) server._handle_shutdown_request(rc, None) # Then: # ... The server should be shutting down self.assertTrue(server._stop_requested) # ... 
The shutdown handler should be called handler.assert_called_once() # RequestContext TESTS ################################################# def test_request_context_init_test(self): # If: I create a request context queue = Queue() message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}}) rc = RequestContext(message, queue) # Then: The internal state should be set up correctly self.assertIs(rc._message, message) self.assertIs(rc._queue, queue) def test_request_context_send_response(self): # Setup: Create a request context queue = Queue() in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}}) rc = RequestContext(in_message, queue) # If: I send a response via the response handler params = {} rc.send_response(params) # Then: # ... There should be a message in the outbound queue self.assertTrue(queue.not_empty) out_message = queue.get_nowait() self.assertIsInstance(out_message, JSONRPCMessage) # .. The message must be a response with the proper id self.assertEqual(out_message.message_type, JSONRPCMessageType.ResponseSuccess) self.assertEqual(out_message.message_id, '123') self.assertEqual(out_message.message_result, params) def test_request_context_send_notification(self): # Setup: Create a request context queue = Queue() in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}}) rc = RequestContext(in_message, queue) # If: I send a notification params = {} method = 'test/test' rc.send_notification(method, params) # Then: # ... There should be a message in the outbound queue self.assertTrue(queue.not_empty) out_message = queue.get_nowait() self.assertIsInstance(out_message, JSONRPCMessage) # .. The message must be a response with the proper id self.assertEqual(out_message.message_type, JSONRPCMessageType.Notification) self.assertIsNone(out_message.message_id) self.assertEqual(out_message.message_params, params) def test_request_context_send_error(self): # Setup: Create a request context queue = Queue() in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}}) rc = RequestContext(in_message, queue) # If: I send an error params = {} rc.send_error(params) # Then: # ... There should be a message in the outbound queue self.assertTrue(queue.not_empty) out_message = queue.get_nowait() self.assertIsInstance(out_message, JSONRPCMessage) # .. 
The message must be a response with the proper id self.assertEqual(out_message.message_type, JSONRPCMessageType.ResponseError) self.assertEqual(out_message.message_id, '123') self.assertIsInstance(out_message.message_error, dict) self.assertIs(out_message.message_error['message'], params) # DISPATCHER TESTS ##################################################### @staticmethod def test_dispatch_response_success(): # TODO: Replace with robust logic once response routing is implemented # If: I dispatch a response message message = JSONRPCMessage.create_response('123', {}) server = JSONRPCServer(None, None, logger=utils.get_mock_logger()) server._dispatch_message(message) # Then: Nothing should have happened @staticmethod def test_dispatch_response_error(): # TODO: Replace with robust logic once error routing is implemented # If: I dispatch an error message message = JSONRPCMessage.create_error('123', 0, message='', data={}) server = JSONRPCServer(None, None, logger=utils.get_mock_logger()) server._dispatch_message(message) # Then: Nothing should have happened @staticmethod def test_dispatch_invalid(): # If: I dispatch an invalid message message = JSONRPCMessage('invalidType') server = JSONRPCServer(None, None, logger=utils.get_mock_logger()) server._dispatch_message(message) # Then: Nothing should have happened @staticmethod def test_dispatch_request_no_handler(): # If: I dispatch a message that has no handler logger = utils.get_mock_logger() message = JSONRPCMessage.create_request('123', 'non_existent', {}) server = JSONRPCServer(None, None, logger=logger) server._dispatch_message(message) # Then: # ... Nothing should have happened # TODO: Capture that an error was sent # ... A warning should have been logged logger.warn.assert_called_once() def test_dispatch_request_none_class(self): # Setup: Create a server with a single handler that has none for the deserialization class config = IncomingMessageConfiguration('test/test', None) handler = mock.MagicMock() server = JSONRPCServer(None, None, logger=utils.get_mock_logger()) server.set_request_handler(config, handler) # If: I dispatch a message that has none set for the deserialization class params = {} message = JSONRPCMessage.create_request('123', 'test/test', params) server._dispatch_message(message) # Then: # ... The handler should have been called handler.assert_called_once() # ... The parameters to the handler should have been a request context and params self.assertIsInstance(handler.mock_calls[0][1][0], RequestContext) self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue) self.assertIs(handler.mock_calls[0][1][0]._message, message) self.assertIs(handler.mock_calls[0][1][1], params) def test_dispatch_request_normal(self): # Setup: Create a server with a single handler that has none for the deserialization class config = IncomingMessageConfiguration('test/test', _TestParams) handler = mock.MagicMock() server = JSONRPCServer(None, None, logger=utils.get_mock_logger()) server.set_request_handler(config, handler) # If: I dispatch a message that has none set for the deserialization class params = {} message = JSONRPCMessage.create_request('123', 'test/test', params) server._dispatch_message(message) # Then: # ... The handler should have been called handler.assert_called_once() # ... 
The parameters to the handler should have been a request context and params self.assertIsInstance(handler.mock_calls[0][1][0], RequestContext) self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue) self.assertIs(handler.mock_calls[0][1][0]._message, message) self.assertIsInstance(handler.mock_calls[0][1][1], _TestParams) @staticmethod def test_dispatch_notification_no_handler(): # If: I dispatch a message that has no handler logger = utils.get_mock_logger() message = JSONRPCMessage.create_notification('non_existent', {}) server = JSONRPCServer(None, None, logger=logger) server._dispatch_message(message) # Then: # ... Nothing should have happened # TODO: Capture that an error was sent # ... A warning should have been logged logger.warn.assert_called_once() def test_dispatch_notification_none_class(self): # Setup: Create a server with a single handler that has none for the deserialization class config = IncomingMessageConfiguration('test/test', None) handler = mock.MagicMock() server = JSONRPCServer(None, None, logger=utils.get_mock_logger()) server.set_notification_handler(config, handler) # If: I dispatch a message that has none set for the deserialization class params = {} message = JSONRPCMessage.create_notification('test/test', params) server._dispatch_message(message) # Then: # ... The handler should have been called handler.assert_called_once() # ... The parameters to the handler should have been a request context and params self.assertIsInstance(handler.mock_calls[0][1][0], NotificationContext) self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue) self.assertIs(handler.mock_calls[0][1][1], params) def test_dispatch_notification_normal(self): # Setup: Create a server with a single handler that has none for the deserialization class config = IncomingMessageConfiguration('test/test', _TestParams) handler = mock.MagicMock() server = JSONRPCServer(None, None, logger=utils.get_mock_logger()) server.set_notification_handler(config, handler) # If: I dispatch a message that has none set for the deserialization class params = {} message = JSONRPCMessage.create_notification('test/test', params) server._dispatch_message(message) # Then: # ... The handler should have been called handler.assert_called_once() # ... The parameters to the handler should have been a request context and params self.assertIsInstance(handler.mock_calls[0][1][0], NotificationContext) self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue) self.assertIsInstance(handler.mock_calls[0][1][1], _TestParams) # RequestContext TESTS ################################################# def test_notification_context_init_test(self): # If: I create a notification context queue = Queue() nc = NotificationContext(queue) # Then: The internal state should be set up correctly self.assertIs(nc._queue, queue) def test_notification_context_send(self): # Setup: Create a request context queue = Queue() nc = NotificationContext(queue) # If: I send a response via the response handler method = 'test/test' params = {} nc.send_notification(method, params) # Then: # ... There should be a message in the outbound queue self.assertTrue(queue.not_empty) out_message = queue.get_nowait() self.assertIsInstance(out_message, JSONRPCMessage) # .. 
The message must be a response with the proper id self.assertEqual(out_message.message_type, JSONRPCMessageType.Notification) self.assertIsNone(out_message.message_id) self.assertEqual(out_message.message_params, params) self.assertEqual(out_message.message_method, method) # END-TO-END TESTS ##################################################### def test_request_enqueued(self): # Setup: Create empty io streams input_stream = io.BytesIO() output_stream = io.BytesIO() # If: I submit an outbound request test_client = JSONRPCServer(input_stream, output_stream) test_client.send_request('test/test', {'test': 'test'}) # Then: # ... There should be one request in the outbound queue request = test_client._output_queue.get() # ... The queued message should match the request we sent self.assertEqual(request.message_method, 'test/test') self.assertDictEqual(request.message_params, {'test': 'test'}) def test_notification_enqueued(self): # Setup: Create empty io streams input_stream = io.BytesIO() output_stream = io.BytesIO() # If: I submit an outbound request test_client = JSONRPCServer(input_stream, output_stream) test_client.send_notification('test/test', {'test': 'test'}) # Then: # ... There should be one request in the outbound queue request = test_client._output_queue.get() # ... The queued message should match the request we sent self.assertEqual(request.message_method, 'test/test') self.assertDictEqual(request.message_params, {'test': 'test'}) def test_reads_message(self): # Setup: # ... Create an input stream with a single message input_stream = io.BytesIO(b'Content-Length: 30\r\n\r\n{"method":"test", "params":{}}') output_stream = io.BytesIO() # ... Create a server that uses the input and output streams server = JSONRPCServer(input_stream, output_stream, logger=utils.get_mock_logger()) # ... Patch the server to not dispatch a message dispatch_mock = mock.MagicMock() server._dispatch_message = dispatch_mock # If: I start the server, run it for a bit, and stop it # TODO: Remove explicit sleep and add spin-locks server.start() time.sleep(1) server.stop() server.wait_for_exit() # Then: The dispatch method should have been called expected_output = JSONRPCMessage.from_dictionary({"method": "test", "params": {}}) dispatch_mock.assert_called_once() self.assertDictEqual(dispatch_mock.mock_calls[0][1][0].dictionary, expected_output.dictionary) # Teardown: All background threads should be shut down. self.assertFalse(server._input_consumer.isAlive()) self.assertFalse(server._output_consumer.isAlive()) def test_read_multiple_messages(self): # Setup: # ... Create an input stream with two messages test_bytes = b'Content-Length: 30\r\n\r\n{"method":"test", "params":{}}' input_stream = io.BytesIO(test_bytes + test_bytes) output_stream = io.BytesIO() # ... Create a server that uses the input and output streams server = JSONRPCServer(input_stream, output_stream, logger=utils.get_mock_logger()) # ... 
Patch the server to not dispatch a message dispatch_mock = mock.MagicMock() server._dispatch_message = dispatch_mock # If: I start the server, run it for a bit, and stop it server.start() time.sleep(1) server.stop() server.wait_for_exit() # Then: The dispatch method should have been called twice expected_output = JSONRPCMessage.from_dictionary({"method": "test", "params": {}}) self.assertEqual(len(dispatch_mock.mock_calls), 2) self.assertDictEqual(dispatch_mock.mock_calls[0][1][0].dictionary, expected_output.dictionary) self.assertDictEqual(dispatch_mock.mock_calls[1][1][0].dictionary, expected_output.dictionary) # Teardown: All background threads should be shut down. self.assertFalse(server._input_consumer.isAlive()) self.assertFalse(server._output_consumer.isAlive()) class _TestParams: @classmethod def from_dict(cls, dictionary): return _TestParams() def __init__(self): pass if __name__ == '__main__': unittest.main()
tests/hosting/test_server.py
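Pieced together from what these tests exercise, the snippet below sketches how a handler might be registered on the server; the stream stand-ins and the method name are assumptions, and a real host would supply its own logger and parameter classes.

import io
from ossdbtoolsservice.hosting.json_rpc_server import (
    JSONRPCServer, IncomingMessageConfiguration
)

def handle_ping(request_context, params):
    # Echo the request parameters back, much like the built-in 'echo' handler.
    request_context.send_response(params)

# Stand-in streams; a real host would wire these to stdin/stdout.
server = JSONRPCServer(io.BytesIO(), io.BytesIO())
server.set_request_handler(IncomingMessageConfiguration('example/ping', None), handle_ping)
# The host would then call server.start() and, on shutdown, server.stop()
# followed by server.wait_for_exit(), as the end-to-end tests above do.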
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from ... import _utilities, _tables from . import outputs from ._inputs import * __all__ = ['Account'] class Account(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, account_name: Optional[pulumi.Input[str]] = None, active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None, location: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None, __name__=None, __opts__=None): """ NetApp account resource :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] account_name: The name of the NetApp account :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories :param pulumi.Input[str] location: Resource location :param pulumi.Input[str] resource_group_name: The name of the resource group. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() __props__['account_name'] = account_name __props__['active_directories'] = active_directories __props__['location'] = location if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__['resource_group_name'] = resource_group_name __props__['tags'] = tags __props__['name'] = None __props__['provisioning_state'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Account"), pulumi.Alias(type_="azure-native:netapp:Account"), pulumi.Alias(type_="azure-nextgen:netapp:Account"), pulumi.Alias(type_="azure-native:netapp/latest:Account"), pulumi.Alias(type_="azure-nextgen:netapp/latest:Account"), pulumi.Alias(type_="azure-native:netapp/v20170815:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Account"), pulumi.Alias(type_="azure-native:netapp/v20190501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Account"), pulumi.Alias(type_="azure-native:netapp/v20190601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Account"), pulumi.Alias(type_="azure-native:netapp/v20190701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Account"), pulumi.Alias(type_="azure-native:netapp/v20190801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Account"), pulumi.Alias(type_="azure-native:netapp/v20191001:Account"), 
pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Account"), pulumi.Alias(type_="azure-native:netapp/v20191101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Account"), pulumi.Alias(type_="azure-native:netapp/v20200201:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Account"), pulumi.Alias(type_="azure-native:netapp/v20200301:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Account"), pulumi.Alias(type_="azure-native:netapp/v20200501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Account"), pulumi.Alias(type_="azure-native:netapp/v20200601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Account"), pulumi.Alias(type_="azure-native:netapp/v20200701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Account"), pulumi.Alias(type_="azure-native:netapp/v20200801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Account"), pulumi.Alias(type_="azure-native:netapp/v20201101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Account")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Account, __self__).__init__( 'azure-native:netapp/v20200901:Account', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'Account': """ Get an existing Account resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() __props__["active_directories"] = None __props__["location"] = None __props__["name"] = None __props__["provisioning_state"] = None __props__["tags"] = None __props__["type"] = None return Account(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="activeDirectories") def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]: """ Active Directories """ return pulumi.get(self, "active_directories") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ Resource location """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ Azure lifecycle management """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ Resource tags """ return pulumi.get(self, "tags") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Resource type """ return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
sdk/python/pulumi_azure_native/netapp/v20200901/account.py
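A minimal sketch of how this generated resource class might be declared from a Pulumi program; the module import path is inferred from the file location, and the resource group, region and account names are placeholder assumptions.

import pulumi
from pulumi_azure_native.netapp.v20200901 import Account

# Hypothetical names; any existing resource group and supported region would do.
account = Account(
    "exampleNetAppAccount",
    account_name="example-netapp-account",
    resource_group_name="example-rg",
    location="westus2",
    tags={"env": "dev"},
)

pulumi.export("accountName", account.name)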
import os import logging from django.conf import settings from django.utils import translation from django.utils.translation import gettext_lazy as _ from django.db import transaction from django.core.files.base import ContentFile from celery.exceptions import SoftTimeLimitExceeded from froide.celery import app as celery_app from froide.publicbody.models import PublicBody from froide.upload.models import Upload from .models import FoiRequest, FoiMessage, FoiAttachment, FoiProject from .foi_mail import _process_mail, _fetch_mail from .notifications import send_classification_reminder logger = logging.getLogger(__name__) @celery_app.task( name="froide.foirequest.tasks.process_mail", acks_late=True, time_limit=60 ) def process_mail(*args, **kwargs): translation.activate(settings.LANGUAGE_CODE) with transaction.atomic(): _process_mail(*args, **kwargs) @celery_app.task(name="froide.foirequest.tasks.fetch_mail", expires=60) def fetch_mail(): for mail_uid, rfc_data in _fetch_mail(): process_mail.delay(rfc_data, mail_uid=mail_uid) @celery_app.task def detect_overdue(): translation.activate(settings.LANGUAGE_CODE) for foirequest in FoiRequest.objects.get_to_be_overdue(): foirequest.set_overdue() @celery_app.task def detect_asleep(): translation.activate(settings.LANGUAGE_CODE) for foirequest in FoiRequest.objects.get_to_be_asleep(): foirequest.set_asleep() @celery_app.task def classification_reminder(): translation.activate(settings.LANGUAGE_CODE) for foirequest in FoiRequest.objects.get_unclassified(): send_classification_reminder(foirequest) @celery_app.task def check_delivery_status(message_id, count=None, extended=False): try: message = FoiMessage.objects.get(id=message_id) except FoiMessage.DoesNotExist: return message.check_delivery_status(count=count, extended=extended) @celery_app.task def create_project_requests(project_id, publicbody_ids, **kwargs): for seq, pb_id in enumerate(publicbody_ids): create_project_request.delay(project_id, pb_id, sequence=seq, **kwargs) @celery_app.task def create_project_request(project_id, publicbody_id, sequence=0, **kwargs): from .services import CreateRequestFromProjectService try: project = FoiProject.objects.get(id=project_id) except FoiProject.DoesNotExist: # project does not exist anymore? return try: pb = PublicBody.objects.get(id=publicbody_id) except PublicBody.DoesNotExist: # pb was deleted? 
return kwargs.update( { "project": project, "publicbody": pb, "subject": project.title, "user": project.user, "body": project.description, "public": project.public, "reference": project.reference, "tags": [t.name for t in project.tags.all()], "project_order": sequence, } ) service = CreateRequestFromProjectService(kwargs) foirequest = service.execute() if project.request_count == project.foirequest_set.all().count(): project.status = FoiProject.STATUS_READY project.save() return foirequest.pk @celery_app.task(name="froide.foirequest.tasks.convert_attachment_task", time_limit=60) def convert_attachment_task(instance_id): try: att = FoiAttachment.objects.get(pk=instance_id) except FoiAttachment.DoesNotExist: return if att.can_convert_to_pdf(): return convert_attachment(att) def ocr_pdf_attachment(att): if att.converted: ocred_att = att.converted else: name, ext = os.path.splitext(att.name) name = _("{name}_ocr{ext}").format(name=name, ext=".pdf") ocred_att = FoiAttachment.objects.create( name=name, belongs_to=att.belongs_to, approved=False, filetype="application/pdf", is_converted=True, can_approve=att.can_approve, ) att.converted = ocred_att att.can_approve = False att.approved = False att.save() ocr_pdf_task.delay( att.pk, ocred_att.pk, ) def convert_attachment(att): from filingcabinet.pdf_utils import convert_to_pdf output_bytes = convert_to_pdf( att.file.path, binary_name=settings.FROIDE_CONFIG.get("doc_conversion_binary"), construct_call=settings.FROIDE_CONFIG.get("doc_conversion_call_func"), ) if output_bytes is None: return if att.converted: new_att = att.converted else: name, ext = os.path.splitext(att.name) name = _("{name}_converted{ext}").format(name=name, ext=".pdf") new_att = FoiAttachment( name=name, belongs_to=att.belongs_to, approved=False, filetype="application/pdf", is_converted=True, can_approve=att.can_approve, ) new_file = ContentFile(output_bytes) new_att.size = new_file.size new_att.file.save(new_att.name, new_file) new_att.save() att.converted = new_att att.can_approve = False att.approved = False att.save() @celery_app.task( name="froide.foirequest.tasks.convert_images_to_pdf_task", time_limit=60 * 5, soft_time_limit=60 * 4, ) def convert_images_to_pdf_task(att_ids, target_id, instructions, can_approve=True): from filingcabinet.pdf_utils import convert_images_to_ocred_pdf att_qs = FoiAttachment.objects.filter(id__in=att_ids) att_map = {a.id: a for a in att_qs} atts = [att_map[a_id] for a_id in att_ids] try: target = FoiAttachment.objects.get(id=target_id) except FoiAttachment.DoesNotExist: return paths = [a.file.path for a in atts] try: pdf_bytes = convert_images_to_ocred_pdf(paths, instructions=instructions) except SoftTimeLimitExceeded: pdf_bytes = None if pdf_bytes is None: att_qs.update(can_approve=can_approve) target.delete() return new_file = ContentFile(pdf_bytes) target.size = new_file.size target.file.save(target.name, new_file) target.save() @celery_app.task( name="froide.foirequest.tasks.ocr_pdf_task", time_limit=60 * 5, soft_time_limit=60 * 4, ) def ocr_pdf_task(att_id, target_id, can_approve=True): from filingcabinet.pdf_utils import run_ocr try: attachment = FoiAttachment.objects.get(pk=att_id) except FoiAttachment.DoesNotExist: return try: target = FoiAttachment.objects.get(pk=target_id) except FoiAttachment.DoesNotExist: return try: pdf_bytes = run_ocr( attachment.file.path, language=settings.TESSERACT_LANGUAGE if settings.TESSERACT_LANGUAGE else settings.LANGUAGE_CODE, timeout=180, ) except SoftTimeLimitExceeded: pdf_bytes = None if pdf_bytes is 
None: attachment.can_approve = can_approve attachment.save() target.delete() return new_file = ContentFile(pdf_bytes) target.size = new_file.size target.file.save(target.name, new_file) target.save() @celery_app.task( name="froide.foirequest.tasks.redact_attachment_task", time_limit=60 * 6, soft_time_limit=60 * 5, ) def redact_attachment_task(att_id, target_id, instructions): from filingcabinet.pdf_utils import run_ocr from froide.helper.redaction import redact_file try: attachment = FoiAttachment.objects.get(pk=att_id) except FoiAttachment.DoesNotExist: return if att_id != target_id: try: target = FoiAttachment.objects.get(pk=target_id) except FoiAttachment.DoesNotExist: return else: target = attachment logger.info("Trying redaction of %s", attachment.id) try: pdf_bytes = redact_file(attachment.file, instructions) except Exception: logger.error("PDF redaction error", exc_info=True) pdf_bytes = None if pdf_bytes is None: logger.info("Redaction failed %s", attachment.id) # Redaction has failed, remove empty attachment if attachment.redacted: attachment.redacted = None if attachment.is_redacted: attachment.approved = True attachment.can_approve = True attachment.pending = False attachment.save() if not target.file: target.delete() return logger.info("Redaction successful %s", attachment.id) pdf_file = ContentFile(pdf_bytes) target.size = pdf_file.size target.file.save(target.name, pdf_file, save=False) logger.info("Trying OCR %s", target.id) try: pdf_bytes = run_ocr( target.file.path, language=settings.TESSERACT_LANGUAGE if settings.TESSERACT_LANGUAGE else settings.LANGUAGE_CODE, timeout=60 * 4, ) except SoftTimeLimitExceeded: pdf_bytes = None if pdf_bytes is not None: logger.info("OCR successful %s", target.id) pdf_file = ContentFile(pdf_bytes) target.size = pdf_file.size target.file.save(target.name, pdf_file, save=False) else: logger.info("OCR failed %s", target.id) target.can_approve = True target.pending = False target.approve_and_save() FoiAttachment.attachment_published.send(sender=target, user=None) @celery_app.task(name="froide.foirequest.tasks.move_upload_to_attachment") def move_upload_to_attachment(att_id, upload_id): try: att = FoiAttachment.objects.get(pk=att_id) except FoiAttachment.DoesNotExist: return try: upload = Upload.objects.get(pk=upload_id) except FoiAttachment.DoesNotExist: return file = upload.get_file() if file: att.pending = False att.file.save(att.name, file, save=True) upload.finish() upload.delete() if att.can_convert_to_pdf(): convert_attachment_task.delay(att.id)
froide/foirequest/tasks.py
"""Parse Warren2020 fluxes. Fluxes from https://zenodo.org/record/3952926 (DOI:10.5281/zenodo.3952926) See https://arxiv.org/abs/1902.01340 and https://arxiv.org/abs/1912.03328 for description of the models. """ import h5py from sntools.formats import gamma, get_starttime, get_endtime flux = {} def parse_input(input, inflv, starttime, endtime): """Read simulations data from input file. Arguments: input -- prefix of file containing neutrino fluxes inflv -- neutrino flavor to consider starttime -- start time set by user via command line option (or None) endtime -- end time set by user via command line option (or None) """ f = h5py.File(input, 'r') for (t, r) in f['sim_data']['shock_radius']: if r > 1: tbounce = t * 1000 # convert to ms break starttime = get_starttime(starttime, 1000 * f['sim_data']['shock_radius'][0][0] - tbounce) endtime = get_endtime(endtime, 1000 * f['sim_data']['shock_radius'][-1][0] - tbounce) # Save flux data to dictionary to look up in nu_emission() below global flux flux = {} path = {'e': 'nue_data', 'eb': 'nuae_data', 'x': 'nux_data', 'xb': 'nux_data'}[inflv] for i, (t, lum) in enumerate(f[path]['lum']): t = 1000 * t - tbounce # convert to time post-bounce in ms if (t < starttime - 30) or (t > endtime + 30): # Ignore data outside of the requested time span. continue lum *= 1e51 * 624.151 # convert from 10^51 erg/s to MeV/ms mean_e = f[path]['avg_energy'][i][1] mean_e_sq = f[path]['rms_energy'][i][1]**2 flux[t] = (mean_e, mean_e_sq, lum) f.close() return (starttime, endtime, sorted(flux.keys())) def prepare_evt_gen(binned_t): global flux gamma.flux = flux gamma.prepare_evt_gen(binned_t) flux = gamma.flux def nu_emission(eNu, time): gamma.flux = flux return gamma.nu_emission(eNu, time)
sntools/formats/warren2020.py
1,957
Read simulations data from input file. Arguments: input -- prefix of file containing neutrino fluxes inflv -- neutrino flavor to consider starttime -- start time set by user via command line option (or None) endtime -- end time set by user via command line option (or None) Parse Warren2020 fluxes. Fluxes from https://zenodo.org/record/3952926 (DOI:10.5281/zenodo.3952926) See https://arxiv.org/abs/1902.01340 and https://arxiv.org/abs/1912.03328 for description of the models. convert to ms Save flux data to dictionary to look up in nu_emission() below convert to time post-bounce in ms Ignore data outside of the requested time span. convert from 10^51 erg/s to MeV/ms
676
en
0.640687
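A minimal usage sketch for this format module, assuming the package layout in the path above; the input file name is a placeholder for one of the Warren2020 HDF5 files from the Zenodo record.

from sntools.formats import warren2020

# Placeholder file name; keyword names follow the parse_input signature above.
starttime, endtime, times = warren2020.parse_input(
    "stir_multimessenger_a1.23.h5",  # hypothetical HDF5 input
    inflv="e",                        # electron neutrino flavor
    starttime=None,                   # use earliest available post-bounce time
    endtime=None,                     # use latest available post-bounce time
)
warren2020.prepare_evt_gen(times)
flux_value = warren2020.nu_emission(eNu=10.0, time=times[0])  # flux at 10 MeV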
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Sample Google App Engine application that demonstrates using the Users API For more information about App Engine, see README.md under /appengine. """ # [START all] from google.appengine.api import users import webapp2 class MainPage(webapp2.RequestHandler): def get(self): user = users.get_current_user() if user: nickname = user.nickname() logout_url = users.create_logout_url('/') greeting = 'Welcome, {}! (<a href="{}">sign out</a>)'.format( nickname, logout_url) else: login_url = users.create_login_url('/') greeting = '<a href="{}">Sign in</a>'.format(login_url) self.response.write( '<html><body>{}</body></html>'.format(greeting)) class AdminPage(webapp2.RequestHandler): def get(self): user = users.get_current_user() if user: if users.is_current_user_admin(): self.response.write('You are an administrator.') else: self.response.write('You are not an administrator.') else: self.response.write('You are not logged in.') app = webapp2.WSGIApplication([ ('/', MainPage), ('/admin', AdminPage) ], debug=True) # [END all]
appengine/standard/users/main.py
1,847
Sample Google App Engine application that demonstrates using the Users API For more information about App Engine, see README.md under /appengine. Copyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. [START all] [END all]
719
en
0.840371
""" ASGI config for avocadobites project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'avocadobites.settings') application = get_asgi_application()
avocadobites/avocadobites/asgi.py
401
ASGI config for avocadobites project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
218
en
0.723677
""" eZmax API Definition This API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501 The version of the OpenAPI document: 1.1.3 Contact: [email protected] Generated by: https://openapi-generator.tech """ import sys import unittest import eZmaxApi from eZmaxApi.model.ezsignformfield_response import EzsignformfieldResponse globals()['EzsignformfieldResponse'] = EzsignformfieldResponse from eZmaxApi.model.ezsignformfield_response_compound import EzsignformfieldResponseCompound class TestEzsignformfieldResponseCompound(unittest.TestCase): """EzsignformfieldResponseCompound unit test stubs""" def setUp(self): pass def tearDown(self): pass def testEzsignformfieldResponseCompound(self): """Test EzsignformfieldResponseCompound""" # FIXME: construct object with mandatory attributes with example values # model = EzsignformfieldResponseCompound() # noqa: E501 pass if __name__ == '__main__': unittest.main()
test/test_ezsignformfield_response_compound.py
1,047
EzsignformfieldResponseCompound unit test stubs Test EzsignformfieldResponseCompound eZmax API Definition This API expose all the functionnalities for the eZmax and eZsign applications. # noqa: E501 The version of the OpenAPI document: 1.1.3 Contact: [email protected] Generated by: https://openapi-generator.tech FIXME: construct object with mandatory attributes with example values model = EzsignformfieldResponseCompound() noqa: E501
446
en
0.685035
import os import shutil import tempfile from unittest import TestCase from mock import patch from regulations.apps import RegulationsConfig class RegulationsConfigTests(TestCase): def setUp(self): self.tmpdir = tempfile.mkdtemp() def tearDown(self): shutil.rmtree(self.tmpdir) @patch('regulations.apps.get_app_template_dirs') def test_precompute_custom_templates(self, get_app_template_dirs): """Verify that custom templates are found""" get_app_template_dirs.return_value = [self.tmpdir] open(os.path.join(self.tmpdir, '123-45-a.html'), 'w').close() open(os.path.join(self.tmpdir, 'other.html'), 'w').close() RegulationsConfig.precompute_custom_templates() self.assertEqual(RegulationsConfig.custom_tpls['123-45-a'], 'regulations/custom_nodes/123-45-a.html') self.assertEqual(RegulationsConfig.custom_tpls['other'], 'regulations/custom_nodes/other.html') self.assertFalse('another' in RegulationsConfig.custom_tpls)
regulations/tests/apps_tests.py
1,070
Verify that custom templates are found
38
en
0.867291
"""@package vc_updated Functions to implement the updated Voce-Chaboche material model and measure its error. """ import numpy as np import pandas as pd from numdifftools import nd_algopy as nda def uvc_return_mapping(x_sol, data, tol=1.0e-8, maximum_iterations=1000): """ Implements the time integration of the updated Voce-Chaboche material model. :param np.array x_sol: Updated Voce-Chaboche model parameters. :param pd.DataFrame data: stress-strain data. :param float tol: Local Newton tolerance. :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded. :return dict: History of: stress ('stress'), strain ('strain'), the total error ('error') calculated by the updated Voce-Chaboche model, number of iterations for convergence at each increment ('num_its'). """ if len(x_sol) < 8: raise RuntimeError("No backstresses or using original V-C params.") n_param_per_back = 2 n_basic_param = 6 # Get material properties E = x_sol[0] * 1.0 sy_0 = x_sol[1] * 1.0 Q = x_sol[2] * 1.0 b = x_sol[3] * 1.0 D = x_sol[4] * 1.0 a = x_sol[5] * 1.0 # Set up backstresses n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back) c_k = [] gamma_k = [] for i in range(0, n_backstresses): c_k.append(x_sol[n_basic_param + n_param_per_back * i]) gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i]) # Initialize parameters alpha_components = np.zeros(n_backstresses, dtype=object) # backstress components strain = 0. stress = 0. ep_eq = 0. # equivalent plastic strain error = 0. # error measure sum_abs_de = 0. # total strain stress_sim = 0.0 stress_test = 0.0 area_test = 0.0 stress_track = [] strain_track = [] strain_inc_track = [] iteration_track = [] loading = np.diff(data['e_true']) for increment_number, strain_inc in enumerate(loading): strain += strain_inc alpha = np.sum(alpha_components) yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq)) trial_stress = stress + E * strain_inc relative_stress = trial_stress - alpha flow_dir = np.sign(relative_stress) yield_condition = np.abs(relative_stress) - yield_stress if yield_condition > tol: is_converged = False else: is_converged = True # For error stress_sim_1 = stress_sim * 1.0 stress_test_1 = stress_test * 1.0 # Return mapping if plastic loading ep_eq_init = ep_eq alpha_init = alpha consist_param = 0. number_of_iterations = 0 while is_converged is False and number_of_iterations < maximum_iterations: number_of_iterations += 1 # Isotropic hardening and isotropic modulus yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq)) iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq) # Kinematic hardening and kinematic modulus alpha = 0. kin_modulus = 0. 
for i in range(0, n_backstresses): e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init)) alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i] delta_alpha = alpha - alpha_init # Local Newton step numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha) denominator = -(E + iso_modulus + kin_modulus) consist_param = consist_param - numerator / denominator ep_eq = ep_eq_init + consist_param if np.abs(numerator) < tol: is_converged = True # Update the variables stress = trial_stress - E * flow_dir * consist_param for i in range(0, n_backstresses): e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init)) alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \ + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k stress_track.append(stress) strain_track.append(strain) strain_inc_track.append(strain_inc) iteration_track.append(number_of_iterations) # Calculate the error stress_sim = stress * 1.0 stress_test = data['Sigma_true'].iloc[increment_number + 1] sum_abs_de += np.abs(strain_inc) area_test += np.abs(strain_inc) * ((stress_test) ** 2 + (stress_test_1) ** 2) / 2. error += np.abs(strain_inc) * ((stress_sim - stress_test) ** 2 + (stress_sim_1 - stress_test_1) ** 2) / 2. if number_of_iterations >= maximum_iterations: print ("Increment number = ", increment_number) print ("Parameters = ", x_sol) print ("Numerator = ", numerator) raise RuntimeError('Return mapping did not converge in ' + str(maximum_iterations) + ' iterations.') area = area_test / sum_abs_de error = error / sum_abs_de return {'stress': stress_track, 'strain': strain_track, 'error': error, 'num_its': iteration_track, 'area': area} def sim_curve_uvc(x_sol, test_clean): """ Returns the stress-strain approximation of the updated Voce-Chaboche material model to a given strain input. :param np.array x_sol: Voce-Chaboche model parameters :param DataFrame test_clean: stress-strain data :return DataFrame: Voce-Chaboche approximation The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true". """ model_output = uvc_return_mapping(x_sol, test_clean) strain = np.append([0.], model_output['strain']) stress = np.append([0.], model_output['stress']) sim_curve = pd.DataFrame(np.array([strain, stress]).transpose(), columns=['e_true', 'Sigma_true']) return sim_curve def error_single_test_uvc(x_sol, test_clean): """ Returns the relative error between a test and its approximation using the updated Voce-Chaboche material model. :param np.array x_sol: Voce-Chaboche model parameters :param DataFrame test_clean: stress-strain data :return float: relative error The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true". """ model_output = uvc_return_mapping(x_sol, test_clean) return model_output['error'] def normalized_error_single_test_uvc(x_sol, test_clean): """ Returns the error and the total area of a test and its approximation using the updated Voce-Chaboche material model. :param np.array x_sol: Voce-Chaboche model parameters :param DataFrame test_clean: stress-strain data :return list: (float) total error, (float) total area The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true". 
""" model_output = uvc_return_mapping(x_sol, test_clean) return [model_output['error'], model_output['area']] def calc_phi_total(x, data): """ Returns the sum of the normalized relative error of the updated Voce-Chaboche material model given x. :param np.array x: Updated Voce-Chaboche material model parameters. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return float: Normalized error value expressed as a percent (raw value * 100). The normalized error is defined in de Sousa and Lignos (2017). """ error_total = 0. area_total = 0. for d in data: error, area = normalized_error_single_test_uvc(x, d) error_total += error area_total += area return np.sqrt(error_total / area_total) * 100. def test_total_area(x, data): """ Returns the total squared area underneath all the tests. :param np.array x: Updated Voce-Chaboche material model parameters. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return float: Total squared area. """ area_total = 0. for d in data: _, area = normalized_error_single_test_uvc(x, d) area_total += area return area_total def uvc_get_hessian(x, data): """ Returns the Hessian of the material model error function for a given set of test data evaluated at x. :param np.array x: Updated Voce-Chaboche material model parameters. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return np.array: Hessian matrix of the error function. """ def f(xi): val = 0. for d in data: val += error_single_test_uvc(xi, d) return val hess_fun = nda.Hessian(f) return hess_fun(x) def uvc_consistency_metric(x_base, x_sample, data): """ Returns the xi_2 consistency metric from de Sousa and Lignos 2019 using the updated Voce-Chaboche model. :param np.array x_base: Updated Voce-Chaboche material model parameters from the base case. :param np.array x_sample: Updated Voce-Chaboche material model parameters from the sample case. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return float: Increase in quadratic approximation from the base to the sample case. """ x_diff = x_sample - x_base hess_base = uvc_get_hessian(x_base, data) numerator = np.dot(x_diff, hess_base.dot(x_diff)) denominator = test_total_area(x_base, data) return np.sqrt(numerator / denominator) def uvc_tangent_modulus(x_sol, data, tol=1.0e-8, maximum_iterations=1000): """ Returns the tangent modulus at each strain step. :param np.array x_sol: Updated Voce-Chaboche model parameters. :param pd.DataFrame data: stress-strain data. :param float tol: Local Newton tolerance. :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded. :return np.ndarray: Tangent modulus array. """ if len(x_sol) < 8: raise RuntimeError("No backstresses or using original V-C params.") n_param_per_back = 2 n_basic_param = 6 # Get material properties E = x_sol[0] * 1.0 sy_0 = x_sol[1] * 1.0 Q = x_sol[2] * 1.0 b = x_sol[3] * 1.0 D = x_sol[4] * 1.0 a = x_sol[5] * 1.0 # Set up backstresses n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back) c_k = [] gamma_k = [] for i in range(0, n_backstresses): c_k.append(x_sol[n_basic_param + n_param_per_back * i]) gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i]) # Initialize parameters alpha_components = np.zeros(n_backstresses, dtype=object) # backstress components strain = 0. stress = 0. ep_eq = 0. 
# equivalent plastic strain stress_track = [] strain_track = [] strain_inc_track = [] iteration_track = [] tangent_track = [] loading = np.diff(data['e_true']) for increment_number, strain_inc in enumerate(loading): strain += strain_inc alpha = np.sum(alpha_components) yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq)) trial_stress = stress + E * strain_inc relative_stress = trial_stress - alpha flow_dir = np.sign(relative_stress) yield_condition = np.abs(relative_stress) - yield_stress if yield_condition > tol: is_converged = False else: is_converged = True # Return mapping if plastic loading ep_eq_init = ep_eq alpha_init = alpha consist_param = 0. number_of_iterations = 0 while is_converged is False and number_of_iterations < maximum_iterations: number_of_iterations += 1 # Isotropic hardening and isotropic modulus yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq)) iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq) # Kinematic hardening and kinematic modulus alpha = 0. kin_modulus = 0. for i in range(0, n_backstresses): e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init)) alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i] delta_alpha = alpha - alpha_init # Local Newton step numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha) denominator = -(E + iso_modulus + kin_modulus) consist_param = consist_param - numerator / denominator ep_eq = ep_eq_init + consist_param if np.abs(numerator) < tol: is_converged = True # Update the variables stress = trial_stress - E * flow_dir * consist_param for i in range(0, n_backstresses): e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init)) alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \ + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k stress_track.append(stress) strain_track.append(strain) strain_inc_track.append(strain_inc) iteration_track.append(number_of_iterations) # Calculate the tangent modulus if number_of_iterations > 0: h_prime = 0. for i in range(0, n_backstresses): h_prime += c_k[i] - flow_dir * gamma_k[i] * alpha_components[i] k_prime = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq) tangent_track.append(E * (k_prime + h_prime) / (E + k_prime + h_prime)) else: # Elastic loading tangent_track.append(E) return np.append([0.], np.array(tangent_track))
RESSPyLab/uvc_model.py
14,107
Returns the sum of the normalized relative error of the updated Voce-Chaboche material model given x. :param np.array x: Updated Voce-Chaboche material model parameters. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return float: Normalized error value expressed as a percent (raw value * 100). The normalized error is defined in de Sousa and Lignos (2017). Returns the relative error between a test and its approximation using the updated Voce-Chaboche material model. :param np.array x_sol: Voce-Chaboche model parameters :param DataFrame test_clean: stress-strain data :return float: relative error The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true". Returns the error and the total area of a test and its approximation using the updated Voce-Chaboche material model. :param np.array x_sol: Voce-Chaboche model parameters :param DataFrame test_clean: stress-strain data :return list: (float) total error, (float) total area The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true". Returns the stress-strain approximation of the updated Voce-Chaboche material model to a given strain input. :param np.array x_sol: Voce-Chaboche model parameters :param DataFrame test_clean: stress-strain data :return DataFrame: Voce-Chaboche approximation The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true". Returns the total squared area underneath all the tests. :param np.array x: Updated Voce-Chaboche material model parameters. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return float: Total squared area. Returns the xi_2 consistency metric from de Sousa and Lignos 2019 using the updated Voce-Chaboche model. :param np.array x_base: Updated Voce-Chaboche material model parameters from the base case. :param np.array x_sample: Updated Voce-Chaboche material model parameters from the sample case. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return float: Increase in quadratic approximation from the base to the sample case. Returns the Hessian of the material model error function for a given set of test data evaluated at x. :param np.array x: Updated Voce-Chaboche material model parameters. :param list data: (pd.DataFrame) Stress-strain history for each test considered. :return np.array: Hessian matrix of the error function. Implements the time integration of the updated Voce-Chaboche material model. :param np.array x_sol: Updated Voce-Chaboche model parameters. :param pd.DataFrame data: stress-strain data. :param float tol: Local Newton tolerance. :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded. :return dict: History of: stress ('stress'), strain ('strain'), the total error ('error') calculated by the updated Voce-Chaboche model, number of iterations for convergence at each increment ('num_its'). Returns the tangent modulus at each strain step. :param np.array x_sol: Updated Voce-Chaboche model parameters. :param pd.DataFrame data: stress-strain data. :param float tol: Local Newton tolerance. :param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded. :return np.ndarray: Tangent modulus array. @package vc_updated Functions to implement the updated Voce-Chaboche material model and measure its error. 
Get material properties Set up backstresses Initialize parameters backstress components equivalent plastic strain error measure total strain For error Return mapping if plastic loading Isotropic hardening and isotropic modulus Kinematic hardening and kinematic modulus Local Newton step Update the variables Calculate the error Get material properties Set up backstresses Initialize parameters backstress components equivalent plastic strain Return mapping if plastic loading Isotropic hardening and isotropic modulus Kinematic hardening and kinematic modulus Local Newton step Update the variables Calculate the tangent modulus Elastic loading
4,148
en
0.484085
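A small usage sketch, assuming the package layout in the path above and an illustrative parameter vector [E, sy_0, Q, b, D, a, c_1, gamma_1]; the numbers and the synthetic strain history are placeholders, not calibrated values.

import numpy as np
import pandas as pd

from RESSPyLab.uvc_model import uvc_return_mapping, sim_curve_uvc  # assumed import path

# Placeholder parameters: [E, sy_0, Q, b, D, a, c_1, gamma_1]
x = np.array([200000., 350., 100., 10., 50., 200., 20000., 150.])

# Synthetic monotonic strain history with a dummy measured-stress column.
strain = np.linspace(0.0, 0.02, 200)
data = pd.DataFrame({'e_true': strain, 'Sigma_true': np.zeros_like(strain)})

out = uvc_return_mapping(x, data)
print(out['error'], out['num_its'][:5])
sim = sim_curve_uvc(x, data)  # DataFrame with 'e_true' and 'Sigma_true' columns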
import wikipedia as wiki

from ..parsing import get_wiki_page_id, get_wiki_lines, get_wiki_sections

# Note: this function also relies on names that are not imported in this file
# (urlscan, refextract, doi, wikiparse, arxiv_url_regex, logger, _serialize);
# they are presumably provided elsewhere in the package.


def get_wiki_references(url, outfile=None):
    """get_wiki_references.

    Extracts references from predefined sections of wiki page
    Uses `urlscan`, `refextract`, `doi`, `wikipedia`, and `re` (for ArXiv URLs)

    :param url: URL of wiki article to scrape
    :param outfile: File to write extracted references to
    """
    def _check(l):
        return (not l['doi'] or l['doi'] == l['refs'][-1]['doi']) \
            and (not l['arxiv'] or l['arxiv'] == l['refs'][-1]['arxiv'])

    page = wiki.page(get_wiki_page_id(url))
    sections = get_wiki_sections(page.content)
    lines = sum([get_wiki_lines(s, predicate=any) for s in sections.values()], [])
    links = sum([wikiparse.parse(s).external_links for s in sections.values()], [])
    summary = sum([
        [
            {
                'raw': l,
                'links': urlscan.parse_text_urls(l),
                'refs': refextract.extract_references_from_string(l),
                'doi': doi.find_doi_in_text(l),
                # arxiv_url_regex is assumed to expose a .matches() method;
                # a plain compiled re pattern would use .search() instead.
                'arxiv': m.group(1) if (m := arxiv_url_regex.matches(l)) is not None else None
            }
            for l in get_wiki_lines(s, predicate=any)
        ]
        for s in sections.values()
    ], [])  # start value [] so sum() concatenates the per-section lists
    failed = [ld for ld in summary if not _check(ld)]
    if any(failed):
        logger.warning('Consistency check failed for the following lines: {}'.format(failed))
    return _serialize(summary, outfile)


if __name__ == "__main__":
    import doctest
    doctest.testmod()
scraper/apis/wikipedia.py
1,558
get_wiki_references. Extracts references from predefined sections of wiki page Uses `urlscan`, `refextract`, `doi`, `wikipedia`, and `re` (for ArXiv URLs) :param url: URL of wiki article to scrape :param outfile: File to write extracted references to
251
en
0.725629
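The docstring above says `re` is used for ArXiv URLs, but the pattern itself is not shown; a minimal sketch of what such a regex could look like (the project's actual arxiv_url_regex may differ, and old-style identifiers are not covered here).

import re

# Hypothetical pattern for new-style arXiv identifiers in URLs.
arxiv_url_regex = re.compile(r'arxiv\.org/(?:abs|pdf)/(\d{4}\.\d{4,5})(?:v\d+)?')

m = arxiv_url_regex.search("See https://arxiv.org/abs/1902.01340 for details.")
print(m.group(1) if m else None)  # -> 1902.01340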
from concurrent.futures.process import ProcessPoolExecutor import api.Config import api.middleware from api.Config import app from api.routers import (feedback, hiscore, label, legacy, legacy_debug, player, prediction, report, scraper) app.include_router(hiscore.router) app.include_router(player.router) app.include_router(prediction.router) app.include_router(feedback.router) app.include_router(report.router) app.include_router(legacy.router) app.include_router(scraper.router) app.include_router(label.router) app.include_router(legacy_debug.router) @app.get("/") async def root(): return {"message": "Hello World"} # @app.on_event("startup") # async def startup_event(): # app.state.executor = ProcessPoolExecutor() # @app.on_event("shutdown") # async def on_shutdown(): # app.state.executor.shutdown()
api/app.py
854
@app.on_event("startup") async def startup_event(): app.state.executor = ProcessPoolExecutor() @app.on_event("shutdown") async def on_shutdown(): app.state.executor.shutdown()
183
en
0.516125
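A hedged sketch of exercising the root route, assuming the project package is importable from the repository root; FastAPI's TestClient is standard, but the surrounding routers need the rest of the project to import cleanly.

# Typical local run (module path taken from api/app.py):
#   uvicorn api.app:app --reload --port 8000

from fastapi.testclient import TestClient

from api.app import app

client = TestClient(app)
assert client.get("/").json() == {"message": "Hello World"}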
# Copyright lowRISC contributors. # Licensed under the Apache License, Version 2.0, see LICENSE for details. # SPDX-License-Identifier: Apache-2.0 import os import subprocess OTBN_DIR = os.path.join(os.path.dirname(__file__), '../../..') UTIL_DIR = os.path.join(OTBN_DIR, 'util') SIM_DIR = os.path.join(os.path.dirname(__file__), '..') def asm_and_link_one_file(asm_path: str, work_dir: str) -> str: '''Assemble and link file at asm_path in work_dir. Returns the path to the resulting ELF ''' otbn_as = os.path.join(UTIL_DIR, 'otbn-as') otbn_ld = os.path.join(UTIL_DIR, 'otbn-ld') obj_path = os.path.join(work_dir, 'tst.o') elf_path = os.path.join(work_dir, 'tst') subprocess.run([otbn_as, '-o', obj_path, asm_path], check=True) subprocess.run([otbn_ld, '-o', elf_path, obj_path], check=True) return elf_path
hw/ip/otbn/dv/otbnsim/test/testutil.py
857
Assemble and link file at asm_path in work_dir. Returns the path to the resulting ELF Copyright lowRISC contributors. Licensed under the Apache License, Version 2.0, see LICENSE for details. SPDX-License-Identifier: Apache-2.0
229
en
0.751156
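A minimal sketch of a test that might use this helper, assuming pytest's tmp_path fixture, that the module is importable as testutil, and a hypothetical assembly file next to the test.

import os

from testutil import asm_and_link_one_file  # assumed import path


def test_assemble_and_link(tmp_path):
    # 'loop.s' is a placeholder assembly file, not part of the source shown.
    asm = os.path.join(os.path.dirname(__file__), 'loop.s')
    elf = asm_and_link_one_file(asm, str(tmp_path))
    assert os.path.exists(elf)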
#Find,Remove,Find """Return a tuple of the indices of the two smallest values in list L. >>> items = [809, 834, 477, 478, 307, 122, 96, 102, 324, 476] >>> find_two_smallest(items) (6, 7) >>> items == [809, 834, 477, 478, 307, 122, 96, 102, 324, 476] True """ from typing import List, Tuple def find_two_smallest(L:List[float]) -> Tuple[int, int]: """ (see above) """ # Find the index of the minimum and remove that item smallest = min(L) min1 = L.index(smallest) L.remove(smallest) # Find the index of the new minimum item in the list next_smallest = min(L) min2 = L.index(next_smallest) # Put smallest back into L L.insert(min1, smallest) # Fix min2 in case it was affected by the removal and reinsertion: if min1 <= min2: min2 +=1 return (min1, min2) if __name__ == '__main__': import doctest doctest.testmod() print(find_two_smallest([0, 1, 3, 2, 5, 6, 1]))
chapter12/examples/example02.py
972
(see above) Return a tuple of the indices of the two smallest values in list L. >>> items = [809, 834, 477, 478, 307, 122, 96, 102, 324, 476] >>> find_two_smallest(items) (6, 7) >>> items == [809, 834, 477, 478, 307, 122, 96, 102, 324, 476] True Find,Remove,Find Find the index of the minimum and remove that item Find the index of the new minimum item in the list Put smallest back into L Fix min2 in case it was affected by the removal and reinsertion:
456
en
0.759237
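For contrast, a non-mutating way to get the same pair of indices by sorting (index, value) pairs; this is an illustrative alternative, not part of the chapter example.

from typing import List, Tuple


def find_two_smallest_sorted(L: List[float]) -> Tuple[int, int]:
    """Return the indices of the two smallest values without mutating L."""
    # Pair each value with its index, sort by value, keep the first two pairs.
    (i1, _), (i2, _) = sorted(enumerate(L), key=lambda p: p[1])[:2]
    return (i1, i2)


print(find_two_smallest_sorted([809, 834, 477, 478, 307, 122, 96, 102, 324, 476]))  # (6, 7)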
from __future__ import absolute_import, print_function import logging import bokeh.server.tornado as tornado from bokeh.application import Application from bokeh.client import pull_session from bokeh.server.views.static_handler import StaticHandler from .utils import ManagedServerLoop, url logging.basicConfig(level=logging.DEBUG) def test_check_whitelist_rejects_port_mismatch(): assert False == tornado.check_whitelist("foo:100", ["foo:101", "foo:102"]) def test_check_whitelist_rejects_name_mismatch(): assert False == tornado.check_whitelist("foo:100", ["bar:100", "baz:100"]) def test_check_whitelist_accepts_name_port_match(): assert True == tornado.check_whitelist("foo:100", ["foo:100", "baz:100"]) def test_check_whitelist_accepts_implicit_port_80(): assert True == tornado.check_whitelist("foo", ["foo:80"]) def test_check_whitelist_accepts_all_on_star(): assert True == tornado.check_whitelist("192.168.0.1", ['*']) assert True == tornado.check_whitelist("192.168.0.1:80", ['*']) assert True == tornado.check_whitelist("192.168.0.1:5006", ['*']) assert True == tornado.check_whitelist("192.168.0.1:80", ['*:80']) assert False == tornado.check_whitelist("192.168.0.1:80", ['*:81']) assert True == tornado.check_whitelist("192.168.0.1:5006", ['*:*']) assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*']) assert True == tornado.check_whitelist("192.168.0.1:5006", ['192.168.0.*']) assert False == tornado.check_whitelist("192.168.1.1", ['192.168.0.*']) assert True == tornado.check_whitelist("foobarbaz", ['*']) assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*']) assert False == tornado.check_whitelist("192.168.1.1", ['192.168.0.*']) assert False == tornado.check_whitelist("192.168.0.1", ['192.168.0.*:5006']) assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*:80']) assert True == tornado.check_whitelist("foobarbaz", ['*']) assert True == tornado.check_whitelist("foobarbaz", ['*:*']) assert True == tornado.check_whitelist("foobarbaz", ['*:80']) assert False == tornado.check_whitelist("foobarbaz", ['*:5006']) assert True == tornado.check_whitelist("foobarbaz:5006", ['*']) assert True == tornado.check_whitelist("foobarbaz:5006", ['*:*']) assert True == tornado.check_whitelist("foobarbaz:5006", ['*:5006']) def test_default_resources(): application = Application() with ManagedServerLoop(application) as server: r = server._tornado.resources() assert r.mode == "server" assert r.root_url == "" assert r.path_versioner == StaticHandler.append_version with ManagedServerLoop(application, prefix="/foo/") as server: r = server._tornado.resources() assert r.mode == "server" assert r.root_url == "/foo/" assert r.path_versioner == StaticHandler.append_version with ManagedServerLoop(application, prefix="foo/") as server: r = server._tornado.resources() assert r.mode == "server" assert r.root_url == "/foo/" assert r.path_versioner == StaticHandler.append_version with ManagedServerLoop(application, prefix="foo") as server: r = server._tornado.resources() assert r.mode == "server" assert r.root_url == "/foo/" assert r.path_versioner == StaticHandler.append_version with ManagedServerLoop(application, prefix="/foo") as server: r = server._tornado.resources() assert r.mode == "server" assert r.root_url == "/foo/" assert r.path_versioner == StaticHandler.append_version with ManagedServerLoop(application, prefix="/foo/bar") as server: r = server._tornado.resources() assert r.mode == "server" assert r.root_url == "/foo/bar/" assert r.path_versioner == StaticHandler.append_version 
def test_default_app_paths(): app = Application() t = tornado.BokehTornado({}, "", []) assert t.app_paths == set() t = tornado.BokehTornado({"/": app}, "", []) assert t.app_paths == { "/" } t = tornado.BokehTornado({"/": app, "/foo": app}, "", []) assert t.app_paths == { "/", "/foo"} # tried to use capsys to test what's actually logged and it wasn't # working, in the meantime at least this tests that log_stats # doesn't crash in various scenarios def test_log_stats(): application = Application() with ManagedServerLoop(application) as server: server._tornado.log_stats() session1 = pull_session(session_id='session1', url=url(server), io_loop=server.io_loop) session2 = pull_session(session_id='session2', url=url(server), io_loop=server.io_loop) server._tornado.log_stats() session1.close() session2.close() server._tornado.log_stats()
bokeh/server/tests/test_tornado.py
4,957
tried to use capsys to test what's actually logged and it wasn't working, in the meantime at least this tests that log_stats doesn't crash in various scenarios
159
en
0.979033
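A tiny interactive sketch of the host-whitelist behaviour these tests pin down, using cases taken directly from the assertions above (and assuming a bokeh version where check_whitelist still lives in bokeh.server.tornado).

import bokeh.server.tornado as tornado

# A bare host defaults to port 80, so "foo" matches the pattern "foo:80".
assert tornado.check_whitelist("foo", ["foo:80"])
# Wildcards can cover host segments and ports.
assert tornado.check_whitelist("192.168.0.1:5006", ["192.168.0.*"])
assert not tornado.check_whitelist("192.168.1.1", ["192.168.0.*"])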
#!/usr/bin/env python # Copyright 2015 Luminal, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import csv import json import operator import os import os.path import sys import time import re import boto3 import botocore.exceptions try: from StringIO import StringIO except ImportError: from io import StringIO try: import yaml NO_YAML = False except ImportError: NO_YAML = True from base64 import b64encode, b64decode from boto3.dynamodb.conditions import Attr from Crypto.Cipher import AES from Crypto.Hash import SHA256 from Crypto.Hash.HMAC import HMAC from Crypto.Util import Counter DEFAULT_REGION = "us-east-1" PAD_LEN = 19 # number of digits in sys.maxint WILDCARD_CHAR = "*" class KmsError(Exception): def __init__(self, value=""): self.value = "KMS ERROR: " + value if value is not "" else "KMS ERROR" def __str__(self): return self.value class IntegrityError(Exception): def __init__(self, value=""): self.value = "INTEGRITY ERROR: " + value if value is not "" else \ "INTEGRITY ERROR" def __str__(self): return self.value class ItemNotFound(Exception): pass class KeyValueToDictionary(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, dict((x[0], x[1]) for x in values)) def printStdErr(s): sys.stderr.write(str(s)) sys.stderr.write("\n") def key_value_pair(string): output = string.split('=') if len(output) != 2: msg = "%r is not the form of \"key=value\"" % string raise argparse.ArgumentTypeError(msg) return output def expand_wildcard(string, secrets): prog = re.compile('^' + string.replace(WILDCARD_CHAR, '.*') + '$') output = [] for secret in secrets: if prog.search(secret) is not None: output.append(secret) return output def value_or_filename(string): # argparse running on old version of python (<2.7) will pass an empty # string to this function before it passes the actual value. 
# If an empty string is passes in, just return an empty string if string == "": return "" if string[0] == "@": filename = string[1:] try: with open(os.path.expanduser(filename)) as f: output = f.read() except IOError as e: raise argparse.ArgumentTypeError("Unable to read file %s" % filename) else: output = string return output def csv_dump(dictionary): csvfile = StringIO() csvwriter = csv.writer(csvfile) for key in dictionary: csvwriter.writerow([key, dictionary[key]]) return csvfile.getvalue() def paddedInt(i): ''' return a string that contains `i`, left-padded with 0's up to PAD_LEN digits ''' i_str = str(i) pad = PAD_LEN - len(i_str) return (pad * "0") + i_str def getHighestVersion(name, region="us-east-1", table="credential-store"): ''' Return the highest version of `name` in the table ''' dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) response = secrets.query(Limit=1, ScanIndexForward=False, ConsistentRead=True, KeyConditionExpression=boto3.dynamodb.conditions.Key("name").eq(name), ProjectionExpression="version") if response["Count"] == 0: return 0 return response["Items"][0]["version"] def listSecrets(region="us-east-1", table="credential-store"): ''' do a full-table scan of the credential-store, and return the names and versions of every credential ''' dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) response = secrets.scan(ProjectionExpression="#N, version", ExpressionAttributeNames={"#N": "name"}) return response["Items"] def putSecret(name, secret, version, kms_key="alias/credstash", region="us-east-1", table="credential-store", context=None): ''' put a secret called `name` into the secret-store, protected by the key kms_key ''' if not context: context = {} kms = boto3.client('kms', region_name=region) # generate a a 64 byte key. 
# Half will be for data encryption, the other half for HMAC try: kms_response = kms.generate_data_key(KeyId=kms_key, EncryptionContext=context, NumberOfBytes=64) except: raise KmsError("Could not generate key using KMS key %s" % kms_key) data_key = kms_response['Plaintext'][:32] hmac_key = kms_response['Plaintext'][32:] wrapped_key = kms_response['CiphertextBlob'] enc_ctr = Counter.new(128) encryptor = AES.new(data_key, AES.MODE_CTR, counter=enc_ctr) c_text = encryptor.encrypt(secret) # compute an HMAC using the hmac key and the ciphertext hmac = HMAC(hmac_key, msg=c_text, digestmod=SHA256) b64hmac = hmac.hexdigest() dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) data = {} data['name'] = name data['version'] = version if version != "" else paddedInt(1) data['key'] = b64encode(wrapped_key).decode('utf-8') data['contents'] = b64encode(c_text).decode('utf-8') data['hmac'] = b64hmac return secrets.put_item(Item=data, ConditionExpression=Attr('name').not_exists()) def getAllSecrets(version="", region="us-east-1", table="credential-store", context=None): ''' fetch and decrypt all secrets ''' output = {} secrets = listSecrets(region, table) for credential in set([x["name"] for x in secrets]): try: output[credential] = getSecret(credential, version, region, table, context) except: pass return output def getSecret(name, version="", region="us-east-1", table="credential-store", context=None): ''' fetch and decrypt the secret called `name` ''' if not context: context = {} dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) if version == "": # do a consistent fetch of the credential with the highest version response = secrets.query(Limit=1, ScanIndexForward=False, ConsistentRead=True, KeyConditionExpression=boto3.dynamodb.conditions.Key("name").eq(name)) if response["Count"] == 0: raise ItemNotFound("Item {'name': '%s'} couldn't be found." % name) material = response["Items"][0] else: response = secrets.get_item(Key={"name": name, "version": version}) if "Item" not in response: raise ItemNotFound("Item {'name': '%s', 'version': '%s'} couldn't be found." % (name, version)) material = response["Item"] kms = boto3.client('kms', region_name=region) # Check the HMAC before we decrypt to verify ciphertext integrity try: kms_response = kms.decrypt(CiphertextBlob=b64decode(material['key']), EncryptionContext=context) except botocore.exceptions.ClientError as e: if e.response["Error"]["Code"] == "InvalidCiphertextException": if context is None: msg = ("Could not decrypt hmac key with KMS. The credential may " "require that an encryption context be provided to decrypt " "it.") else: msg = ("Could not decrypt hmac key with KMS. 
The encryption " "context provided may not match the one used when the " "credential was stored.") else: msg = "Decryption error %s" % e raise KmsError(msg) except Exception as e: raise KmsError("Decryption error %s" % e) key = kms_response['Plaintext'][:32] hmac_key = kms_response['Plaintext'][32:] hmac = HMAC(hmac_key, msg=b64decode(material['contents']), digestmod=SHA256) if hmac.hexdigest() != material['hmac']: raise IntegrityError("Computed HMAC on %s does not match stored HMAC" % name) dec_ctr = Counter.new(128) decryptor = AES.new(key, AES.MODE_CTR, counter=dec_ctr) plaintext = decryptor.decrypt(b64decode(material['contents'])).decode("utf-8") return plaintext def deleteSecrets(name, region="us-east-1", table="credential-store"): dynamodb = boto3.resource('dynamodb', region_name=region) secrets = dynamodb.Table(table) response = secrets.scan(FilterExpression=boto3.dynamodb.conditions.Attr("name").eq(name), ProjectionExpression="#N, version", ExpressionAttributeNames={"#N": "name"}) for secret in response["Items"]: print("Deleting %s -- version %s" % (secret["name"], secret["version"])) secrets.delete_item(Key=secret) def createDdbTable(region="us-east-1", table="credential-store"): ''' create the secret store table in DDB in the specified region ''' dynamodb = boto3.resource("dynamodb", region_name=region) if table in (t.name for t in dynamodb.tables.all()): print("Credential Store table already exists") return print("Creating table...") response = dynamodb.create_table( TableName=table, KeySchema=[ { "AttributeName": "name", "KeyType": "HASH", }, { "AttributeName": "version", "KeyType": "RANGE", } ], AttributeDefinitions=[ { "AttributeName": "name", "AttributeType": "S", }, { "AttributeName": "version", "AttributeType": "S", }, ], ProvisionedThroughput={ "ReadCapacityUnits": 1, "WriteCapacityUnits": 1, } ) print("Waiting for table to be created...") client = boto3.client("dynamodb", region_name=region) client.get_waiter("table_exists").wait(TableName=table) print("Table has been created. " "Go read the README about how to create your KMS key") def main(): parsers = {} parsers['super'] = argparse.ArgumentParser( description="A credential/secret storage system") parsers['super'].add_argument("-r", "--region", help="the AWS region in which to operate." "If a region is not specified, credstash " "will use the value of the " "AWS_DEFAULT_REGION env variable, " "or if that is not set, us-east-1") parsers['super'].add_argument("-t", "--table", default="credential-store", help="DynamoDB table to use for " "credential storage") subparsers = parsers['super'].add_subparsers(help='Try commands like ' '"{name} get -h" or "{name}' 'put --help" to get each' 'sub command\'s options' .format(name=os.path.basename( __file__))) action = 'delete' parsers[action] = subparsers.add_parser(action, help='Delete a credential " \ "from the store') parsers[action].add_argument("credential", type=str, help="the name of the credential to delete") parsers[action].set_defaults(action=action) action = 'get' parsers[action] = subparsers.add_parser(action, help="Get a credential " "from the store") parsers[action].add_argument("credential", type=str, help="the name of the credential to get." 
"Using the wildcard character '%s' will " "search for credentials that match the " "pattern" % WILDCARD_CHAR) parsers[action].add_argument("context", type=key_value_pair, action=KeyValueToDictionary, nargs='*', help="encryption context key/value pairs " "associated with the credential in the form " "of \"key=value\"") parsers[action].add_argument("-n", "--noline", action="store_true", help="Don't append newline to returned " "value (useful in scripts or with " "binary files)") parsers[action].add_argument("-v", "--version", default="", help="Get a specific version of the " "credential (defaults to the latest version)") parsers[action].set_defaults(action=action) action = 'getall' parsers[action] = subparsers.add_parser(action, help="Get all credentials from " "the store") parsers[action].add_argument("context", type=key_value_pair, action=KeyValueToDictionary, nargs='*', help="encryption context key/value pairs " "associated with the credential in the form " "of \"key=value\"") parsers[action].add_argument("-v", "--version", default="", help="Get a specific version of the " "credential (defaults to the latest version)") parsers[action].add_argument("-f", "--format", default="json", choices=["json", "csv"] + ([] if NO_YAML else ["yaml"]), help="Output format. json(default) " + ("" if NO_YAML else "yaml ") + "or csv.") parsers[action].set_defaults(action=action) action = 'list' parsers[action] = subparsers.add_parser(action, help="list credentials and " "their versions") parsers[action].set_defaults(action=action) action = 'put' parsers[action] = subparsers.add_parser(action, help="Put a credential into " "the store") parsers[action].add_argument("credential", type=str, help="the name of the credential to store") parsers[action].add_argument("value", type=value_or_filename, help="the value of the credential to store " "or, if beginning with the \"@\" character, " "the filename of the file containing " "the value", default="") parsers[action].add_argument("context", type=key_value_pair, action=KeyValueToDictionary, nargs='*', help="encryption context key/value pairs " "associated with the credential in the form " "of \"key=value\"") parsers[action].add_argument("-k", "--key", default="alias/credstash", help="the KMS key-id of the master key " "to use. See the README for more " "information. Defaults to alias/credstash") parsers[action].add_argument("-v", "--version", default="", help="Put a specific version of the " "credential (update the credential; " "defaults to version `1`).") parsers[action].add_argument("-a", "--autoversion", action="store_true", help="Automatically increment the version of " "the credential to be stored. This option " "causes the `-v` flag to be ignored. 
" "(This option will fail if the currently stored " "version is not numeric.)") parsers[action].set_defaults(action=action) action = 'setup' parsers[action] = subparsers.add_parser(action, help='setup the credential store') parsers[action].set_defaults(action=action) args = parsers['super'].parse_args() region = os.getenv( "AWS_DEFAULT_REGION", DEFAULT_REGION) if not args.region \ else args.region if "action" in vars(args): if args.action == "delete": deleteSecrets(args.credential, region=region, table=args.table) return if args.action == "list": credential_list = listSecrets(region=region, table=args.table) if credential_list: # print list of credential names and versions, # sorted by name and then by version max_len = max([len(x["name"]) for x in credential_list]) for cred in sorted(credential_list, key=operator.itemgetter("name", "version")): print("{0:{1}} -- version {2:>}".format( cred["name"], max_len, cred["version"])) else: return if args.action == "put": if args.autoversion: latestVersion = getHighestVersion(args.credential, region, args.table) try: version = paddedInt(int(latestVersion) + 1) except ValueError: printStdErr("Can not autoincrement version. The current " "version: %s is not an int" % latestVersion) return else: version = args.version try: if putSecret(args.credential, args.value, version, kms_key=args.key, region=region, table=args.table, context=args.context): print("{0} has been stored".format(args.credential)) except KmsError as e: printStdErr(e) except botocore.exceptions.ClientError as e: if e.response["Error"]["Code"] == "ConditionalCheckFailedException": latestVersion = getHighestVersion(args.credential, region, args.table) printStdErr("%s version %s is already in the credential store. " "Use the -v flag to specify a new version" % (args.credential, latestVersion)) return if args.action == "get": try: if WILDCARD_CHAR in args.credential: names = expand_wildcard(args.credential, [x["name"] for x in listSecrets(region=region, table=args.table)]) print(json.dumps(dict((name, getSecret(name, args.version, region=region, table=args.table, context=args.context)) for name in names))) else: sys.stdout.write(getSecret(args.credential, args.version, region=region, table=args.table, context=args.context)) if not args.noline: sys.stdout.write("\n") except ItemNotFound as e: printStdErr(e) except KmsError as e: printStdErr(e) except IntegrityError as e: printStdErr(e) return if args.action == "getall": secrets = getAllSecrets(args.version, region=region, table=args.table, context=args.context) if args.format == "json": output_func = json.dumps output_args = {"sort_keys": True, "indent": 4, "separators": (',', ': ')} elif not NO_YAML and args.format == "yaml": output_func = yaml.dump output_args = {"default_flow_style": False} elif args.format == 'csv': output_func = csv_dump output_args = {} print(output_func(secrets, **output_args)) return if args.action == "setup": createDdbTable(region=region, table=args.table) return else: parsers['super'].print_help() if __name__ == '__main__': main()
credstash.py
22,693
create the secret store table in DDB in the specified region fetch and decrypt all secrets Return the highest version of `name` in the table fetch and decrypt the secret called `name` do a full-table scan of the credential-store, and return the names and versions of every credential return a string that contains `i`, left-padded with 0's up to PAD_LEN digits put a secret called `name` into the secret-store, protected by the key kms_key !/usr/bin/env python Copyright 2015 Luminal, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. number of digits in sys.maxint argparse running on old version of python (<2.7) will pass an empty string to this function before it passes the actual value. If an empty string is passes in, just return an empty string generate a a 64 byte key. Half will be for data encryption, the other half for HMAC compute an HMAC using the hmac key and the ciphertext do a consistent fetch of the credential with the highest version Check the HMAC before we decrypt to verify ciphertext integrity print list of credential names and versions, sorted by name and then by version
1,578
en
0.787387
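A brief library-style usage sketch of the functions above, assuming the file is importable as credstash and that AWS credentials plus a KMS key aliased 'credstash' are available; the secret name and value are placeholders.

import credstash  # the module shown above

# One-time setup: create the DynamoDB table (the KMS key must be created separately).
credstash.createDdbTable(region="us-east-1", table="credential-store")

# Store version 1 of a placeholder secret, then read the latest version back.
credstash.putSecret("myapp.db_password", "hunter2", version="",
                    kms_key="alias/credstash")
print(credstash.getSecret("myapp.db_password"))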
""" Django settings for lab01 project. Generated by 'django-admin startproject' using Django 3.2.6. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'django-insecure-7-8hv&pc-$$1)7eiiy2m#m^o6cx%oqqv9@z071ec0%218iwt0!' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'lab01.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'lab01.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
lab01/lab01/settings.py
3,361
Django settings for lab01 project. Generated by 'django-admin startproject' using Django 3.2.6. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ Build paths inside the project like this: BASE_DIR / 'subdir'. Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/3.2/ref/settings/databases Password validation https://docs.djangoproject.com/en/3.2/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/3.2/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/3.2/howto/static-files/ Default primary key field type https://docs.djangoproject.com/en/3.2/ref/settings/default-auto-field
1,080
en
0.665241
# Copyright 2018, Kay Hayen, mailto:[email protected] # # Python tests originally created or extracted from other peoples work. The # parts were too small to be protected. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # def tupleUnpacking(): return (*a, b, *c) def listUnpacking(): return [*a, b, *c] def setUnpacking(): return {*a, b, *c} def dictUnpacking(): return {"a" : 1, **d} a = range(3) b = 5 c = range(8,10) d = {"a" : 2} print("Tuple unpacked", tupleUnpacking()) print("List unpacked", listUnpacking()) print("Set unpacked", setUnpacking()) print("Dict unpacked", dictUnpacking()) non_iterable = 2.0 def tupleUnpackingError(): try: return (*a,*non_iterable,*c) except Exception as e: return e def listUnpackingError(): try: return [*a,*non_iterable,*c] except Exception as e: return e def setUnpackingError(): try: return {*a,*non_iterable,*c} except Exception as e: return e def dictUnpackingError(): try: return {"a" : 1, **non_iterable} except Exception as e: return e print("Tuple unpacked error:", tupleUnpackingError()) print("List unpacked error:", listUnpackingError()) print("Set unpacked error:", setUnpackingError()) print("Dict unpacked error:", dictUnpackingError())
tests/basics/Unpacking35.py
1,866
Copyright 2018, Kay Hayen, mailto:[email protected] Python tests originally created or extracted from other peoples work. The parts were too small to be protected. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
730
en
0.887757
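The same PEP 448 generalizations also work inside call arguments; a short illustrative addition that is not part of the original test file.

def show(*args, **kwargs):
    return args, kwargs

a = range(3)
c = range(8, 10)
d = {"a": 2}

# Multiple iterable unpackings in one call, plus keyword unpacking.
print(show(*a, 5, *c, **d))  # ((0, 1, 2, 5, 8, 9), {'a': 2})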
# Copyright (c) OpenMMLab. All rights reserved. import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from torch.nn.modules.batchnorm import _BatchNorm from mmcls.models.utils import make_divisible from ..builder import BACKBONES from .base_backbone import BaseBackbone class InvertedResidual(BaseModule): """InvertedResidual block for MobileNetV2. Args: in_channels (int): The input channels of the InvertedResidual block. out_channels (int): The output channels of the InvertedResidual block. stride (int): Stride of the middle (first) 3x3 convolution. expand_ratio (int): adjusts number of channels of the hidden layer in InvertedResidual by this amount. conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: Tensor: The output tensor """ def __init__(self, in_channels, out_channels, stride, expand_ratio, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), with_cp=False, init_cfg=None): super(InvertedResidual, self).__init__(init_cfg) self.stride = stride assert stride in [1, 2], f'stride must in [1, 2]. ' \ f'But received {stride}.' self.with_cp = with_cp self.use_res_connect = self.stride == 1 and in_channels == out_channels hidden_dim = int(round(in_channels * expand_ratio)) layers = [] if expand_ratio != 1: layers.append( ConvModule( in_channels=in_channels, out_channels=hidden_dim, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)) layers.extend([ ConvModule( in_channels=hidden_dim, out_channels=hidden_dim, kernel_size=3, stride=stride, padding=1, groups=hidden_dim, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg), ConvModule( in_channels=hidden_dim, out_channels=out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None) ]) self.conv = nn.Sequential(*layers) def forward(self, x): def _inner_forward(x): if self.use_res_connect: return x + self.conv(x) else: return self.conv(x) if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) return out @BACKBONES.register_module() class MobileNetV2(BaseBackbone): """MobileNetV2 backbone. Args: widen_factor (float): Width multiplier, multiply number of channels in each layer by this amount. Default: 1.0. out_indices (None or Sequence[int]): Output from which stages. Default: (7, ). frozen_stages (int): Stages to be frozen (all param fixed). Default: -1, which means not freezing any parameters. conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. Default: False. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. """ # Parameters to build layers. 4 parameters are needed to construct a # layer, from left to right: expand_ratio, channel, num_blocks, stride. 
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]] def __init__(self, widen_factor=1., out_indices=(7, ), frozen_stages=-1, deep_stem=False, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), norm_eval=False, with_cp=False, init_cfg=[ dict(type='Kaiming', layer=['Conv2d']), dict( type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) ]): super(MobileNetV2, self).__init__(init_cfg) self.widen_factor = widen_factor self.out_indices = out_indices for index in out_indices: if index not in range(0, 8): raise ValueError('the item in out_indices must in ' f'range(0, 8). But received {index}') if frozen_stages not in range(-1, 8): raise ValueError('frozen_stages must be in range(-1, 8). ' f'But received {frozen_stages}') self.out_indices = out_indices self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.act_cfg = act_cfg self.norm_eval = norm_eval self.with_cp = with_cp self.in_channels = make_divisible(32 * widen_factor, 8) if deep_stem: self.conv0 = ConvModule(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) in_channels_ = 16 else: in_channels_ = 3 self.conv0 = nn.Sequential() self.conv1 = ConvModule( in_channels=in_channels_, out_channels=self.in_channels, kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) self.layers = [] for i, layer_cfg in enumerate(self.arch_settings): expand_ratio, channel, num_blocks, stride = layer_cfg out_channels = make_divisible(channel * widen_factor, 8) inverted_res_layer = self.make_layer( out_channels=out_channels, num_blocks=num_blocks, stride=stride, expand_ratio=expand_ratio) layer_name = f'layer{i + 1}' self.add_module(layer_name, inverted_res_layer) self.layers.append(layer_name) if widen_factor > 1.0: self.out_channel = int(1280 * widen_factor) else: self.out_channel = 1280 layer = ConvModule( in_channels=self.in_channels, out_channels=self.out_channel, kernel_size=1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg) self.add_module('conv2', layer) self.layers.append('conv2') def make_layer(self, out_channels, num_blocks, stride, expand_ratio): """Stack InvertedResidual blocks to build a layer for MobileNetV2. Args: out_channels (int): out_channels of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 expand_ratio (int): Expand the number of channels of the hidden layer in InvertedResidual by this ratio. Default: 6. 
""" layers = [] for i in range(num_blocks): if i >= 1: stride = 1 layers.append( InvertedResidual( self.in_channels, out_channels, stride, expand_ratio=expand_ratio, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg, with_cp=self.with_cp)) self.in_channels = out_channels return nn.Sequential(*layers) def forward(self, x): x = self.conv0(x) x = self.conv1(x) outs = [] for i, layer_name in enumerate(self.layers): layer = getattr(self, layer_name) x = layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) def _freeze_stages(self): if self.frozen_stages >= 0: for param in self.conv1.parameters(): param.requires_grad = False for i in range(1, self.frozen_stages + 1): layer = getattr(self, f'layer{i}') layer.eval() for param in layer.parameters(): param.requires_grad = False def train(self, mode=True): super(MobileNetV2, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): if isinstance(m, _BatchNorm): m.eval()
mmcls/models/backbones/mobilenet_v2.py
9,966
InvertedResidual block for MobileNetV2. Args: in_channels (int): The input channels of the InvertedResidual block. out_channels (int): The output channels of the InvertedResidual block. stride (int): Stride of the middle (first) 3x3 convolution. expand_ratio (int): adjusts number of channels of the hidden layer in InvertedResidual by this amount. conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Returns: Tensor: The output tensor MobileNetV2 backbone. Args: widen_factor (float): Width multiplier, multiply number of channels in each layer by this amount. Default: 1.0. out_indices (None or Sequence[int]): Output from which stages. Default: (7, ). frozen_stages (int): Stages to be frozen (all param fixed). Default: -1, which means not freezing any parameters. conv_cfg (dict, optional): Config dict for convolution layer. Default: None, which means using conv2d. norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN'). act_cfg (dict): Config dict for activation layer. Default: dict(type='ReLU6'). norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. Default: False. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False. Stack InvertedResidual blocks to build a layer for MobileNetV2. Args: out_channels (int): out_channels of block. num_blocks (int): number of blocks. stride (int): stride of the first block. Default: 1 expand_ratio (int): Expand the number of channels of the hidden layer in InvertedResidual by this ratio. Default: 6. Copyright (c) OpenMMLab. All rights reserved. Parameters to build layers. 4 parameters are needed to construct a layer, from left to right: expand_ratio, channel, num_blocks, stride.
2,387
en
0.646152
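The mmcls snippet above builds MobileNetV2 from inverted-residual blocks: an optional 1x1 pointwise expansion (skipped when expand_ratio == 1), a 3x3 depthwise convolution with stride 1 or 2, and a linear 1x1 projection, with a residual shortcut only when stride == 1 and the input and output channel counts match. The sketch below restates that pattern with plain torch.nn layers; the class name SimpleInvertedResidual and the fixed BatchNorm+ReLU6 choices are illustrative simplifications, not the mmcv ConvModule-based implementation above.

# A minimal, self-contained sketch of the inverted-residual pattern described
# above (expand -> 3x3 depthwise -> linear 1x1 projection), using plain
# torch.nn instead of mmcv's ConvModule. Names are illustrative, not mmcls API.
import torch
import torch.nn as nn


class SimpleInvertedResidual(nn.Module):
    def __init__(self, in_channels, out_channels, stride, expand_ratio):
        super().__init__()
        assert stride in (1, 2)
        hidden_dim = int(round(in_channels * expand_ratio))
        # Residual shortcut only when the block keeps resolution and width.
        self.use_res_connect = stride == 1 and in_channels == out_channels

        layers = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion
            layers += [nn.Conv2d(in_channels, hidden_dim, 1, bias=False),
                       nn.BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True)]
        layers += [
            # 3x3 depthwise convolution (groups == channels)
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1,
                      groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True),
            # 1x1 linear projection (no activation)
            nn.Conv2d(hidden_dim, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out


if __name__ == "__main__":
    block = SimpleInvertedResidual(24, 24, stride=1, expand_ratio=6)
    y = block(torch.randn(1, 24, 56, 56))
    print(y.shape)  # torch.Size([1, 24, 56, 56])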
""" 多线程操作共享的全局变量是不安全的,多线程操作局部 只归某个线程私有,其他线程是不能访问的 """ import threading def do_sth(arg1, arg2, arg3): local_var1 = arg1 local_var2 = arg2 local_var3 = arg3 fun1(local_var1, local_var2, local_var3) fun2(local_var1, local_var2, local_var3) fun3(local_var1, local_var2, local_var3) def fun1(local_var1, local_var2, local_var3): print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1, local_var2, local_var3)) def fun2(local_var1, local_var2, local_var3): print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1, local_var2, local_var3)) def fun3(local_var1, local_var2, local_var3): print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1, local_var2, local_var3)) t1 = threading.Thread(target=do_sth, args=('a', 'b', 'c')) t2 = threading.Thread(target=do_sth, args=('d', 'e', 'f')) t1.start() t2.start()
17_process_thread/46_why_need_ThreadLocal.py
1,094
Operating on shared global variables from multiple threads is unsafe; local variables belong only to the thread that owns them and cannot be accessed by other threads
45
zh
0.998681
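The docstring of the file above motivates ThreadLocal: globals shared between threads are unsafe, so the example instead passes the same three locals through every helper call. A minimal sketch of the alternative, assuming only the standard library, uses threading.local() so each thread keeps its own copy without the argument plumbing; the ctx name and two-helper structure are illustrative, not taken from the original file.

# Each thread stores its own values on a threading.local() object instead of
# passing local_var1/2/3 into every helper.
import threading

ctx = threading.local()  # one independent attribute namespace per thread


def do_sth(arg1, arg2, arg3):
    # Each thread writes to its *own* copy of ctx.data.
    ctx.data = (arg1, arg2, arg3)
    fun1()
    fun2()


def fun1():
    print('%s fun1: %s' % (threading.current_thread().name, ctx.data))


def fun2():
    print('%s fun2: %s' % (threading.current_thread().name, ctx.data))


t1 = threading.Thread(target=do_sth, args=('a', 'b', 'c'))
t2 = threading.Thread(target=do_sth, args=('d', 'e', 'f'))
t1.start()
t2.start()
t1.join()
t2.join()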
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright 2011-2019, Nigel Small # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import TestCase from neotime import Date, Time, DateTime, Duration from py2neo.data import Node from py2neo.cypher import cypher_escape, cypher_repr from py2neo.cypher.encoding import LabelSetView, PropertyDictView, PropertySelector class LabelSetViewTestCase(TestCase): def test_can_create_empty_view(self): view = LabelSetView([]) self.assertEqual(repr(view), "") def test_can_create_single_label_view(self): view = LabelSetView(["A"]) self.assertEqual(repr(view), ":A") def test_can_create_double_label_view(self): view = LabelSetView(["A", "B"]) self.assertEqual(repr(view), ":A:B") def test_can_select_existing_in_view(self): view = LabelSetView(["A", "B"]).B self.assertEqual(repr(view), ":B") def test_can_select_non_existing_in_view(self): view = LabelSetView(["A", "B"]).C self.assertEqual(repr(view), "") def test_can_chain_select(self): view = LabelSetView(["A", "B", "C"]).B.C self.assertEqual(repr(view), ":B:C") def test_can_reselect_same(self): view = LabelSetView(["A", "B", "C"]).B.B.C self.assertEqual(repr(view), ":B:C") def test_length(self): view = LabelSetView(["A", "B", "C"]) self.assertEqual(len(view), 3) def test_iterable(self): view = LabelSetView(["A", "B", "C"]) self.assertSetEqual(set(view), {"A", "B", "C"}) def test_containment(self): view = LabelSetView(["A", "B", "C"]) self.assertIn("A", view) def test_non_containment(self): view = LabelSetView(["A", "B", "C"]) self.assertNotIn("D", view) class PropertyDictViewTestCase(TestCase): def test_can_create_empty_view(self): view = PropertyDictView({}) self.assertEqual(repr(view), "{}") def test_can_create_single_property_view(self): view = PropertyDictView({"A": 1}) self.assertEqual(repr(view), "{A: 1}") def test_can_create_double_property_view(self): view = PropertyDictView({"A": 1, "B": 2}) self.assertEqual(repr(view), "{A: 1, B: 2}") def test_can_select_existing_in_view(self): view = PropertyDictView({"A": 1, "B": 2}).B self.assertEqual(repr(view), "{B: 2}") def test_can_select_non_existing_in_view(self): view = PropertyDictView({"A": 1, "B": 2}).C self.assertEqual(repr(view), "{}") def test_can_chain_select(self): view = PropertyDictView({"A": 1, "B": 2, "C": 3}).B.C self.assertEqual(repr(view), "{B: 2, C: 3}") def test_can_reselect_same(self): view = PropertyDictView({"A": 1, "B": 2, "C": 3}).B.B.C self.assertEqual(repr(view), "{B: 2, C: 3}") def test_length(self): view = PropertyDictView({"A": 1, "B": 2, "C": 3}) self.assertEqual(len(view), 3) def test_iterable(self): view = PropertyDictView({"A": 1, "B": 2, "C": 3}) self.assertEqual(set(view), {"A", "B", "C"}) def test_containment(self): view = PropertyDictView({"A": 1, "B": 2, "C": 3}) self.assertIn("A", view) def test_non_containment(self): view = PropertyDictView({"A": 1, "B": 2, "C": 3}) self.assertNotIn("D", view) class PropertySelectorTestCase(TestCase): def test_simple(self): selector = PropertySelector({"A": 1, "B": 2, "C": 3}) 
self.assertEqual(selector.A, "1") def test_non_existent(self): selector = PropertySelector({"A": 1, "B": 2, "C": 3}) self.assertEqual(selector.D, "null") class NodeReprTestCase(TestCase): def test_empty(self): a = Node() r = cypher_repr(a) self.assertEqual("({})", r) def test_single_property(self): a = Node(name="Alice") r = cypher_repr(a) self.assertEqual("({name: 'Alice'})", r) def test_property_and_label(self): a = Node("Person", name="Alice") r = cypher_repr(a) self.assertEqual("(:Person {name: 'Alice'})", r) def test_date_property(self): a = Node(d=Date(1970, 1, 1)) r = cypher_repr(a) self.assertEqual("({d: date('1970-01-01')})", r) def test_time_property(self): a = Node(t=Time(12, 34, 56)) r = cypher_repr(a) self.assertEqual("({t: time('12:34:56.000000000')})", r) def test_datetime_property(self): a = Node(dt=DateTime(1970, 1, 1, 12, 34, 56)) r = cypher_repr(a) self.assertEqual("({dt: datetime('1970-01-01T12:34:56.000000000')})", r) def test_duration_property(self): a = Node(dur=Duration(days=3)) r = cypher_repr(a) self.assertEqual("({dur: duration('P3D')})", r) class CypherEscapeTestCase(TestCase): def test_empty_string(self): value = "" with self.assertRaises(ValueError): _ = cypher_escape(value) def test_simple_string(self): value = "foo" escaped = "foo" self.assertEqual(escaped, cypher_escape(value)) def test_string_with_space(self): value = "foo bar" escaped = "`foo bar`" self.assertEqual(escaped, cypher_escape(value)) def test_string_with_backtick(self): value = "foo `bar`" escaped = "`foo ``bar```" self.assertEqual(escaped, cypher_escape(value))
test/unit/test_cypher_encoding.py
5,974
!/usr/bin/env python -*- encoding: utf-8 -*- Copyright 2011-2019, Nigel Small Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
599
en
0.842704
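The CypherEscapeTestCase above pins down the identifier-escaping rule the py2neo tests expect: an empty name raises ValueError, a plain identifier passes through unchanged, and anything else is wrapped in backticks with embedded backticks doubled. The sketch below is a stand-alone illustration of that rule; escape_identifier and its regex are hypothetical names, not py2neo's cypher_escape implementation.

# Minimal sketch of the escaping rule encoded by the tests above.
import re

_SIMPLE_NAME = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$")


def escape_identifier(name):
    if not name:
        raise ValueError("Cannot escape an empty identifier")
    if _SIMPLE_NAME.match(name):
        return name                                  # "foo"        -> foo
    return "`" + name.replace("`", "``") + "`"       # "foo `bar`"  -> `foo ``bar```


assert escape_identifier("foo") == "foo"
assert escape_identifier("foo bar") == "`foo bar`"
assert escape_identifier("foo `bar`") == "`foo ``bar```"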
# Written by David Weber # [email protected] """ In this short namespace I house a class that connects to PDB and downloads file over PDB file transfer protocol. """ # ------------------------------------------------------------------------------ import gzip from os import remove, getcwd, path # built in # my pymol API built on Python2 - try both imports try: from urllib.request import urlretrieve, urlcleanup except ImportError: from urllib import urlretrieve, urlcleanup ROOT = 'ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/{}/{}' class PDBFile: def __init__(self, code): """Initialize a PDBFile object with a pdb file of interest Parameters ---------- code : the pdb code if interest Any valid PDB code can be passed into PDBFile. Examples -------- >>> pdb_file = PDBFile('1rcy') """ self.code = code.lower() def fetch_from_PDB(self): """ Connects to PDB FTP server, downloads a .gz file of interest, decompresses the .gz file into .ent and then dumps a copy of the pdb{code}.ent file into cwd. Parameters ---------- None Examples -------- >>> inst = PDBFile('1rcy') >>> path_to_file = inst.fetch_from_PDB() >>> print(path_to_file) """ subdir = self.code[1:3] infile = 'pdb{}.ent.gz'.format(self.code) decompressed = infile.strip('.gz') fullpath = ROOT.format(subdir, infile) try: urlcleanup() urlretrieve(fullpath, infile) except Exception: return 'URLError' else: with gzip.open(infile, 'rb') as gz: with open(decompressed, 'wb') as out: out.writelines(gz) remove(infile) return path.join(getcwd(), decompressed) def clear(self): """ Deletes file from current working directory after the file has been processed by some algorithm. Parameters ---------- None Examples -------- >>> inst = PDBFile('1rcy') >>> path_to_file = inst.fetch_from_PDB() >>> print(path_to_file) # process the file using some algorithm >>> inst.clear() """ filename = 'pdb{}.ent'.format(self.code) try: remove(path.join(getcwd(), filename)) except FileNotFoundError: print('Cannot delete file. Does not exist.')
scalene-triangle/libs/PDB_filegetter.py
2,794
Initialize a PDBFile object with a pdb file of interest Parameters ---------- code : the pdb code if interest Any valid PDB code can be passed into PDBFile. Examples -------- >>> pdb_file = PDBFile('1rcy') Deletes file from current working directory after the file has been processed by some algorithm. Parameters ---------- None Examples -------- >>> inst = PDBFile('1rcy') >>> path_to_file = inst.fetch_from_PDB() >>> print(path_to_file) # process the file using some algorithm >>> inst.clear() Connects to PDB FTP server, downloads a .gz file of interest, decompresses the .gz file into .ent and then dumps a copy of the pdb{code}.ent file into cwd. Parameters ---------- None Examples -------- >>> inst = PDBFile('1rcy') >>> path_to_file = inst.fetch_from_PDB() >>> print(path_to_file) In this short namespace I house a class that connects to PDB and downloads file over PDB file transfer protocol. Written by David Weber [email protected] ------------------------------------------------------------------------------ built in my pymol API built on Python2 - try both imports
1,090
en
0.73877
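The PDBFile class above resolves a structure code to ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/<middle two letters>/pdb<code>.ent.gz, downloads the archive, and decompresses it into the working directory. A compact Python 3 sketch of the same flow follows; fetch_pdb is a hypothetical helper, continued availability of the FTP mirror is an assumption, and building the .ent name explicitly avoids the str.strip('.gz') character-set pitfall in the original.

# Python 3 sketch of the fetch-and-decompress flow documented above, using the
# same wwPDB path layout as the ROOT template in the snippet.
import gzip
import shutil
from urllib.request import urlretrieve

ROOT = 'ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/{}/{}'


def fetch_pdb(code):
    code = code.lower()
    gz_name = 'pdb{}.ent.gz'.format(code)
    ent_name = 'pdb{}.ent'.format(code)        # explicit name, no strip('.gz')
    url = ROOT.format(code[1:3], gz_name)      # e.g. .../rc/pdb1rcy.ent.gz
    urlretrieve(url, gz_name)                  # download the gzipped entry
    with gzip.open(gz_name, 'rb') as src, open(ent_name, 'wb') as dst:
        shutil.copyfileobj(src, dst)           # decompress into the cwd
    return ent_name


# Usage: fetch_pdb('1rcy') downloads and unpacks pdb1rcy.ent into the cwd.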
# -*- coding: utf-8 -*- #Chucky_Bot import LINETCR from LINETCR.lib.curve.ttypes import * from datetime import datetime from bs4 import BeautifulSoup from threading import Thread from googletrans import Translator from gtts import gTTS import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile cl = LINETCR.LINE() #cl.login(qr=True) cl.login(token='EsOvPPzeFykCVG8OoGf0.hE4TS1Hheb46PcdMzZKaaa.rzBOrFqSAApZownyv2qBJWU3PWWbf9/oE6G+sSVzUTo=') cl.loginResult() print "Azmi 1-Login Success\n" ki = LINETCR.LINE() #ki.login(qr=True) ki.login(token='EsTdk3fyUSbT7LJVwoEd.rLylacrPH39WJb0UIwB8Nq.GYYzsgzj7aHd7mzCSluc3162Uqrry6Jjwf/bFuq9Etw=') ki.loginResult() print "Ki-Login Success\n" kk = LINETCR.LINE() #kk.login(qr=True) kk.login(token='EsNKJDaP0J7Pt7syTOW9.GgPTp3/FisKkVX1rJHeroq.hUG0VDbWHz8R7o80xI0Pvme8dBb3dSsmCnat0PRX+JM=') kk.loginResult() print "Kk-Login Success\n" #kc = LINETCR.LINE() #kc.login(qr=True) #kc.login(token='TOKEN_KAMU_DISINI_BEIB') #kc.loginResult() #print "Kc-Login Success\n" #kr = LINETCR.LINE() #kr.login(qr=True) #kr.login(token='TOKEN_KAMU_DISINI_BEIB') #kr.loginResult() #print "Kr-Login Success\n" #km = LINETCR.LINE() #km.login(qr=True) #km.login(token='TOKEN_KAMU_DISINI_BEIB') #km.loginResult() print "Km-Login Success\n\n=====[Sukses All Login]=====" reload(sys) sys.setdefaultencoding('utf-8') selfMessage =""" ╔═════════════════════════ ║ ☆☞ S E L F ☜☆ ╠═════════════════════════ ╠➩〘Hi〙 ╠➩〘Me〙 ╠➩〘Mymid〙 ╠➩〘Mid @〙 ╠➩〘SearchID: (ID LINE)〙 ╠➩〘Checkdate (DD/MM/YY)〙 ╠➩〘Kalender〙 ╠➩〘Steal contact〙 ╠➩〘Pp @〙 ╠➩〘Cover @〙 ╠➩〘Auto like〙 ╠➩〘Scbc Text〙 ╠➩〘Cbc Text〙 ╠➩〘Gbc Text〙 ╠➩〘Getbio @〙 ╠➩〘Getinfo @〙 ╠➩〘Getname @〙 ╠➩〘Getprofile @〙 ╠➩〘Getcontact @〙 ╠➩〘Getvid @〙 ╠➩〘Friendlist〙 ╠═════════════════════════ ║ ☆☞ S E L F ☜☆ ╚═════════════════════════ """ botMessage =""" ╔═════════════════════════ ║ ☆☞ B O T ☜☆ ╠═════════════════════════ ╠➩〘Absen〙 ╠➩〘Respon〙 ╠➩〘Runtime〙 ╠➩〘Kapten copy @〙 ╠➩〘TC1 copy @〙 ╠➩〘TC2 copy @〙 ╠➩〘TC3 copy @〙 ╠➩〘TC4 copy @〙 ╠➩〘Backup all〙 ╠➩〘/bio Text〙 ╠➩〘@bye (Usir Kapten)〙 ╠➩〘Bye all (Usir Semua)〙 ╠═════════════════════════ ║ ☆☞ B O T ☜☆ ╚═════════════════════════ """ mediaMessage =""" ╔═════════════════════════ ║ ☆☞ M E D I A ☜☆ ╠═════════════════════════ ╠➩〘Gift〙 ╠➩〘Gift1 @ s/d Gift10 @〙 ╠➩〘Giftbycontact〙 ╠➩〘All gift〙 ╠➩〘Gif gore〙 ╠➩〘Google: (Text)〙 ╠➩〘Playstore NamaApp〙 ╠➩〘Fancytext: Text〙 ╠➩〘/musik Judul-Penyanyi〙 ╠➩〘/lirik Judul-Penyanyi〙 ╠➩〘/musrik Judul-Penyanyi〙 ╠➩〘/ig UrsnameInstagram〙 ╠➩〘Checkig UrsnameInstagram〙 ╠➩〘/apakah Text (Kerang Ajaib)〙 ╠➩〘/kapan Text (Kerang Ajaib)〙 ╠➩〘/hari Text (Kerang Ajaib)〙 ╠➩〘/berapa Text (Kerang Ajaib)〙 ╠➩〘/berapakah Text〙 ╠➩〘Youtubelink: Judul Video〙 ╠➩〘Youtubevideo: Judul Video〙 ╠➩〘Youtubesearch: Judul Video〙 ╠➩〘Image NamaGambar〙 ╠➩〘Say-id Text〙 ╠➩〘Say-en Text〙 ╠➩〘Say-jp Text〙 ╠➩〘Image NamaGambar〙 ╠➩〘Tr-id Text (Translate En Ke ID〙 ╠➩〘Tr-en Text (Translate ID Ke En〙 ╠➩〘Tr-th Text (Translate ID Ke Th〙 ╠➩〘Id@en Text (Translate ID Ke En〙 ╠➩〘Id@th Text (Translate ID Ke TH〙 ╠➩〘En@id Text (Translate En Ke ID〙 ╠═════════════════════════ ║ ☆☞ M E D I A ☜☆ ╚═════════════════════════ """ groupMessage =""" ╔═════════════════════════ ║ ☆☞ G R O U P ☜☆ ╠═════════════════════════ ╠➩〘Welcome〙 ╠➩〘Say welcome〙 ╠➩〘Invite creator〙 ╠➩〘Setview〙 ╠➩〘Viewseen〙 ╠➩〘Gn: (NamaGroup)〙 ╠➩〘Tag all〙 ╠➩〘Recover〙 ╠➩〘Cancel〙 ╠➩〘Cancelall〙 ╠➩〘Gcreator〙 ╠➩〘Ginfo〙 ╠➩〘Gurl〙 ╠➩〘List group〙 ╠➩〘Pict group: (NamaGroup)〙 ╠➩〘Spam: (Text)〙 ╠➩〘Spam〙 ╠➩〘Add all〙 ╠➩〘Kick: (Mid)〙 ╠➩〘Invite: (Mid)〙 ╠➩〘Invite〙 ╠➩〘Memlist〙 
╠➩〘Getgroup image〙 ╠➩〘Urlgroup Image〙 ╠═════════════════════════ ║ ☆☞ G R O U P ☜☆ ╚═════════════════════════ """ tjia="u71b6799e1c37868a871d442e67633182" setMessage =""" ╔═════════════════════════ ║ ☆☞ S E T ☜☆ ╠═════════════════════════ ╠➩〘Sambutan on/off〙 ╠➩〘Url on/off〙 ╠➩〘Alwaysread on/off〙 ╠➩〘Sider on/off〙 ╠➩〘Contact on/off〙 ╠➩〘Simisimi on/off〙 ╠═════════════════════════ ║ ☆☞ S E T ☜☆ ╚═════════════════════════ """ creatorMessage =""" ╔═════════════════════════ ║ ☆☞ C R E A T O R ☜☆ ╠═════════════════════════ ╠➩〘Admin add @〙 ╠➩〘Admin remove @〙 ╠➩〘/cnkapten〙 ╠➩〘/cntc1〙 ╠➩〘/cntc2〙 ╠➩〘/cntc3〙 ╠➩〘/cntc4〙 ╠➩〘Crash〙 ╠➩〘Kickall〙 ╠➩〘Bc: (Text)〙 ╠➩〘Nk: @〙 ╠➩〘Ulti @〙 ╠➩〘Join group: (NamaGroup〙 ╠➩〘Leave group: (NamaGroup〙 ╠➩〘Leave all group〙 ╠➩〘Bot restart〙 ╠➩〘Turn off〙 ╠═════════════════════════ ║ ☆☞ C R E A T O R ☜☆ ╚═════════════════════════ """ adminMessage =""" ╔═════════════════════════ ║ ☆☞ A D M I N ☜☆ ╠═════════════════════════ ╠➩〘Admin list〙 ╠➩〘Ban〙 ╠➩〘Unban〙 ╠➩〘Ban @〙 ╠➩〘Unban @〙 ╠➩〘Ban list〙 ╠➩〘Clear ban〙 ╠➩〘Kill〙 ╠➩〘Kick @〙 ╠➩〘Set member: (Jumblah)〙 ╠➩〘Ban group: (NamaGroup〙 ╠➩〘Del ban: (NamaGroup〙 ╠➩〘List ban〙 ╠➩〘Kill ban〙 ╠➩〘Glist〙 ╠➩〘Glistmid〙 ╠➩〘Details group: (Gid)〙 ╠➩〘Cancel invite: (Gid)〙 ╠➩〘Invitemeto: (Gid)〙 ╠➩〘Kapten acc invite〙 ╠➩〘TC1 acc invite〙 ╠➩〘TC2 acc invite〙 ╠➩〘TC3 acc invite〙 ╠➩〘TC4 acc invite〙 ╠➩〘Removechat〙 ╠➩〘Join on/off〙 ╠➩〘Joincancel on/off〙 ╠➩〘Respon on/off〙 ╠➩〘Responkick on/off〙 ╠➩〘Leave on/off〙 ╠➩〘All join / (TC1/2/3/4 Join)〙 ╠═════════════════════════ ║ ☆☞ A D M I N ☜☆ ╚═════════════════════════ """ helpMessage =""" ╔═════════════════════════ ║ ☆☞ H E L P ☜☆ ╠═════════════════════════ ╠➩〘Help protect〙 ╠➩〘Help self〙 ╠➩〘Help bot〙 ╠➩〘Help group〙 ╠➩〘Help set〙 ╠➩〘Help media〙 ╠➩〘Help admin〙 ╠➩〘Help creator〙 ╠➩〘Owner〙 ╠➩〘Pap owner〙 ╠➩〘Admin〙 ╠➩〘Speed〙 ╠➩〘Speed test〙 ╠➩〘Status〙 ╠═════════════════════════ ║ ☆☞ H E L P ☜☆ ╚═════════════════════════ """ protectMessage =""" ╔═════════════════════════ ║ ☆☞ P R O T E C T ☜☆ ╠═════════════════════════ ╠➩〘Allprotect on/off〙 ╠➩〘Autocancel on/off〙 ╠➩〘Qr on/off〙 ╠➩〘Autokick on/off〙 ╠➩〘Ghost on/off〙 ╠➩〘Invitepro on/off〙 ╠═════════════════════════ ║ ☆☞ P R O T E C T ☜☆ ╚═════════════════════════ """ KAC=[cl,ki,kk] mid = cl.getProfile().mid Amid = ki.getProfile().mid Bmid = kk.getProfile().mid Bots=[mid,Amid,Bmid] Creator=["u71b6799e1c37868a871d442e67633182"] admin=["u71b6799e1c37868a871d442e67633182"] contact = cl.getProfile() backup1 = cl.getProfile() backup1.displayName = contact.displayName backup1.statusMessage = contact.statusMessage backup1.pictureStatus = contact.pictureStatus contact = ki.getProfile() backup2 = ki.getProfile() backup2.displayName = contact.displayName backup2.statusMessage = contact.statusMessage backup2.pictureStatus = contact.pictureStatus contact = kk.getProfile() backup3 = kk.getProfile() backup3.displayName = contact.displayName backup3.statusMessage = contact.statusMessage backup3.pictureStatus = contact.pictureStatus responsename = cl.getProfile().displayName responsename2 = ki.getProfile().displayName responsename3 = kk.getProfile().displayName wait = { "LeaveRoom":True, "AutoJoin":False, "AutoJoinCancel":True, "memberscancel":0, "Members":1, "AutoCancel":{}, "AutoCancelon":False, "joinkick":False, "AutoKick":{}, "AutoKickon":False, 'pap':{}, 'invite':{}, 'steal':{}, 'gift':{}, 'likeOn':{}, 'Leave':{}, 'detectMention':True, 'kickMention':False, 'timeline':True, "Timeline":True, "comment1":"Kenapa Kak?", "comment2":"Wkwkwk \(○^ω^○)/", "comment3":"Lucu Banget!!! 
ヘ(^_^)ヘ", "comment4":"Nice Kak (^_^)", "comment5":"Bot Auto Like ©By : Azmi\nContact Me : 👉 line.me/ti/p/~a_ulul15", "commentOn":True, "commentBlack":{}, "message":"Thx For Add Me (^_^)\nInvite Me To Your Group ヘ(^_^)ヘ", "blacklist":{}, "wblacklist":False, "dblacklist":False, "Qr":{}, "Qron":False, "Contact":False, "Sambutan":True, "Ghost":False, "inviteprotect":False, "alwaysRead":False, "Sider":{}, "Simi":{}, "lang":"JP", "BlGroup":{} } settings = { "simiSimi":{} } cctv = { "cyduk":{}, "point":{}, "sidermem":{} } wait2 = { "readPoint":{}, "readMember":{}, "setTime":{}, "ROM":{} } setTime = {} setTime = wait2['setTime'] mulai = time.time() def download_page(url): version = (3,0) cur_version = sys.version_info if cur_version >= version: import urllib,request try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" req = urllib,request.Request(url, headers = headers) resp = urllib,request.urlopen(req) respData = str(resp.read()) return respData except Exception as e: print(str(e)) else: import urllib2 try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17" req = urllib2.Request(url, headers = headers) response = urllib2.urlopen(req) page = response.read() return page except: return"Page Not found" def _images_get_next_item(s): start_line = s.find('rg_di') if start_line == -1: end_quote = 0 link = "no_links" return link, end_quote else: start_line = s.find('"class="rg_meta"') start_content = s.find('"ou"',start_line+90) end_content = s.find(',"ow"',start_content-90) content_raw = str(s[start_content+6:end_content-1]) return content_raw, end_content def _images_get_all_items(page): items = [] while True: item, end_content = _images_get_next_item(page) if item == "no_links": break else: items.append(item) time.sleep(0.1) page = page[end_content:] return items def waktu(secs): mins, secs = divmod(secs,60) hours, mins = divmod(mins,60) return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs) def cms(string, commands):# /XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX... tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"] for texX in tex: for command in commands: if string ==command: return True return False def upload_tempimage(client): ''' Upload a picture of a kitten. We don't ship one, so get creative! ''' config = { 'album': album, 'name': 'bot auto upload', 'title': 'bot auto upload', 'description': 'bot auto upload' } print("Uploading image... 
") image = client.upload_from_path(image_path, config=config, anon=False) print("Done") print() return image def sendAudio(self, to_, path): M = Message() M.text = None M.to = to_ M.contentMetadata = None M.contentPreview = None M.contentType = 3 M_id = self._client.sendMessage(0,M).id files = { 'file': open(path, 'rb'), } def sendMessage(to, text, contentMetadata={}, contentType=0): mes = Message() mes.to, mes.from_ = to, profile.mid mes.text = text mes.contentType, mes.contentMetadata = contentType, contentMetadata if to not in messageReq: messageReq[to] = -1 messageReq[to] += 1 def sendImage(self, to_, path): M = Message(to=to_, text=None, contentType = 1) M.contentMetadata = None M.contentPreview = None M2 = self._client.sendMessage(0,M) M_id = M2.id files = { 'file': open(path, 'rb'), } params = { 'name': 'media', 'oid': M_id, 'size': len(open(path, 'rb').read()), 'type': 'image', 'ver': '1.0', } data = { 'params': json.dumps(params) } r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files) if r.status_code != 201: raise Exception('Upload image failure.') return True def sendImageWithURL(self, to_, url): path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9)) r = requests.get(url, stream=True) if r.status_code == 200: with open(path, 'w') as f: shutil.copyfileobj(r.raw, f) else: raise Exception('Download image failure.') try: self.sendImage(to_, path) except: try: self.sendImage(to_, path) except Exception as e: raise e def sendAudio(self, to_, path): M = Message() M.text = None M.to = to_ M.contentMetadata = None M.contentPreview = None M.contentType = 3 M_id = self._client.sendMessage(0,M).id files = { 'file': open(path, 'rb'), } params = { 'name': 'media', 'oid': M_id, 'size': len(open(path, 'rb').read()), 'type': 'audio', 'ver': '1.0', } data = { 'params': json.dumps(params) } r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files) if r.status_code != 201: raise Exception('Upload audio failure.') return True def sendAudioWithURL(self, to_, url): path = self.downloadFileWithURL(url) try: self.sendAudio(to_, path) except Exception as e: raise Exception(e) def sendAudioWithUrl(self, to_, url): path = '%s/pythonLine-%1.data' % (tempfile.gettempdir(), randint(0, 9)) r = requests.get(url, stream=True, verify=False) if r.status_code == 200: with open(path, 'w') as f: shutil.copyfileobj(r.raw, f) else: raise Exception('Download audio failure.') try: self.sendAudio(to_, path) except Exception as e: raise e def downloadFileWithURL(self, fileUrl): saveAs = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9)) r = self.get_content(fileUrl) if r.status_code == 200: with open(saveAs, 'wb') as f: shutil.copyfileobj(r.raw, f) return saveAs else: raise Exception('Download file failure.') def restart_program(): python = sys.executable os.execl(python, python, * sys.argv) def bot(op): try: if op.type == 0: return if op.type == 5: if wait["autoAdd"] == True: cl.findAndAddContactsByMid(op.param1) if(wait["message"]in[""," ","\n",None]): pass else: cl.sendText(op.param1,str(wait["message"])) if op.type == 55: try: group_id = op.param1 user_id=op.param2 subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, ) except Exception as e: print e if op.type == 55: try: if cctv['cyduk'][op.param1]==True: if op.param1 in cctv['point']: Name = cl.getContact(op.param2).displayName Name = ki.getContact(op.param2).displayName Name = 
kk.getContact(op.param2).displayName Name = kc.getContact(op.param2).displayName Name = kr.getContact(op.param2).displayName if Name in cctv['sidermem'][op.param1]: pass else: cctv['sidermem'][op.param1] += "\n• " + Name if " " in Name: nick = Name.split(' ') if len(nick) == 2: random.choice(KAC).sendText(op.param1, "Haii " + "☞ " + nick[0] + " ☜" + "\nNgintip Aja Niih. . .\nChat Kek Idiih (-__-) ") else: random.choice(KAC).sendText(op.param1, "Haii " + "☞ " + nick[1] + " ☜" + "\nBetah Banget Jadi Penonton. . .\nChat Napa (-__-) ") else: random.choice(KAC).sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nNgapain Kak Ngintip Aja???\nSini Gabung Chat... ") else: pass else: pass except: pass else: pass if op.type == 22: cl.leaveRoom(op.param1) if op.type == 21: cl.leaveRoom(op.param1) if op.type == 13: print op.param3 if op.param3 in mid: if op.param2 in Creator: cl.acceptGroupInvitation(op.param1) if op.param3 in Amid: if op.param2 in Creator: ki.acceptGroupInvitation(op.param1) if op.param3 in Bmid: if op.param2 in Creator: kk.acceptGroupInvitation(op.param1) if op.param3 in Cmid: if op.param2 in Creator: kc.acceptGroupInvitation(op.param1) if op.param3 in Dmid: if op.param2 in Creator: kr.acceptGroupInvitation(op.param1) if op.param3 in mid: if op.param2 in Amid: cl.acceptGroupInvitation(op.param1) if op.param3 in mid: if op.param2 in Bmid: cl.acceptGroupInvitation(op.param1) if op.param3 in mid: if op.param2 in Cmid: cl.acceptGroupInvitation(op.param1) if op.param3 in Amid: if op.param2 in mid: ki.acceptGroupInvitation(op.param1) if op.param3 in Amid: if op.param2 in Bmid: ki.acceptGroupInvitation(op.param1) if op.param3 in Amid: if op.param2 in Cmid: ki.acceptGroupInvitation(op.param1) if op.param3 in Bmid: if op.param2 in mid: kk.acceptGroupInvitation(op.param1) if op.param3 in Bmid: if op.param2 in Amid: kk.acceptGroupInvitation(op.param1) if op.param3 in Bmid: if op.param2 in Cmid: kk.acceptGroupInvitation(op.param1) if op.param3 in Cmid: if op.param2 in mid: kc.acceptGroupInvitation(op.param1) if op.param3 in Cmid: if op.param2 in Amid: kc.acceptGroupInvitation(op.param1) if op.param3 in Cmid: if op.param2 in Cmid: kc.acceptGroupInvitation(op.param1) if op.param3 in Dmid: if op.param2 in mid: kr.acceptGroupInvitation(op.param1) if op.param3 in Dmid: if op.param2 in Amid: kr.acceptGroupInvitation(op.param1) if op.param3 in Dmid: if op.param2 in Bmid: kr.acceptGroupInvitation(op.param1) if mid in op.param3: if wait["AutoJoinCancel"] == True: G = cl.getGroup(op.param1) if len(G.members) <= wait["memberscancel"]: cl.acceptGroupInvitation(op.param1) cl.sendText(op.param1,"Maaf " + cl.getContact(op.param2).displayName + "\nMember Kurang Dari 30 Orang\nUntuk Info, Silahkan Chat Owner Kami!") c = Message(to=op.param1, from_=None, text=None, contentType=13) c.contentMetadata={'mid':tjia} cl.sendMessage(c) cl.leaveGroup(op.param1) else: cl.acceptGroupInvitation(op.param1) G = cl.getGroup(op.param1) G.preventJoinByTicket = False cl.updateGroup(G) Ti = cl.reissueGroupTicket(op.param1) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kc.acceptGroupInvitationByTicket(op.param1,Ti) kr.acceptGroupInvitationByTicket(op.param1,Ti) G.preventJoinByTicket = True cl.updateGroup(G) cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆") if mid in op.param3: if wait["AutoJoin"] == True: G = cl.getGroup(op.param1) if len(G.members) <= wait["Members"]: cl.rejectGroupInvitation(op.param1) else: 
cl.acceptGroupInvitation(op.param1) G = cl.getGroup(op.param1) G.preventJoinByTicket = False cl.updateGroup(G) Ti = cl.reissueGroupTicket(op.param1) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kc.acceptGroupInvitationByTicket(op.param1,Ti) kr.acceptGroupInvitationByTicket(op.param1,Ti) G.preventJoinByTicket = True cl.updateGroup(G) cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆") else: if wait["AutoCancel"][op.param1] == True: if op.param3 in admin: pass else: cl.cancelGroupInvitation(op.param1, [op.param3]) else: if op.param3 in wait["blacklist"]: cl.cancelGroupInvitation(op.param1, [op.param3]) cl.sendText(op.param1, "Blacklist Detected") else: pass if op.type == 19: if wait["AutoKick"][op.param1] == True: try: if op.param3 in Creator: if op.param3 in admin: if op.param3 in Bots: pass if op.param2 in Creator: if op.param2 in admin: if op.param2 in Bots: pass else: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) if op.param2 in wait["blacklist"]: pass else: random.choice(KAC).inviteIntoGroup(op.param1,[op.param3]) except: try: if op.param2 not in Creator: if op.param2 not in admin: if op.param2 not in Bots: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) if op.param2 in wait["blacklist"]: pass else: random.choice(KAC).inviteIntoGroup(op.param1,[op.param3]) except: print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]") if op.param2 in wait["blacklist"]: pass else: if op.param2 in Creator: if op.param2 in admin: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True if op.param2 in wait["blacklist"]: pass else: if op.param2 in Creator: if op.param2 in admin: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True else: pass if mid in op.param3: if op.param2 in Creator: if op.param2 in Bots: pass try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]") if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True G = ki.getGroup(op.param1) G.preventJoinByTicket = False ki.updateGroup(G) Ti = ki.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ti) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kc.acceptGroupInvitationByTicket(op.param1,Ti) kr.acceptGroupInvitationByTicket(op.param1,Ti) X = cl.getGroup(op.param1) X.preventJoinByTicket = True cl.updateGroup(X) if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True if Amid in op.param3: if op.param2 in Bots: pass try: kk.kickoutFromGroup(op.param1,[op.param2]) kc.kickoutFromGroup(op.param1,[op.param2]) except: try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]") if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True X = kk.getGroup(op.param1) X.preventJoinByTicket = False cl.updateGroup(X) Ti = kk.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ti) 
ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kr.acceptGroupInvitationByTicket(op.param1,Ti) G = ki.getGroup(op.param1) G.preventJoinByTicket = True ki.updateGroup(G) if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True if Bmid in op.param3: if op.param2 in Bots: pass try: kc.kickoutFromGroup(op.param1,[op.param2]) kk.kickoutFromGroup(op.param1,[op.param2]) except: try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]") if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True X = kc.getGroup(op.param1) X.preventJoinByTicket = False kc.updateGroup(X) Ti = kc.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ti) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kc.acceptGroupInvitationByTicket(op.param1,Ti) kr.acceptGroupInvitationByTicket(op.param1,Ti) G = kk.getGroup(op.param1) G.preventJoinByTicket = True kk.updateGroup(G) if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True if Cmid in op.param3: if op.param2 in Bots: pass try: cl.kickoutFromGroup(op.param1,[op.param2]) kk.kickoutFromGroup(op.param1,[op.param2]) except: try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]") if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True X = cl.getGroup(op.param1) X.preventJoinByTicket = False cl.updateGroup(X) Ti = cl.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ti) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kc.acceptGroupInvitationByTicket(op.param1,Ti) kr.acceptGroupInvitationByTicket(op.param1,Ti) G = kc.getGroup(op.param1) G.preventJoinByTicket = True kc.updateGroup(G) if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True if Dmid in op.param3: if op.param2 in Bots: pass try: cl.kickoutFromGroup(op.param1,[op.param2]) kk.kickoutFromGroup(op.param1,[op.param2]) except: try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]") if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True X = cl.getGroup(op.param1) X.preventJoinByTicket = False cl.updateGroup(X) Ti = cl.reissueGroupTicket(op.param1) cl.acceptGroupInvitationByTicket(op.param1,Ti) ki.acceptGroupInvitationByTicket(op.param1,Ti) kk.acceptGroupInvitationByTicket(op.param1,Ti) kc.acceptGroupInvitationByTicket(op.param1,Ti) kr.acceptGroupInvitationByTicket(op.param1,Ti) G = kc.getGroup(op.param1) G.preventJoinByTicket = True kc.updateGroup(G) if op.param2 in wait["blacklist"]: pass else: if op.param2 in Bots: pass else: wait["blacklist"][op.param2] = True if Creator in op.param3: if admin in op.param3: if op.param2 in Bots: pass try: random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) except: try: if op.param2 not in Bots: 
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) if op.param2 in wait["blacklist"]: pass else: random.choice(KAC).inviteIntoGroup(op.param1,[op.param3]) except: print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]") if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True random.choice(KAC).inviteIntoGroup(op.param1,[op.param3]) if op.param2 in wait["blacklist"]: pass if op.param2 in wait["whitelist"]: pass else: wait["blacklist"][op.param2] = True if op.type == 11: if wait["Qr"][op.param1] == True: if op.param2 not in Bots: if op.param2 not in admin: G = random.choice(KAC).getGroup(op.param1) G.preventJoinByTicket = True random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) random.choice(KAC).updateGroup(G) if op.type == 17: if wait["Sambutan"] == True: if op.param2 in admin: return ginfo = cl.getGroup(op.param1) contact = cl.getContact(op.param2) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus c = Message(to=op.param1, from_=None, text=None, contentType=13) c.contentMetadata={'mid':op.param2} cl.sendMessage(c) cl.sendText(op.param1,"Hallo " + cl.getContact(op.param2).displayName + "\nWelcome To ☞ " + str(ginfo.name) + " ☜" + "\nBudayakan Cek Note\nDan Semoga Betah Disini ^_^") cl.sendImageWithURL(op.param1,image) print "MEMBER JOIN TO GROUP" if op.type == 17: if wait["joinkick"] == True: if op.param2 in admin: if op.param2 in Bots: return random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) print "MEMBER JOIN KICK TO GROUP" if op.type == 15: if wait["Sambutan"] == True: if op.param2 in admin: return cl.sendText(op.param1,"Good Bye " + cl.getContact(op.param2).displayName + "\nSee You Next Time . . . 
(p′︵‵。) 🤗") random.choice(KAC).inviteIntoGroup(op.param1,[op.param2]) print "MEMBER HAS LEFT THE GROUP" if op.type == 13: if op.param2 not in Creator: if op.param2 not in admin: if op.param2 not in Bots: if op.param2 in Creator: if op.param2 in admin: if op.param2 in Bots: pass elif wait["inviteprotect"] == True: wait ["blacklist"][op.param2] = True cl.cancelGroupInvitation(op.param1,[op.param3]) random.choice(KAC).kickoutFromGroup(op.param1,[op.param2]) if op.type == 19: if wait["Ghost"] == True: if op.param2 in admin: if op.param2 in Bots: pass else: try: G = cl.getGroup(op.param1) G.preventJoinByTicket = False cl.updateGroup(G) Ticket = cl.reissueGroupTicket(op.param1) km.acceptGroupInvitationByTicket(op.param1,Ticket) time.sleep(0.01) km.kickoutFromGroup(op.param1,[op.param2]) c = Message(to=op.param1, from_=None, text=None, contentType=13) c.contentMetadata={'mid':op.param2} km.sendMessage(c) km.leaveGroup(op.param1) G.preventJoinByTicket = True cl.updateGroup(G) wait["blacklist"][op.param2] = True except: G = cl.getGroup(op.param1) G.preventJoinByTicket = False cl.updateGroup(G) Ticket = cl.reissueGroupTicket(op.param1) km.acceptGroupInvitationByTicket(op.param1,Ticket) time.sleep(0.01) km.kickoutFromGroup(op.param1,[op.param2]) c = Message(to=op.param1, from_=None, text=None, contentType=13) c.contentMetadata={'mid':op.param2} km.sendMessage(c) km.leaveGroup(op.param1) G.preventJoinByTicket = True cl.updateGroup(G) wait["blacklist"][op.param2] = True if op.type == 26: msg = op.message if wait["alwaysRead"] == True: if msg.toType == 0: cl.sendChatChecked(msg.from_,msg.id) else: cl.sendChatChecked(msg.to,msg.id) if msg.contentType == 16: if wait['likeOn'] == True: url = msg.contentMetadata["postEndUrl"] cl.like(url[25:58], url[66:], likeType=1005) ki.like(url[25:58], url[66:], likeType=1002) kk.like(url[25:58], url[66:], likeType=1004) kc.like(url[25:58], url[66:], likeType=1003) kr.like(url[25:58], url[66:], likeType=1001) cl.comment(url[25:58], url[66:], wait["comment1"]) ki.comment(url[25:58], url[66:], wait["comment2"]) kk.comment(url[25:58], url[66:], wait["comment3"]) kc.comment(url[25:58], url[66:], wait["comment4"]) kr.comment(url[25:58], url[66:], wait["comment5"]) cl.sendText(msg.to,"Like Success") wait['likeOn'] = False if op.type == 26: msg = op.message if msg.to in settings["simiSimi"]: if settings["simiSimi"][msg.to] == True: if msg.text is not None: text = msg.text r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt") data = r.text data = json.loads(data) if data['status'] == 200: if data['result']['result'] == 100: cl.sendText(msg.to,data['result']['response'].encode('utf-8')) if 'MENTION' in msg.contentMetadata.keys() != None: if wait["kickMention"] == True: contact = cl.getContact(msg.from_) cName = contact.displayName balas = ["Aku Bilang Jangan Ngetag Lagi " + cName + "\nAku Kick Kamu! 
Sorry, Byee!!!"] ret_ = random.choice(balas) name = re.findall(r'@(\w+)', msg.text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] for mention in mentionees: if mention['M'] in admin: cl.sendText(msg.to,ret_) random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_]) break if mention['M'] in Bots: cl.sendText(msg.to,ret_) random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_]) break if 'MENTION' in msg.contentMetadata.keys() != None: if wait["detectMention"] == True: contact = cl.getContact(msg.from_) cName = contact.displayName balas = ["Sekali lagi nge tag gw sumpahin jomblo seumur hidup!","Dont Tag!! Lagi Sibuk",cName + " Ngapain Ngetag?",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja","Tag Mulu Lo Anjirr!","Dia Lagi Off", cName + " Kenapa Tag? Kangen?","Dia Lagi Tidur\nJangan Di Tag " + cName, "Jangan Suka Tag Gua " + cName, "Kamu Siapa " + cName + "?", "Ada Perlu Apa " + cName + "?","Woii " + cName + " Jangan Ngetag, Riibut!"] ret_ = random.choice(balas) name = re.findall(r'@(\w+)', msg.text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] for mention in mentionees: if mention['M'] in admin: cl.sendText(msg.to,ret_) break if mention['M'] in Bots: cl.sendText(msg.to,ret_) break if msg.contentType == 13: if wait["wblacklist"] == True: if msg.contentMetadata["mid"] not in admin: if msg.contentMetadata["mid"] in wait["blacklist"]: random.choice(KAC).sendText(msg.to,"Sudah") wait["wblacklist"] = False else: wait["blacklist"][msg.contentMetadata["mid"]] = True wait["wblacklist"] = False random.choice(KAC).sendText(msg.to,"Ditambahkan") else: cl.sendText(msg.to,"Admin Detected~") elif wait["dblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: del wait["blacklist"][msg.contentMetadata["mid"]] random.choice(KAC).sendText(msg.to,"Terhapus") wait["dblacklist"] = False else: wait["dblacklist"] = False random.choice(KAC).sendText(msg.to,"Tidak Ada Black List") elif wait["Contact"] == True: msg.contentType = 0 cl.sendText(msg.to,msg.contentMetadata["mid"]) if 'displayName' in msg.contentMetadata: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu)) else: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu)) elif msg.text == "Ginfo": if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: gCreator = ginfo.creator.displayName except: gCreator = "Error" if wait["lang"] == "JP": if ginfo.invitee is None: sinvitee = "0" else: sinvitee = str(len(ginfo.invitee)) if ginfo.preventJoinByTicket == True: u = "close" else: u = "open" cl.sendText(msg.to,"[Group name]\n" + str(ginfo.name) + "\n\n[Gid]\n" + msg.to + "\n\n[Group creator]\n" + gCreator + "\n\n[Profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "members\nPending:" + sinvitee + "people\nURL:" + u + "it 
is inside") else: cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus) else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can not be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text is None: return elif msg.text in ["Creator","Owner"]: msg.contentType = 13 msg.contentMetadata = {'mid': tjia} cl.sendMessage(msg) cl.sendText(msg.to,"Itu tukang tikungnya(^_^)") elif msg.text in ["Admin","admin"]: msg.contentType = 13 admin1 = "u71b6799e1c37868a871d442e67633182" admin2 = "u46560b002469877f708c1d2e8966fc9d" admin3 = "u1dee2db35847101e3aa420e667390000" msg.contentMetadata = {'mid': tjia} random.choice(KAC).sendMessage(msg) msg.contentMetadata = {'mid': admin1} random.choice(KAC).sendMessage(msg) msg.contentMetadata = {'mid': admin2} random.choice(KAC).sendMessage(msg) msg.contentMetadata = {'mid': admin3} random.choice(KAC).sendMessage(msg) random.choice(KAC).sendText(msg.to,"Itu Admin Kami (^_^)") elif "Admin add @" in msg.text: if msg.from_ in Creator: print "[Command]Admin add executing" _name = msg.text.replace("Admin add @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) gs = ki.getGroup(msg.to) gs = kk.getGroup(msg.to) gs = kc.getGroup(msg.to) gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan") else: for target in targets: try: admin.append(target) cl.sendText(msg.to,"Admin Chucky Ditambahkan") except: pass print "[Command]Admin add executed" else: cl.sendText(msg.to,"Command Denied.") cl.sendText(msg.to,"Creator Permission Required.") elif "Admin remove @" in msg.text: if msg.from_ in Creator: print "[Command]Admin Remove Executing" _name = msg.text.replace("Admin remove @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) gs = ki.getGroup(msg.to) gs = kk.getGroup(msg.to) gs = kc.getGroup(msg.to) gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan") else: for target in targets: try: admin.remove(target) cl.sendText(msg.to,"Admin Chucky Dihapus") except: pass print "[Command]Admin remove executed" else: cl.sendText(msg.to,"Command Denied.") cl.sendText(msg.to,"Creator Permission Required.") elif msg.text in ["Admin list","admin list","List admin"]: if admin == []: cl.sendText(msg.to,"The Admin List Is Empty") else: cl.sendText(msg.to,"Tunggu...") mc = "╔═════════════════════════\n║ ☆☞ ADMIN CHUCKY ☜☆\n╠═════════════════════════\n" for mi_d in admin: mc += "╠••> " +cl.getContact(mi_d).displayName + "\n" cl.sendText(msg.to,mc + "╚═════════════════════════") print "[Command]Admin List executed" elif msg.text in ["Group creator","Gcreator","gcreator"]: ginfo = cl.getGroup(msg.to) gCreator = ginfo.creator.mid msg.contentType = 13 msg.contentMetadata = {'mid': gCreator} cl.sendMessage(msg) cl.sendText(msg.to,"Itu Yang Buat Grup Ini") elif msg.contentType == 16: if wait["Timeline"] == True: msg.contentType = 0 msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"] random.choice(KAC).sendText(msg.to,msg.text) if msg.contentType == 13: if wait["steal"] == True: _name = msg.contentMetadata["displayName"] copy = msg.contentMetadata["mid"] groups = cl.getGroup(msg.to) pending = groups.invitee targets = [] for s 
in groups.members: if _name in s.displayName: print "[Target] Stealed" break else: targets.append(copy) if targets == []: pass else: for target in targets: try: cl.findAndAddContactsByMid(target) contact = cl.getContact(target) cu = cl.channel.getCover(target) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage) cl.sendText(msg.to,"Profile Picture " + contact.displayName) cl.sendImageWithURL(msg.to,image) cl.sendText(msg.to,"Cover " + contact.displayName) cl.sendImageWithURL(msg.to,path) wait["steal"] = False break except: pass if msg.contentType == 13: if wait["gift"] == True: _name = msg.contentMetadata["displayName"] copy = msg.contentMetadata["mid"] groups = cl.getGroup(msg.to) pending = groups.invitee targets = [] for s in groups.members: if _name in s.displayName: print "[Target] Gift" break else: targets.append(copy) if targets == []: pass else: for target in targets: try: cl.sendText(msg.to,"Gift Sudah Terkirim!") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '1', 'STKPKGID': '1296261'} msg.to = target msg.text = None cl.sendMessage(msg) wait['gift'] = False break except: msg.contentMetadata = {'mid': target} wait["gift"] = False break if msg.contentType == 13: if wait['invite'] == True: _name = msg.contentMetadata["displayName"] invite = msg.contentMetadata["mid"] groups = cl.getGroup(msg.to) groups = ki.getGroup(msg.to) groups = kk.getGroup(msg.to) groups = kc.getGroup(msg.to) groups = kr.getGroup(msg.to) pending = groups.invitee targets = [] for s in groups.members: if _name in s.displayName: random.choice(KAC).sendText(msg.to, _name + " Berada DiGrup Ini") else: targets.append(invite) if targets == []: pass else: for target in targets: try: cl.findAndAddContactsByMid(target) ki.findAndAddContactsByMid(target) kk.findAndAddContactsByMid(target) kc.findAndAddContactsByMid(target) kr.findAndAddContactsByMid(target) random.choice(KAC).inviteIntoGroup(msg.to,[target]) random.choice(KAC).sendText(msg.to,"Invite " + _name) wait['invite'] = False break except: random.choice(KAC).sendText(msg.to,"Limit Invite") wait['invite'] = False break elif msg.text in ["Key creator","help creator","Help creator"]: cl.sendText(msg.to,creatorMessage) elif msg.text in ["Key group","help group","Help group"]: cl.sendText(msg.to,groupMessage) elif msg.text in ["Key","help","Help"]: cl.sendText(msg.to,helpMessage) elif msg.text in ["Key self","help self","Help self"]: cl.sendText(msg.to,selfMessage) elif msg.text in ["Key bot","help bot","Help bot"]: cl.sendText(msg.to,botMessage) elif msg.text in ["Key set","help set","Help set"]: cl.sendText(msg.to,setMessage) elif msg.text in ["Key media","help media","Help media"]: cl.sendText(msg.to,mediaMessage) elif msg.text in ["Key admin","help admin","Help admin"]: cl.sendText(msg.to,adminMessage) elif msg.text in ["Key protect","help protect","Help protect"]: cl.sendText(msg.to,protectMessage) elif msg.text in ["List group"]: gid = cl.getGroupIdsJoined() h = "" jml = 0 for i in gid: gn = cl.getGroup(i).name h += "♦【%s】\n" % (gn) jml += 1 cl.sendText(msg.to,"=======[List Group]=======\n"+ h +"\nTotal Group: "+str(jml)) elif "Ban group: " in msg.text: grp = msg.text.replace("Ban group: ","") gid = cl.getGroupIdsJoined() if msg.from_ in admin: for i in gid: h = cl.getGroup(i).name if h == grp: wait["BlGroup"][i]=True cl.sendText(msg.to, "Success Ban Group : 
"+grp) else: pass else: cl.sendText(msg.to, "Only Admin") elif msg.text in ["List ban","List ban group"]: if msg.from_ in admin: if wait["BlGroup"] == {}: random.choice(KAC).sendText(msg.to,"Tidak Ada") else: mc = "" for gid in wait["BlGroup"]: mc += "-> " +cl.getGroup(gid).name + "\n" random.choice(KAC).sendText(msg.to,"===[Ban Group]===\n"+mc) else: cl.sendText(msg.to, "Khusus Admin") elif msg.text in ["Del ban: "]: if msg.from_ in admin: ng = msg.text.replace("Del ban: ","") for gid in wait["BlGroup"]: if cl.getGroup(gid).name == ng: del wait["BlGroup"][gid] cl.sendText(msg.to, "Success del ban "+ng) else: pass else: cl.sendText(msg.to, "Only Admin") elif "Join group: " in msg.text: ng = msg.text.replace("Join group: ","") gid = cl.getGroupIdsJoined() gid = ki.getGroupIdsJoined() gid = kk.getGroupIdsJoined() gid = kc.getGroupIdsJoined() gid = kr.getGroupIdsJoined() try: if msg.from_ in Creator: for i in gid: h = cl.getGroup(i).name h = ki.getGroup(i).name h = kk.getGroup(i).name h = kc.getGroup(i).name h = kr.getGroup(i).name if h == ng: random.choice(KAC).inviteIntoGroup(i,[Creator]) cl.sendText(msg.to,"Success Join To ["+ h +"] Group") else: pass else: cl.sendText(msg.to,"Only Admin") except Exception as e: cl.sendText(msg.to, str(e)) elif "Leave group: " in msg.text: ng = msg.text.replace("Leave group: ","") gid = cl.getGroupIdsJoined() if msg.from_ in Creator: for i in gid: h = cl.getGroup(i).name if h == ng: cl.sendText(i,"Bot Di Paksa Keluar Oleh Owner!") cl.leaveGroup(i) ki.leaveGroup(i) kk.leaveGroup(i) kc.leaveGroup(i) kr.leaveGroup(i) cl.sendText(msg.to,"Success Left ["+ h +"] group") else: pass else: cl.sendText(msg.to,"Only Admin") elif "Leave all group" == msg.text: gid = cl.getGroupIdsJoined() if msg.from_ in Creator: for i in gid: cl.sendText(i,"Bot Di Paksa Keluar Oleh Owner!") cl.leaveGroup(i) ki.leaveGroup(i) kk.leaveGroup(i) kc.leaveGroup(i) kr.leaveGroup(i) cl.sendText(msg.to,"Success Leave All Group") else: cl.sendText(msg.to,"Only Admin") elif "Pict group: " in msg.text: saya = msg.text.replace('Pict group: ','') gid = cl.getGroupIdsJoined() for i in gid: h = cl.getGroup(i).name gna = cl.getGroup(i) if h == saya: cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif msg.text in ["cancelall","Cancelall"]: if msg.toType == 2: X = cl.getGroup(msg.to) if X.invitee is not None: gInviMids = [contact.mid for contact in X.invitee] cl.cancelGroupInvitation(msg.to, gInviMids) else: cl.sendText(msg.to,"Tidak Ada Yang Pending") else: cl.sendText(msg.to,"Tidak Bisa Digunakan Diluar Group") elif msg.text in ["Ourl","Url on"]: if msg.toType == 2: X = cl.getGroup(msg.to) X.preventJoinByTicket = False cl.updateGroup(X) cl.sendText(msg.to,"Url Sudah Aktif") else: cl.sendText(msg.to,"Can not be used outside the group") elif msg.text in ["Curl","Url off"]: if msg.toType == 2: X = cl.getGroup(msg.to) X.preventJoinByTicket = True cl.updateGroup(X) cl.sendText(msg.to,"Url Sudah Di Nonaktifkan") else: cl.sendText(msg.to,"Can not be used outside the group") elif msg.text in ["Join on","Autojoin on"]: if msg.from_ in admin: wait["AutoJoin"] = True wait["AutoJoinCancel"] = False cl.sendText(msg.to,"Auto Join Sudah Aktif") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Join off","Autojoin off"]: if msg.from_ in admin: wait["AutoJoin"] = False cl.sendText(msg.to,"Auto Join Sudah Di Nonaktifkan") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Joincancel on","Autojoincancel on"]: if msg.from_ in admin: wait["AutoJoinCancel"] = True 
wait["AutoJoin"] = False cl.sendText(msg.to,"Auto Join Cancel Sudah Aktif") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Joincancel off","Autojoincancel off"]: if msg.from_ in admin: wait["AutoJoinCancel"] = False cl.sendText(msg.to,"Auto Join Cancel Sudah Di Nonaktifkan") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Respon on"]: if msg.from_ in admin: wait["detectMention"] = True wait["kickMention"] = False cl.sendText(msg.to,"Auto Respon Sudah Aktif") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Respon off"]: if msg.from_ in admin: wait["detectMention"] = False cl.sendText(msg.to,"Auto Respon Sudah Off") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Responkick on"]: if msg.from_ in admin: wait["kickMention"] = True wait["detectMention"] = False cl.sendText(msg.to,"Auto Respon Kick Sudah Aktif") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Responkick off"]: if msg.from_ in admin: wait["kickMention"] = False cl.sendText(msg.to,"Auto Respon Kick Sudah Off") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Leave on"]: if msg.from_ in admin: wait["Leave"] = True cl.sendText(msg.to,"Leave Sudah Aktif") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Autocancel on"]: if msg.from_ in admin: wait["AutoCancel"][msg.to] = True wait["AutoCancelon"] = True cl.sendText(msg.to,"Auto Cancel Sudah Aktif") print wait["AutoCancel"] else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Autocancel off"]: if msg.from_ in admin: wait["AutoCancel"][msg.to] = False wait["AutoCancelon"] = False cl.sendText(msg.to,"Auto Cancel Sudah Di Nonaktifkan") print wait["AutoCancel"] else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Joinkick on"]: if msg.from_ in admin: wait["joinkick"] = True wait["Sambutan"] = False cl.sendText(msg.to,"Join Kick Sudah Aktif") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Joinkick off"]: if msg.from_ in admin: wait["joinkick"] = False cl.sendText(msg.to,"Join Kick Sudah Di Nonaktifkan") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Invitepro on","Inviteprotect on"]: if msg.from_ in admin: wait["inviteprotect"] = True cl.sendText(msg.to,"Invite Protect Sudah Aktif") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Invitepro off","Inviteprotect off"]: if msg.from_ in admin: wait["inviteprotect"] = False cl.sendText(msg.to,"Invite Protect Sudah Di Nonaktifkan") else: cl.sendText(msg.to,"Only Admin") elif "Qr on" in msg.text: if msg.from_ in admin: wait["Qr"][msg.to] = True wait["Qron"] = True cl.sendText(msg.to,"QR Protect Sudah Aktif") print wait["Qr"] else: cl.sendText(msg.to,"Only Admin") elif "Qr off" in msg.text: if msg.from_ in admin: wait["Qr"][msg.to] = False wait["Qron"] = False cl.sendText(msg.to,"Qr Protect Sudah Di Nonaktifkan") print wait["Qr"] else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Autokick on"]: if msg.from_ in admin: wait["AutoKick"][msg.to] = True wait["AutoKickon"] = True cl.sendText(msg.to,"Auto Kick Sudah Aktif") print wait["AutoKick"] else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Autokick off"]: if msg.from_ in admin: wait["AutoKick"][msg.to] = False wait["AutoKickon"] = False cl.sendText(msg.to,"Auto Kick Sudah Di Nonaktifkan") print wait["AutoKick"] else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Ghost on"]: if msg.from_ in admin: wait["Ghost"] = True cl.sendText(msg.to,"Ghost Sudah Aktif") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Ghost off"]: if msg.from_ in admin: wait["Ghost"] = False 
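                    # The toggle commands above all follow one pattern: an admin-gated
                    # keyword flips a boolean in the shared `wait` dict and a confirmation
                    # is sent back to the chat; other handlers consult these flags before
                    # acting (for example "Bye all" checks wait["Leave"]).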
cl.sendText(msg.to,"Ghost Sudah Di Nonaktifkan") else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Allprotect on"]: if msg.from_ in admin: wait["AutoCancel"][msg.to] = True wait["AutoCancelon"] = True wait["inviteprotect"] = True wait["joinkick"] = True wait["AutoKick"][msg.to] = True wait["AutoKickon"] = True wait["Qr"][msg.to] = True wait["Qron"] = True wait["Ghost"] = True cl.sendText(msg.to,"All Protect Sudah Aktif Semua") print wait["AutoCancel"] print wait["AutoKick"] print wait["Qr"] else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["Allprotect off"]: if msg.from_ in admin: wait["AutoCancel"][msg.to] = False wait["AutoCancelon"] = False wait["inviteprotect"] = False wait["joinkick"] = False wait["AutoKick"][msg.to] = False wait["AutoKickon"] = False wait["Qr"][msg.to] = False wait["Qron"] = False wait["Ghost"] = False cl.sendText(msg.to,"All Protect Sudah Di Nonaktifkan Semua") print wait["AutoCancel"] print wait["AutoKick"] print wait["Qr"] else: #else: cl.sendText(msg.to,"Only Admin") elif msg.text in ["K on","Contact on"]: wait["Contact"] = True cl.sendText(msg.to,"Contact Sudah Aktif") elif msg.text in ["K off","Contact off"]: wait["Contact"] = False cl.sendText(msg.to,"Contact Sudah Di Nonaktifkan") elif msg.text in ["Alwaysread on"]: wait["alwaysRead"] = True cl.sendText(msg.to,"Always Read Sudah Aktif") elif msg.text in ["Alwaysread off"]: wait["alwaysRead"] = False cl.sendText(msg.to,"Always Read Sudah Di Nonaktifkan") elif msg.text in ["Sambutan on"]: if wait["Sambutan"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Sambutan Di Aktifkanヾ(*´∀`*)ノ") else: wait["Sambutan"] = True wait["joinkick"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Sudah Onヽ(´▽`)/") elif msg.text in ["Sambutan off"]: if wait["Sambutan"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Sambutan Di Nonaktifkan( ^∇^)") else: wait["Sambutan"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Sudah Off(p′︵‵。)") elif "Sider on" in msg.text: try: del cctv['point'][msg.to] del cctv['sidermem'][msg.to] del cctv['cyduk'][msg.to] except: pass cctv['point'][msg.to] = msg.id cctv['sidermem'][msg.to] = "" cctv['cyduk'][msg.to]=True wait["Sider"] = True cl.sendText(msg.to,"Siap On Cek Sider") elif "Sider off" in msg.text: if msg.to in cctv['point']: cctv['cyduk'][msg.to]=False wait["Sider"] = False cl.sendText(msg.to, "Cek Sider Off") else: cl.sendText(msg.to, "Heh Belom Di Set") elif msg.text in ["Status"]: md = "" if wait["Sambutan"] == True: md+="╠➩✔️ Sambutan : On\n" else:md+="╠➩❌ Sambutan : Off\n" if wait["joinkick"] == True: md+="╠➩✔️ Join Kick : On\n" else:md+="╠➩❌ Join Kick : Off\n" if wait["AutoJoin"] == True: md+="╠➩✔️ Auto Join : On\n" else: md +="╠➩❌ Auto Join : Off\n" if wait["AutoJoinCancel"] == True: md+="╠➩✔️ Auto Join Cancel : On\n" else: md +="╠➩❌ Auto Join Cancel : Off\n" if wait["Leave"] == True: md+="╠➩✔️ Leave : On\n" else: md +="╠➩❌ Leave : Off\n" if wait["Contact"] == True: md+="╠➩✔️ Info Contact : On\n" else: md+="╠➩❌ Info Contact : Off\n" if wait["AutoCancelon"] == True:md+="╠➩✔️ Auto Cancel : On\n" else: md+= "╠➩❌ Auto Cancel : Off\n" if wait["inviteprotect"] == True:md+="╠➩✔️ Invite Protect : On\n" else: md+= "╠➩❌ Invite Protect : Off\n" if wait["Qron"] == True: md+="╠➩✔️ Qr Protect : On\n" else:md+="╠➩❌ Qr Protect : Off\n" if wait["AutoKickon"] == True: md+="╠➩✔️ Auto Kick : On\n" else:md+="╠➩❌ Auto Kick : Off\n" if wait["Ghost"] == True: md+="╠➩✔️ Ghost : On\n" else:md+="╠➩❌ Ghost : Off\n" if wait["alwaysRead"] == True: md+="╠➩✔️ Always Read : 
On\n" else:md+="╠➩❌ Always Read: Off\n" if wait["detectMention"] == True: md+="╠➩✔️ Auto Respon : On\n" else:md+="╠➩❌ Auto Respon : Off\n" if wait["kickMention"] == True: md+="╠➩✔️ Auto Respon Kick : On\n" else:md+="╠➩❌ Auto Respon Kick : Off\n" if wait["Sider"] == True: md+="╠➩✔️ Auto Sider : On\n" else:md+="╠➩❌ Auto Sider: Off\n" if wait["Simi"] == True: md+="╠➩✔️ Simisimi : On\n" else:md+="╠➩❌ Simisimi: Off\n" cl.sendText(msg.to,"╔═════════════════════════\n""║ ☆☞ S T A T U S ☜☆\n""╠═════════════════════════\n"+md+"╚═════════════════════════") elif msg.text in ["Gift","gift"]: msg.contentType = 9 msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '5'} msg.text = None cl.sendMessage(msg) elif msg.text in ["All gift"]: msg.contentType = 9 msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '5'} msg.text = None ki.sendMessage(msg) kk.sendMessage(msg) kc.sendMessage(msg) elif msg.text in ["TC1 Gift","TC1 gift"]: msg.contentType = 9 msg.contentMetadata={'PRDID': '696d7046-843b-4ed0-8aac-3113ed6c0733', 'PRDTYPE': 'THEME', 'MSGTPL': '6'} msg.text = None ki.sendMessage(msg) elif msg.text in ["TC2 Gift","TC2 gift"]: msg.contentType = 9 msg.contentMetadata={'PRDID': '8fe8cdab-96f3-4f84-95f1-6d731f0e273e', 'PRDTYPE': 'THEME', 'MSGTPL': '7'} msg.text = None kk.sendMessage(msg) elif msg.text in ["TC3 Gift","TC3 gift"]: msg.contentType = 9 msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4', 'PRDTYPE': 'THEME', 'MSGTPL': '8'} msg.text = None kc.sendMessage(msg) elif "Gift1 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift1 ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '1', 'STKPKGID': '1380280'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif "Gift2 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift2 ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '2', 'STKPKGID': '1360738'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif "Gift3 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift3 ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '3', 'STKPKGID': '1395389'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif "Gift4 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift4 
","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '4', 'STKPKGID': '1329191'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif "Gift5 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift5 ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '1', 'STKPKGID': '9057'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif "Gift6 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift6 ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '2', 'STKPKGID': '9167'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif "Gift7 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift7 ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '3', 'STKPKGID': '7334'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif "Gift8 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift8 ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '1', 'STKPKGID': '1380280'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif "Gift9 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift9 ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 
'MSGTPL': '4', 'STKPKGID': '1405277'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif "Gift10 " in msg.text: msg.contentType = 13 nk0 = msg.text.replace("Gift10 ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: cl.sendText(msg.to,_name + " Check Your Gift") msg.contentType = 9 msg.contentMetadata= {'PRDTYPE': 'STICKER', 'STKVER': '1', 'MSGTPL': '1', 'STKPKGID': '1296261'} msg.to = target msg.text = None cl.sendMessage(msg) except: msg.contentMetadata = {'mid': target} elif msg.text.lower() in ["wkwkwk","wkwk","hahaha","haha"]: msg.contentType = 7 msg.contentMetadata={'STKID': '100', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["hehehe","hehe"]: msg.contentType = 7 msg.contentMetadata={'STKID': '10', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["galau"]: msg.contentType = 7 msg.contentMetadata={'STKID': '9', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["you","kau","kamu"]: msg.contentType = 7 msg.contentMetadata={'STKID': '7', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["marah","hadeuh","hadeh"]: msg.contentType = 7 msg.contentMetadata={'STKID': '6', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["please","pliss","mohon","tolong"]: msg.contentType = 7 msg.contentMetadata={'STKID': '4', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["haa","haaa","kaget"]: msg.contentType = 7 msg.contentMetadata={'STKID': '3', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["lucu","ngakak","lol"]: msg.contentType = 7 msg.contentMetadata={'STKID': '110', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["hmm","hmmm"]: msg.contentType = 7 msg.contentMetadata={'STKID': '101', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["tidur"]: msg.contentType = 7 msg.contentMetadata={'STKID': '1', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["gemes"]: msg.contentType = 7 msg.contentMetadata={'STKID': '2', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["cantik","imut"]: msg.contentType = 7 msg.contentMetadata={'STKID': '5', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["nyanyi","lalala"]: msg.contentType = 7 msg.contentMetadata={'STKID': '11', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["gugup"]: msg.contentType = 7 msg.contentMetadata={'STKID': '8', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["ok","oke","okay","oce","okee","sip","siph"]: msg.contentType = 7 msg.contentMetadata={'STKID': '13', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["mantab","mantap","nice","keren"]: msg.contentType = 7 msg.contentMetadata={'STKID': '14', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif 
msg.text.lower() in ["ngejek"]: msg.contentType = 7 msg.contentMetadata={'STKID': '15', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["nangis","sedih"]: msg.contentType = 7 msg.contentMetadata={'STKID': '16', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["woi","kampret"]: msg.contentType = 7 msg.contentMetadata={'STKID': '102', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text.lower() in ["huft"]: msg.contentType = 7 msg.contentMetadata={'STKID': '104', 'STKPKGID': '1', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif msg.text in ["Tagall","Tag all"]: group = cl.getGroup(msg.to) nama = [contact.mid for contact in group.members] cb = "" cb2 = "" strt = int(0) akh = int(0) for md in nama: akh = akh + int(6) cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},""" strt = strt + int(7) akh = akh + 1 cb2 += "@nrik \n" cb = (cb[:int(len(cb)-1)]) msg.contentType = 0 msg.text = cb2 msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'} try: cl.sendMessage(msg) except Exception as error: print error elif msg.text in ["Setview","Setpoint","Cctv"]: subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE) cl.sendText(msg.to, "☆Checkpoint Checked☆") print "Setview" elif msg.text in ["Viewseen","Check","Ciduk","Cyduk"]: lurkGroup = "" dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], [] with open('dataSeen/'+msg.to+'.txt','r') as rr: contactArr = rr.readlines() for v in xrange(len(contactArr) -1,0,-1): num = re.sub(r'\n', "", contactArr[v]) contacts.append(num) pass contacts = list(set(contacts)) for z in range(len(contacts)): arg = contacts[z].split('|') userList.append(arg[0]) timelist.append(arg[1]) uL = list(set(userList)) for ll in range(len(uL)): try: getIndexUser = userList.index(uL[ll]) timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000))) recheckData.append(userList[getIndexUser]) except IndexError: conName.append('nones') pass contactId = cl.getContacts(recheckData) for v in range(len(recheckData)): dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')') pass if len(dataResult) > 0: tukang = "╔═════════════════════════\n║ ☆☞ LIST VIEWERS ☜☆\n╠═════════════════════════\n╠➩" grp = '\n╠➩ '.join(str(f) for f in dataResult) total = '\n╠═════════════════════════\n╠➩ Total %i Viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S')) + "\n╚═════════════════════════" cl.sendText(msg.to, "%s %s %s" % (tukang, grp, total)) subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE) cl.sendText(msg.to, "☆Auto Checkpoint☆") else: cl.sendText(msg.to, "☆Belum Ada Viewers☆") print "Viewseen" elif "Kick " in msg.text: if msg.from_ in admin: if 'MENTION' in msg.contentMetadata.keys()!= None: names = re.findall(r'@(\w+)', msg.text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] print mentionees for mention in mentionees: ki.kickoutFromGroup(msg.to,[mention['M']]) elif "Set member: " in msg.text: if msg.from_ in admin: jml = msg.text.replace("Set member: ","") wait["memberscancel"] = int(jml) cl.sendText(msg.to, "Jumlah minimal member telah di set : "+jml) elif "Add all" in msg.text: thisgroup = cl.getGroups([msg.to]) Mids = [contact.mid for contact in thisgroup[0].members] mi_d = Mids[:33] 
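                # Add all: only the first 33 member mids are passed to the bulk add call
                # below, presumably to stay within a per-request limit of the add API.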
cl.findAndAddContactsByMids(mi_d) cl.sendText(msg.to,"Success Add all") elif msg.text in ["Invite"]: wait["invite"] = True cl.sendText(msg.to,"Send Contact") elif msg.text in ["Auto like"]: wait["likeOn"] = True cl.sendText(msg.to,"Shere Post Kamu Yang Mau Di Like!") elif msg.text in ["Steal contact"]: wait["steal"] = True cl.sendText(msg.to,"Send Contact") elif msg.text in ["Giftbycontact"]: wait["gift"] = True cl.sendText(msg.to,"Send Contact") elif "Recover" in msg.text: thisgroup = cl.getGroups([msg.to]) Mids = [contact.mid for contact in thisgroup[0].members] mi_d = Mids[:33] cl.createGroup("Recover", mi_d) cl.sendText(msg.to,"Success recover") elif ("Gn: " in msg.text): if msg.toType == 2: X = cl.getGroup(msg.to) X.name = msg.text.replace("Gn: ","") cl.updateGroup(X) else: cl.sendText(msg.to,"It can't be used besides the group.") elif "Kick: " in msg.text: midd = msg.text.replace("Kick: ","") kicker = [ki,kk,kc] if midd not in admin: random.choice(kicker).kickoutFromGroup(msg.to,[midd]) else: cl.sendText(msg.to,"Admin Detected") elif "Invite: " in msg.text: midd = msg.text.replace("Invite: ","") cl.findAndAddContactsByMid(midd) ki.findAndAddContactsByMid(midd) kk.findAndAddContactsByMid(midd) kc.findAndAddContactsByMid(midd) kr.findAndAddContactsByMid(midd) random.choice(KAC).inviteIntoGroup(msg.to,[midd]) elif "Invite creator" in msg.text: midd = "u71b6799e1c37868a871d442e67633182" random.choice(KAC).inviteIntoGroup(msg.to,[midd]) elif msg.text in ["Welcome","welcome","Welkam","welkam","Wc","wc"]: gs = cl.getGroup(msg.to) cl.sendText(msg.to,"Selamat Datang Di "+ gs.name) msg.contentType = 7 msg.contentMetadata={'STKID': '247', 'STKPKGID': '3', 'STKVER': '100'} msg.text = None cl.sendMessage(msg) elif "Bc: " in msg.text: bc = msg.text.replace("Bc: ","") gid = cl.getGroupIdsJoined() if msg.from_ in Creator: for i in gid: cl.sendText(i,"=======[BROADCAST]=======\n\n"+bc+"\n\nContact Me : line.me/ti/p/~a_ulul15") cl.sendText(msg.to,"Success BC BosQ") else: cl.sendText(msg.to,"Khusus Admin") elif msg.text in ["Cancel"]: gid = cl.getGroupIdsInvited() for i in gid: cl.rejectGroupInvitation(i) cl.sendText(msg.to,"All invitations have been refused") elif msg.text in ["TC1 Cancel"]: gid = ki.getGroupIdsInvited() for i in gid: ki.rejectGroupInvitation(i) ki.sendText(msg.to,"All invitations have been refused") elif msg.text in ["TC2 Cancel"]: gid = kk.getGroupIdsInvited() for i in gid: kk.rejectGroupInvitation(i) kk.sendText(msg.to,"All invitations have been refused") elif msg.text in ["TC3 Cancel"]: gid = kc.getGroupIdsInvited() for i in gid: kc.rejectGroupInvitation(i) kc.sendText(msg.to,"All invitations have been refused") elif msg.text in ["Gurl"]: if msg.toType == 2: x = cl.getGroup(msg.to) if x.preventJoinByTicket == True: x.preventJoinByTicket = False cl.updateGroup(x) gurl = cl.reissueGroupTicket(msg.to) cl.sendText(msg.to,"line://ti/g/" + gurl) else: if wait["lang"] == "JP": cl.sendText(msg.to,"Can't be used outside the group") else: cl.sendText(msg.to,"Not for use less than group") elif msg.text in ["All join","Join all"]: if msg.from_ in admin: G = cl.getGroup(msg.to) ginfo = cl.getGroup(msg.to) G.preventJoinByTicket = False cl.updateGroup(G) invsend = 0 Ticket = cl.reissueGroupTicket(msg.to) ki.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.2) kk.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.2) kc.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.2) kr.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.2) G = cl.getGroup(msg.to) 
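                    # All join: after the helper accounts (ki, kk, kc, kr) have entered
                    # through the reissued ticket, the statements below set
                    # preventJoinByTicket back to True so the group link is closed again.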
G.preventJoinByTicket = True ki.updateGroup(G) G.preventJoinByTicket(G) ki.updateGroup(G) else: cl.sendText(msg.to,"Sape lu!") elif msg.text in ["TC1 join"]: if msg.from_ in admin: X = cl.getGroup(msg.to) X.preventJoinByTicket = False cl.updateGroup(X) invsend = 0 Ti = cl.reissueGroupTicket(msg.to) ki.acceptGroupInvitationByTicket(msg.to,Ti) G = kk.getGroup(msg.to) G.preventJoinByTicket = True ki.updateGroup(G) else: cl.sendText(msg.to,"Sape lu!") elif msg.text in ["TC2 join"]: if msg.from_ in admin: X = cl.getGroup(msg.to) X.preventJoinByTicket = False cl.updateGroup(X) invsend = 0 Ti = cl.reissueGroupTicket(msg.to) kk.acceptGroupInvitationByTicket(msg.to,Ti) G = ki.getGroup(msg.to) G.preventJoinByTicket = True kk.updateGroup(G) else: cl.sendText(msg.to,"Sape lu!") elif msg.text in ["TC3 join"]: if msg.from_ in admin: G = cl.getGroup(msg.to) ginfo = cl.getGroup(msg.to) G.preventJoinByTicket = False cl.updateGroup(G) invsend = 0 Ticket = cl.reissueGroupTicket(msg.to) kc.acceptGroupInvitationByTicket(msg.to,Ticket) G.preventJoinByTicket = True kc.updateGroup(G) else: cl.sendText(msg.to,"Sape lu!") elif msg.text in ["TC4 join"]: if msg.from_ in admin: G = cl.getGroup(msg.to) ginfo = cl.getGroup(msg.to) G.preventJoinByTicket = False cl.updateGroup(G) invsend = 0 Ticket = cl.reissueGroupTicket(msg.to) kr.acceptGroupInvitationByTicket(msg.to,Ticket) G.preventJoinByTicket = True kr.updateGroup(G) else: cl.sendText(msg.to,"Sape lu!") elif msg.text in ["Ghost join"]: if msg.from_ in admin: G = cl.getGroup(msg.to) ginfo = cl.getGroup(msg.to) G.preventJoinByTicket = False cl.updateGroup(G) invsend = 0 Ticket = cl.reissueGroupTicket(msg.to) km.acceptGroupInvitationByTicket(msg.to,Ticket) G.preventJoinByTicket = True km.updateGroup(G) else: cl.sendText(msg.to,"Sape lu!") elif msg.text in ["timeline"]: try: url = cl.activity(limit=5) cl.sendText(msg.to,url['result']['posts'][0]['postInfo']['postId']) except Exception as E: print E elif msg.text in ["Bye all"]: if wait["Leave"] == True: ki.leaveGroup(msg.to) kk.leaveGroup(msg.to) kc.leaveGroup(msg.to) kr.leaveGroup(msg.to) else: cl.sendText(msg.to,"Leavenya Belum On") elif msg.text in ["@bye","@Bye"]: if wait["Leave"] == True: cl.leaveGroup(msg.to) wait["Leave"] = False else: cl.sendText(msg.to,"Bilang Dulu Sama Admin Ku") elif msg.text in ["Absen"]: cl.sendText(msg.to,"Pasukan Absen!!") ki.sendText(msg.to,"TC1 Hadiir \(ˆ▿ˆ)/") kk.sendText(msg.to,"TC2 Hadiir \(ˆ▿ˆ)/") kc.sendText(msg.to,"TC3 Hadiir \(ˆ▿ˆ)/") kr.sendText(msg.to,"Hadiir Semua Kapten \(ˆ▿ˆ)/") elif msg.text.lower() in ["respon"]: cl.sendText(msg.to,responsename) ki.sendText(msg.to,responsename2) kk.sendText(msg.to,responsename3) kc.sendText(msg.to,responsename4) kr.sendText(msg.to,responsename5) elif msg.text in ["Sp","Speed","speed"]: start = time.time() print("Speed") elapsed_time = time.time() - start cl.sendText(msg.to, "Tunggu Bentaar BOS....") cl.sendText(msg.to, "%sseconds" % (elapsed_time)) elif msg.text in ["Speed test"]: start = time.time() cl.sendText(msg.to, "Tunggu Bentaar BOS......") elapsed_time = time.time() - start cl.sendText(msg.to, "%sseconds" % (elapsed_time)) elif "Nk: " in msg.text: if msg.from_ in Creator: X = cl.getGroup(msg.to) X.preventJoinByTicket = False cl.updateGroup(X) invsend = 0 Ti = cl.reissueGroupTicket(msg.to) kr.acceptGroupInvitationByTicket(msg.to,Ti) G = kk.getGroup(msg.to) G.preventJoinByTicket = True kk.updateGroup(G) nk0 = msg.text.replace("Nk: ","") nk1 = nk0.lstrip() nk2 = nk1.replace("@","") nk3 = nk2.rstrip() _name = nk3 targets = [] for s in 
X.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: if target not in admin: kr.kickoutFromGroup(msg.to,[target]) kr.leaveGroup(msg.to) ki.sendText(msg.to,"Succes BosQ") kk.sendText(msg.to,"Pakyu~") else: cl.sendText(msg.to,"Admin Detected") else: cl.sendText(msg.to,"Lu sape!") elif msg.text in ["Ban"]: if msg.from_ in admin: wait["wblacklist"] = True ki.sendText(msg.to,"send contact") elif msg.text in ["Unban"]: if msg.from_ in admin: wait["dblacklist"] = True ki.sendText(msg.to,"send contact") elif "Ban @" in msg.text: if msg.from_ in admin: if msg.toType == 2: print "@Ban by mention" _name = msg.text.replace("Ban @","") _nametarget = _name.rstrip(' ') gs = ki.getGroup(msg.to) gs = kk.getGroup(msg.to) gs = kc.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kc.sendText(msg.to,"Not found") else: for target in targets: if target not in admin: try: wait["blacklist"][target] = True f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) ki.sendText(msg.to,"Succes BosQ") except: ki.sendText(msg.to,"Error") else: cl.sendText(msg.to,"Admin Detected~") elif msg.text in ["Banlist","Ban list"]: if msg.from_ in admin: if wait["blacklist"] == {}: random.choice(KAC).sendText(msg.to,"Tidak Ada") else: mc = "" for mi_d in wait["blacklist"]: mc += "->" +cl.getContact(mi_d).displayName + "\n" random.choice(KAC).sendText(msg.to,"===[Blacklist User]===\n"+mc) elif "Unban @" in msg.text: if msg.toType == 2: print "@Unban by mention" if msg.from_ in admin: _name = msg.text.replace("Unban @","") _nametarget = _name.rstrip(' ') gs = ki.getGroup(msg.to) gs = kk.getGroup(msg.to) gs = kc.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kk.sendText(msg.to,"Not found") else: for target in targets: try: del wait["blacklist"][target] f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) ki.sendText(msg.to,"Succes BosQ") except: ki.sendText(msg.to,"Succes BosQ") elif msg.text.lower() == 'clear ban': if msg.from_ in admin: wait["blacklist"] = {} cl.sendText(msg.to,"ヽ( ^ω^)ノ└ ❉Unbanned All Success❉ ┐") elif msg.text.lower() in ["sayang","chucky"]: ki.sendText(msg.to,"Apa Sayang :*") elif msg.text in ["Kill ban"]: if msg.from_ in admin: if msg.toType == 2: group = cl.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) if matched_list == []: ki.sendText(msg.to,"There was no blacklist user") return for jj in matched_list: random.choice(KAC).kickoutFromGroup(msg.to,[jj]) ki.sendText(msg.to,"Blacklist emang pantas tuk di usir") else: cl.sendText(msg.to, "Khusus creator") elif msg.text in ["Kill"]: if msg.toType == 2: if msg.from_ in admin: group = ki.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) if matched_list == []: kk.sendText(msg.to,"Fuck You") kc.sendText(msg.to,"Fuck You") return for jj in matched_list: try: klist=[ki,kk,kc] kicker=random.choice(klist) kicker.kickoutFromGroup(msg.to,[jj]) print (msg.to,[jj]) except: pass elif "Kickall" == msg.text: if msg.from_ in Creator: if msg.toType == 2: print 
"Kick all member" _name = msg.text.replace("Kickall","") gs = ki.getGroup(msg.to) gs = kk.getGroup(msg.to) gs = kc.getGroup(msg.to) ki.sendText(msg.to,"Sampai jumpaa~") kc.sendText(msg.to,"Dadaaah~") targets = [] for g in gs.members: if _name in g.displayName: targets.append(g.mid) if targets == []: ki.sendText(msg.to,"Not found.") else: for target in targets: if target not in admin: try: klist=[ki,kk,kc] kicker=random.choice(klist) kicker.kickoutFromGroup(msg.to,[target]) print (msg.to,[g.mid]) except Exception as e: cl.sendText(msg.to,str(e)) cl.inviteIntoGroup(msg.to, targets) elif msg.text in ["Bot restart","Reboot"]: if msg.from_ in Creator: cl.sendText(msg.to, "Bot Has Been Restarted...") restart_program() print "@Restart" else: cl.sendText(msg.to, "No Access") elif msg.text in ["Turn off"]: if msg.from_ in Creator: try: import sys sys.exit() except: pass elif 'Crash' in msg.text: if msg.from_ in Creator: msg.contentType = 13 msg.contentMetadata = {'mid': "NADYA,'"} cl.sendMessage(msg) elif "Kapten copy @" in msg.text: print "[COPY] Ok" _name = msg.text.replace("Kapten copy @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to, "Not Found...") else: for target in targets: try: cl.CloneContactProfile(target) cl.sendText(msg.to, "Copied (^_^)") except Exception as e: print e elif "TC1 copy @" in msg.text: print "[COPY] Ok" _name = msg.text.replace("TC1 copy @","") _nametarget = _name.rstrip(' ') gs = ki.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: ki.sendText(msg.to, "Not Found...") else: for target in targets: try: ki.CloneContactProfile(target) ki.sendText(msg.to, "Copied (^_^)") except Exception as e: print e elif "TC2 copy @" in msg.text: print "[COPY] Ok" _name = msg.text.replace("TC2 copy @","") _nametarget = _name.rstrip(' ') gs = kk.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kk.sendText(msg.to, "Not Found...") else: for target in targets: try: kk.CloneContactProfile(target) kk.sendText(msg.to, "Copied (^_^)") except Exception as e: print e elif "TC3 copy @" in msg.text: print "[COPY] Ok" _name = msg.text.replace("TC3 copy @","") _nametarget = _name.rstrip(' ') gs = kc.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kc.sendText(msg.to, "Not Found...") else: for target in targets: try: kc.CloneContactProfile(target) kc.sendText(msg.to, "Copied (^_^)") except Exception as e: print e elif "TC4 copy @" in msg.text: print "[COPY] Ok" _name = msg.text.replace("TC4 copy @","") _nametarget = _name.rstrip(' ') gs = kr.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: kr.sendText(msg.to, "Not Found...") else: for target in targets: try: kr.CloneContactProfile(target) kr.sendText(msg.to, "Copied (^_^)") except Exception as e: print e elif msg.text in ["Backup all"]: try: ki.updateDisplayPicture(backup2.pictureStatus) ki.updateProfile(backup2) kk.updateDisplayPicture(backup3.pictureStatus) kk.updateProfile(backup3) kc.updateDisplayPicture(backup4.pictureStatus) kc.updateProfile(backup4) kr.updateDisplayPicture(backup5.pictureStatus) kr.updateProfile(backup5) cl.updateDisplayPicture(backup1.pictureStatus) cl.updateProfile(backup1) cl.sendText(msg.to, 
"All Done (^_^)") except Exception as e: cl.sendText(msg.to, str(e)) elif "/musik " in msg.text: songname = msg.text.replace("/musik ","") params = {"songname": songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: abc = song[3].replace('https://','http://') cl.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4]) cl.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ") cl.sendAudioWithURL(msg.to,abc) cl.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0]) elif '/lirik ' in msg.text.lower(): try: songname = msg.text.lower().replace('/lirik ','') params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: hasil = 'Lyric Lagu (' hasil += song[0] hasil += ')\n\n' hasil += song[5] cl.sendText(msg.to, hasil) except Exception as wak: cl.sendText(msg.to, str(wak)) elif "/musrik " in msg.text: songname = msg.text.replace("/musrik ","") params = {"songname": songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: abc = song[3].replace('https://','http://') hasil = 'Lyric Lagu (' hasil += song[0] hasil += ')\n\n' hasil += song[5] cl.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ") cl.sendAudioWithURL(msg.to,abc) cl.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4] +"\n\n" + hasil) cl.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0]) elif "Fancytext: " in msg.text: txt = msg.text.replace("Fancytext: ", "") cl.kedapkedip(msg.to,txt) print "[Command] Kedapkedip" elif "cover @" in msg.text: if msg.toType == 2: cover = msg.text.replace("cover @","") _nametarget = cover.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Not found") else: for target in targets: try: h = cl.channel.getHome(target) objId = h["result"]["homeInfo"]["objectId"] cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId) except Exception as error: print error cl.sendText(msg.to,"Upload image failed.") elif "Cover @" in msg.text: if msg.toType == 2: cover = msg.text.replace("Cover @","") _nametarget = cover.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Not found") else: for target in targets: try: h = cl.channel.getHome(target) objId = h["result"]["homeInfo"]["objectId"] cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId) except Exception as error: print error cl.sendText(msg.to,"Upload image failed.") elif "pp @" in msg.text: if msg.toType == 2: cover = msg.text.replace("pp @","") _nametarget = cover.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Not found") else: for target in targets: try: h = cl.getContact(target) cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) except Exception as error: print error cl.sendText(msg.to,"Upload image 
failed.") elif "Pp @" in msg.text: if msg.toType == 2: cover = msg.text.replace("Pp @","") _nametarget = cover.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Not found") else: for target in targets: try: h = cl.getContact(target) cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) except Exception as error: print error cl.sendText(msg.to,"Upload image failed.") elif msg.text.lower() in ["van","yog","wan","gong","tep","pap creator"]: link = ["http://dl.profile.line-cdn.net/0hbPvoismJPX9LFhHY8ztCKHdTMxI8OCw3JXclGzwRYBpgci99fyV2GzwUY01icXl5J3EnHjxBakxj"] pilih = random.choice(link) ki.sendImageWithURL(msg.to,pilih) elif msg.text.lower() in ["van","yog","wan","gong","tep","pap owner","pap creator"]: link = ["http://dl.profile.line-cdn.net/0hbPvoismJPX9LFhHY8ztCKHdTMxI8OCw3JXclGzwRYBpgci99fyV2GzwUY01icXl5J3EnHjxBakxj"] pilih = random.choice(link) ki.sendImageWithURL(msg.to,pilih) elif "Spam: " in msg.text: bctxt = msg.text.replace("Spam: ", "") t = 10 while(t): random.choice(KAC).sendText(msg.to, (bctxt)) t-=1 elif "Scbc " in msg.text: bctxt = msg.text.replace("Scbc ", "") orang = cl.getAllContactIds() t = 20 for manusia in orang: while(t): cl.sendText(manusia, (bctxt)) t-=1 elif "Cbc " in msg.text: broadcasttxt = msg.text.replace("Cbc ", "") orang = cl.getAllContactIds() for manusia in orang: cl.sendText(manusia, (broadcasttxt)) elif '/ig ' in msg.text.lower(): try: instagram = msg.text.lower().replace("/ig ","") html = requests.get('https://www.instagram.com/' + instagram + '/?') soup = BeautifulSoup(html.text, 'html.parser') data = soup.find_all('meta', attrs={'property':'og:description'}) text = data[0].get('content').split() data1 = soup.find_all('meta', attrs={'property':'og:image'}) text1 = data1[0].get('content').split() nadya = text1[0].replace("s150x150/","") user = "Name: " + text[-2] + "\n" user1 = "Username: " + text[-1] + "\n" followers = "Followers: " + text[0] + "\n" following = "Following: " + text[2] + "\n" post = "Post: " + text[4] + "\n" link = "Link: " + "https://www.instagram.com/" + instagram detail = "========INSTAGRAM INFO ========\n" details = "\n========INSTAGRAM INFO ========" cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details) cl.sendImageWithURL(msg.to, nadya) except Exception as njer: cl.sendText(msg.to, str(njer)) elif "Checkig " in msg.text: separate = msg.text.split(" ") user = msg.text.replace(separate[0] + " ","") if user.startswith("@"): user = user.replace("@","") profile = "https://www.instagram.com/" + user with requests.session() as x: x.headers['user-agent'] = 'Mozilla/5.0' end_cursor = '' for count in range(1, 999): print('PAGE: ', count) r = x.get(profile, params={'max_id': end_cursor}) data = re.search(r'window._sharedData = (\{.+?});</script>', r.text).group(1) j = json.loads(data) for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']: if node['is_video']: page = 'https://www.instagram.com/p/' + node['code'] r = x.get(page) url = re.search(r'"video_url": "([^"]+)"', r.text).group(1) print(url) cl.sendVideoWithURL(msg.to,url) else: print (node['display_src']) cl.sendImageWithURL(msg.to,node['display_src']) end_cursor = re.search(r'"end_cursor": "([^"]+)"', r.text).group(1) elif 'Youtubelink: ' in msg.text: try: textToSearch = (msg.text).replace('Youtube ', "").strip() query = urllib.quote(textToSearch) url = 
"https://www.youtube.com/results?search_query=" + query response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, "html.parser") results = soup.find(attrs={'class':'yt-uix-tile-link'}) cl.sendText(msg.to,'https://www.youtube.com' + results['href']) except: cl.sendText(msg.to,"Could not find it") elif 'Youtubevideo: ' in msg.text: try: textToSearch = (msg.text).replace('Youtubevideo: ', "").strip() query = urllib.quote(textToSearch) url = "https://www.youtube.com/results?search_query=" + query response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, "html.parser") results = soup.find(attrs={'class': 'yt-uix-tile-link'}) cl.sendVideoWithURL(msg.to,'https://www.youtube.com' + results['href']) except: cl.sendText(msg.to, "Could not find it") elif "Say-id " in msg.text: say = msg.text.replace("Say-id ","") lang = 'id' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-en " in msg.text: say = msg.text.replace("Say-en ","") lang = 'en' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-jp " in msg.text: say = msg.text.replace("Say-jp ","") lang = 'ja' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say welcome" in msg.text: gs = cl.getGroup(msg.to) say = msg.text.replace("Say welcome","Selamat Datang Di "+ gs.name) lang = 'id' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif msg.text.lower() in ["hi","hai","halo","hallo"]: beb = "Hi Sayang 😘 " +cl.getContact(msg.from_).displayName + " 􀸂􀆇starry heart􏿿" kr.sendText(msg.to,beb) elif "playstore " in msg.text.lower(): tob = msg.text.lower().replace("playstore ","") cl.sendText(msg.to,"Sedang Mencari...") cl.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLink : https://play.google.com/store/search?q=" + tob) cl.sendText(msg.to,"Tuh Linknya Kak (^_^)") elif "Mid @" in msg.text: _name = msg.text.replace("Mid @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: random.choice(KAC).sendText(msg.to, g.mid) else: pass elif "/bio " in msg.text: string = msg.text.replace("/bio ","") if len(string.decode('utf-8')) <= 500: profile = cl.getProfile() profile.statusMessage = string cl.updateProfile(profile) ki.updateProfile(profile) kk.updateProfile(profile) kc.updateProfile(profile) kr.updateProfile(profile) cl.sendText(msg.to,"All Done") elif "/cnkapten" in msg.text: if msg.from_ in Creator: string = msg.text.replace("/cnkapten","Mi Kapten") if len(string.decode('utf-8')) <= 5000: profile = cl.getProfile() profile.displayName = string cl.updateProfile(profile) cl.sendText(msg.to,"Done") elif "/cntc1" in msg.text: if msg.from_ in Creator: string = msg.text.replace("/cntc1","Mi TC1") if len(string.decode('utf-8')) <= 5000: profile = ki.getProfile() profile.displayName = string ki.updateProfile(profile) ki.sendText(msg.to,"Done") elif "/cntc2" in msg.text: if msg.from_ in Creator: string = msg.text.replace("/cntc2","Mi TC2") if len(string.decode('utf-8')) <= 5000: profile = kk.getProfile() profile.displayName = string kk.updateProfile(profile) kk.sendText(msg.to,"Done") elif "/cntc3" in msg.text: if msg.from_ in Creator: string = msg.text.replace("/cntc3","Mi TC3") if len(string.decode('utf-8')) <= 5000: profile = kc.getProfile() profile.displayName = string kc.updateProfile(profile) kc.sendText(msg.to,"Done") elif "/cntc4" in msg.text: if msg.from_ in Creator: string = 
msg.text.replace("/cntc4","Mi TC4") if len(string.decode('utf-8')) <= 5000: profile = cl.getProfile() profile.displayName = string kr.updateProfile(profile) kr.sendText(msg.to,"Done") elif "Ulti " in msg.text: if msg.from_ in Creator: ulti0 = msg.text.replace("Ulti ","") ulti1 = ulti0.rstrip() ulti2 = ulti1.replace("@","") ulti3 = ulti2.rstrip() _name = ulti3 gs = cl.getGroup(msg.to) ginfo = cl.getGroup(msg.to) gs.preventJoinByTicket = False cl.updateGroup(gs) invsend = 0 Ticket = cl.reissueGroupTicket(msg.to) km.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.2) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets ==[]: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: km.kickoutFromGroup(msg.to,[target]) km.leaveGroup(msg.to) print (msg.to,[g.mid]) except: km.sendText(msg.t,"Ter ELIMINASI....") km.sendText(msg.to,"WOLES brooo....!!!") km.leaveGroup(msg.to) gs = cl.getGroup(msg.to) gs.preventJoinByTicket = True cl.updateGroup(gs) gs.preventJoinByTicket(gs) cl.updateGroup(gs) elif msg.text.lower() in ["mymid","myid"]: middd = "Name : " +cl.getContact(msg.from_).displayName + "\nMid : " +msg.from_ kr.sendText(msg.to,middd) elif msg.text.lower() in ["me"]: msg.contentType = 13 msg.contentMetadata = {'mid': msg.from_} cl.sendMessage(msg) elif "/apakah " in msg.text: apk = msg.text.replace("/apakah ","") rnd = ["Ya","Tidak","Bisa Jadi","Mungkin"] p = random.choice(rnd) lang = 'id' tts = gTTS(text=p, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "/hari " in msg.text: apk = msg.text.replace("/hari ","") rnd = ["Senin","Selasa","Rabu","Kamis","Jumat","Sabtu","Minggu"] p = random.choice(rnd) lang = 'id' tts = gTTS(text=p, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "/berapa " in msg.text: apk = msg.text.replace("/berapa ","") rnd = ['10%','20%','30%','40%','50%','60%','70%','80%','90%','100%','0%'] p = random.choice(rnd) lang = 'id' tts = gTTS(text=p, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "/berapakah " in msg.text: apk = msg.text.replace("/berapakah ","") rnd = ['1','2','3','4','5','6','7','8','9','10','Tidak Ada'] p = random.choice(rnd) lang = 'id' tts = gTTS(text=p, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "/kapan " in msg.text: apk = msg.text.replace("/kapan ","") rnd = ["kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi","Tidak Akan Pernah"] p = random.choice(rnd) lang = 'id' tts = gTTS(text=p, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif msg.text in ["Simisimi on","Simisimi:on"]: settings["simiSimi"][msg.to] = True wait["Simi"] = True cl.sendText(msg.to," Simisimi Di Aktifkan") elif msg.text in ["Simisimi off","Simisimi:off"]: settings["simiSimi"][msg.to] = False wait["Simi"] = False cl.sendText(msg.to,"Simisimi Di Nonaktifkan") elif "Image " in msg.text: search = msg.text.replace("Image ","") url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search raw_html = (download_page(url)) items = [] items = items + (_images_get_all_items(raw_html)) path = random.choice(items) print path try: cl.sendImageWithURL(msg.to,path) except: pass elif "Youtubesearch: " in msg.text: query = msg.text.replace("Youtube ","") with requests.session() as s: s.headers['user-agent'] = 'Mozilla/5.0' url = 'http://www.youtube.com/results' params = {'search_query': query} r = s.get(url, 
params=params) soup = BeautifulSoup(r.content, 'html.parser') hasil = "" for a in soup.select('.yt-lockup-title > a[title]'): if '&list=' not in a['href']: hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n')) cl.sendText(msg.to,hasil) print '[Command] Youtube Search' elif "Tr-id " in msg.text: isi = msg.text.replace("Tr-id ","") translator = Translator() hasil = translator.translate(isi, dest='id') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Tr-en " in msg.text: isi = msg.text.replace("Tr-en ","") translator = Translator() hasil = translator.translate(isi, dest='en') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Tr-th " in msg.text: isi = msg.text.replace("Tr-th ","") translator = Translator() hasil = translator.translate(isi, dest='th') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Id@en" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'en' kata = msg.text.replace("Id@en ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result) elif "En@id" in msg.text: bahasa_awal = 'en' bahasa_tujuan = 'id' kata = msg.text.replace("En@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result) elif "Id@th" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'th' kata = msg.text.replace("Id@en ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result) elif "Th@id" in msg.text: bahasa_awal = 'th' bahasa_tujuan = 'id' kata = msg.text.replace("Id@en ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result) elif msg.text in ["Friendlist"]: contactlist = cl.getAllContactIds() kontak = cl.getContacts(contactlist) num=1 msgs="═════════List Friend═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Memlist"]: kontak = cl.getGroup(msg.to) group = kontak.members 
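                # Memlist: number and list every member fetched above, then send the
                # roster back as a single text message.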
                num=1
                msgs="═════════List Member═════════"
                for ids in group:
                    msgs+="\n[%i] %s" % (num, ids.displayName)
                    num=(num+1)
                msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
                cl.sendText(msg.to, msgs)
            # Spam: the accounts take turns posting a canned song / banter routine.
            elif msg.text in ["Spam"]:
                if msg.from_ in admin:
                    cl.sendText(msg.to,"Aku belum mandi")
                    ki.sendText(msg.to,"Tak tun tuang")
                    kk.sendText(msg.to,"Tak tun tuang")
                    cl.sendText(msg.to,"Tapi masih cantik juga")
                    ki.sendText(msg.to,"Tak tun tuang")
                    kk.sendText(msg.to,"Tak tun tuang")
                    cl.sendText(msg.to,"apalagi kalau sudah mandi")
                    ki.sendText(msg.to,"Tak tun tuang")
                    kk.sendText(msg.to,"Pasti cantik sekali")
                    cl.sendText(msg.to,"yiha")
                    ki.sendText(msg.to,"Kalau orang lain melihatku")
                    kk.sendText(msg.to,"Tak tun tuang")
                    cl.sendText(msg.to,"Badak aku taba bana")
                    ki.sendText(msg.to,"Tak tun tuang")
                    kk.sendText(msg.to,"Tak tuntuang")
                    cl.sendText(msg.to,"Tapi kalau langsuang diidu")
                    ki.sendText(msg.to,"Tak tun tuang")
                    kk.sendText(msg.to,"Atagfirullah baunya")
                    cl.sendText(msg.to,"Males lanjutin ah")
                    ki.sendText(msg.to,"Sepi bat")
                    kk.sendText(msg.to,"Iya sepi udah udah")
                    cl.sendText(msg.to,"Gaada yang denger juga kita nyanyi")
                    ki.sendText(msg.to,"Nah")
                    kk.sendText(msg.to,"Mending gua makan dulu")
                    cl.sendText(msg.to,"Siyap")
                    ki.sendText(msg.to,"Okeh")
                    kk.sendText(msg.to,"Katanya owner kita Jomblo ya")
                    cl.sendText(msg.to,"Iya emang")
                    ki.sendText(msg.to,"Denger denger si lagi nyari pacar doi")
                    kk.sendText(msg.to,"Udah ah gosip mulu doain aja biar dapet")
            # Getvid @name: sends the target's avatar URL through the video sender.
            elif "Getvid @" in msg.text:
                print "[Command]dp executing"
                _name = msg.text.replace("Getvid @","")
                _nametarget = _name.rstrip(' ')
                gs = cl.getGroup(msg.to)
                targets = []
                for g in gs.members:
                    if _nametarget == g.displayName:
                        targets.append(g.mid)
                if targets == []:
                    cl.sendText(msg.to,"Contact not found")
                else:
                    for target in targets:
                        try:
                            contact = cl.getContact(target)
                            path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                            cl.sendVideoWithURL(msg.to, path)
                        except Exception as e:
                            raise e
                print "[Command]dp executed"
            elif "Getgroup image" in msg.text:
                group = cl.getGroup(msg.to)
                path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
                cl.sendImageWithURL(msg.to,path)
            elif "Urlgroup image" in msg.text:
                group = cl.getGroup(msg.to)
                path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
                cl.sendText(msg.to,path)
            # Getname / Getprofile / Getcontact / Getinfo all resolve the first
            # mentioned member from the MENTION metadata and reply with their data.
            elif "Getname" in msg.text:
                key = eval(msg.contentMetadata["MENTION"])
                key1 = key["MENTIONEES"][0]["M"]
                contact = cl.getContact(key1)
                cu = cl.channel.getCover(key1)
                try:
                    cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
                except:
                    cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
            elif "Getprofile" in msg.text:
                key = eval(msg.contentMetadata["MENTION"])
                key1 = key["MENTIONEES"][0]["M"]
                contact = cl.getContact(key1)
                cu = cl.channel.getCover(key1)
                path = str(cu)
                image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                try:
                    cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
                    cl.sendText(msg.to,"Profile Picture " + contact.displayName)
                    cl.sendImageWithURL(msg.to,image)
                    cl.sendText(msg.to,"Cover " + contact.displayName)
                    cl.sendImageWithURL(msg.to,path)
                except:
                    pass
            elif "Getcontact" in msg.text:
                key = eval(msg.contentMetadata["MENTION"])
                key1 = key["MENTIONEES"][0]["M"]
                mmid = cl.getContact(key1)
                msg.contentType = 13
                msg.contentMetadata = {"mid": key1}
                cl.sendMessage(msg)
            elif "Getinfo" in msg.text:
                key = eval(msg.contentMetadata["MENTION"])
                key1 = key["MENTIONEES"][0]["M"]
                contact = cl.getContact(key1)
                cu = cl.channel.getCover(key1)
                try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu)) except: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu)) elif "Getbio" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = cl.getContact(key1) cu = cl.channel.getCover(key1) try: cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage) except: cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage) elif msg.text.lower() == 'runtime': eltime = time.time() - mulai van = "Bot Sudah Berjalan Selama :\n"+waktu(eltime) cl.sendText(msg.to,van) elif "Checkdate " in msg.text: tanggal = msg.text.replace("Checkdate ","") r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal) data=r.text data=json.loads(data) lahir = data["data"]["lahir"] usia = data["data"]["usia"] ultah = data["data"]["ultah"] zodiak = data["data"]["zodiak"] cl.sendText(msg.to,"========== I N F O R M A S I ==========\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n========== I N F O R M A S I ==========") elif msg.text in ["Kalender","Time","Waktu"]: timeNow = datetime.now() timeHours = datetime.strftime(timeNow,"(%H:%M)") day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] inihari = datetime.today() hr = inihari.strftime('%A') bln = inihari.strftime('%m') for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): bln = bulan[k-1] rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]" cl.sendText(msg.to, rst) elif "SearchID: " in msg.text: userid = msg.text.replace("SearchID: ","") contact = cl.findContactsByUserid(userid) msg.contentType = 13 msg.contentMetadata = {'mid': contact.mid} cl.sendMessage(msg) elif "Searchid: " in msg.text: userid = msg.text.replace("Searchid: ","") contact = cl.findContactsByUserid(userid) msg.contentType = 13 msg.contentMetadata = {'mid': contact.mid} cl.sendMessage(msg) elif "removechat" in msg.text.lower(): if msg.from_ in admin: try: cl.removeAllMessages(op.param2) ki.removeAllMessages(op.param2) kk.removeAllMessages(op.param2) kc.removeAllMessages(op.param2) kr.removeAllMessages(op.param2) print "[Command] Remove Chat" cl.sendText(msg.to,"Done") except Exception as error: print error cl.sendText(msg.to,"Error") elif "Invitemeto: " in msg.text: if msg.from_ in admin: gid = msg.text.replace("Invitemeto: ","") if gid == "": cl.sendText(msg.to,"Invalid group id") else: try: cl.findAndAddContactsByMid(msg.from_) ki.findAndAddContactsByMid(msg.from_) kk.findAndAddContactsByMid(msg.from_) kc.findAndAddContactsByMid(msg.from_) kr.findAndAddContactsByMid(msg.from_) random.choice(KAC).inviteIntoGroup(gid,[msg.from_]) except: cl.sendText(msg.to,"Mungkin Saya Tidak Di Dalaam Grup Itu") elif msg.text in ["Glist"]: cl.sendText(msg.to, "Tunggu Sebentar. . 
.") gid = cl.getGroupIdsJoined() h = "" for i in gid: h += "╠➩" + "%s\n" % (cl.getGroup(i).name +" ~> ["+str(len(cl.getGroup(i).members))+"]") cl.sendText(msg.to,"╔═════════════════════════\n║ ☆☞ LIST GROUPS☜☆\n╠═════════════════════════\n" + h + "╠═════════════════════════" + "\n║ Total Groups =" +" ["+str(len(gid))+"]\n╚═════════════════════════") elif msg.text in ["Glistmid"]: gruplist = kr.getGroupIdsJoined() kontak = kr.getGroups(gruplist) num=1 msgs="═════════List GrupMid═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.id) num=(num+1) msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak) kr.sendText(msg.to, msgs) elif "Google: " in msg.text: a = msg.text.replace("Google: ","") b = urllib.quote(a) cl.sendText(msg.to,"Sedang Mencari...") cl.sendText(msg.to, "https://www.google.com/" + b) cl.sendText(msg.to,"Itu Dia Linknya. . .") elif "Details group: " in msg.text: if msg.from_ in admin: gid = msg.text.replace("Details group: ","") if gid in [""," "]: cl.sendText(msg.to,"Grup id tidak valid") else: try: groups = cl.getGroup(gid) if groups.members is not None: members = str(len(groups.members)) else: members = "0" if groups.invitee is not None: pendings = str(len(groups.invitee)) else: pendings = "0" h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus cl.sendText(msg.to,h) except Exception as error: cl.sendText(msg.to,(error)) elif "Cancel invite: " in msg.text: if msg.from_ in admin: gids = msg.text.replace("Cancel invite: ","") gid = cl.getGroup(gids) for i in gid: if i is not None: try: cl.rejectGroupInvitation(i) except: cl.sendText(msg.to,"Error!") break else: break if gid is not None: cl.sendText(msg.to,"Berhasil tolak undangan dari grup " + gid.name) else: cl.sendText(msg.to,"Grup tidak ditemukan") elif msg.text in ["Kapten acc invite"]: if msg.from_ in admin: gid = cl.getGroupIdsInvited() _list = "" for i in gid: if i is not None: gids = cl.getGroup(i) _list += gids.name cl.acceptGroupInvitation(i) else: break if gid is not None: cl.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list) else: cl.sendText(msg.to,"Tidak ada grup yang tertunda saat ini") elif msg.text in ["TC1 acc invite"]: if msg.from_ in admin: gid = ki.getGroupIdsInvited() _list = "" for i in gid: if i is not None: gids = ki.getGroup(i) _list += gids.name ki.acceptGroupInvitation(i) else: break if gid is not None: ki.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list) else: ki.sendText(msg.to,"Tidak ada grup yang tertunda saat ini") elif msg.text in ["TC2 acc invite"]: if msg.from_ in admin: gid = kk.getGroupIdsInvited() _list = "" for i in gid: if i is not None: gids = kk.getGroup(i) _list += gids.name kk.acceptGroupInvitation(i) else: break if gid is not None: kk.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list) else: kk.sendText(msg.to,"Tidak ada grup yang tertunda saat ini") elif msg.text in ["TC3 acc invite"]: if msg.from_ in admin: gid = kc.getGroupIdsInvited() _list = "" for i in gid: if i is not None: gids = kc.getGroup(i) _list += gids.name kc.acceptGroupInvitation(i) else: break if gid is not None: kc.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list) else: kc.sendText(msg.to,"Tidak ada grup yang tertunda saat ini") elif msg.text in ["TC4 acc invite"]: if msg.from_ in admin: gid = 
kr.getGroupIdsInvited() _list = "" for i in gid: if i is not None: gids = kr.getGroup(i) _list += gids.name kr.acceptGroupInvitation(i) else: break if gid is not None: kr.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list) else: kr.sendText(msg.to,"Tidak ada grup yang tertunda saat ini") elif "Gif gore" in msg.text: gif = ("https://media.giphy.com/media/l2JHVsQiOZrNMGzYs/giphy.gif","https://media.giphy.com/media/OgltQ2hbilzJS/200w.gif") gore = random.choice(gif) cl.sendGifWithURL(msg.to,gore) if op.type == 59: print op except Exception as error: print error while True: try: Ops = cl.fetchOps(cl.Poll.rev, 5) except EOFError: raise Exception("It might be wrong revision\n" + str(cl.Poll.rev)) for Op in Ops: if (Op.type != OpType.END_OF_OPERATION): cl.Poll.rev = max(cl.Poll.rev, Op.revision) bot(Op)
ma.py
177,124
-*- coding: utf-8 -*- Chucky_Bot cl.login(qr=True) ki.login(qr=True) kk.login(qr=True) kc = LINETCR.LINE() kc.login(qr=True) kc.login(token='TOKEN_KAMU_DISINI_BEIB') kc.loginResult() print "Kc-Login Success\n" kr = LINETCR.LINE() kr.login(qr=True) kr.login(token='TOKEN_KAMU_DISINI_BEIB') kr.loginResult() print "Kr-Login Success\n" km = LINETCR.LINE() km.login(qr=True) km.login(token='TOKEN_KAMU_DISINI_BEIB') km.loginResult() /XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX... else:
453
en
0.068466
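The runtime handler in ma.py formats the bot's uptime through a waktu(eltime) helper that is not part of this excerpt. Below is a minimal sketch of such a helper, assuming it only needs to turn elapsed seconds into a days/hours/minutes/seconds string; the function name exists in the file, but the body and the exact output wording here are assumptions, not the bot's actual code.

def waktu(secs):
    # Hypothetical reconstruction of the uptime formatter used by the
    # "runtime" command; the real helper may format things differently.
    secs = int(secs)
    days, secs = divmod(secs, 86400)
    hours, secs = divmod(secs, 3600)
    minutes, seconds = divmod(secs, 60)
    return "%d Hari %d Jam %d Menit %d Detik" % (days, hours, minutes, seconds)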
# -*- coding: utf-8 -*- import tensorflow as tf import numpy as np import matplotlib.pyplot as plt import time from PIL import Image import random import os from sample import sample_conf from tensorflow.python.framework.errors_impl import NotFoundError # 设置以下环境变量可开启CPU识别 # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # os.environ["CUDA_VISIBLE_DEVICES"] = "-1" class TrainError(Exception): pass class TrainModel(object): def __init__(self, img_path, char_set, model_save_dir, verify=False): # 模型路径 self.model_save_dir = model_save_dir # 打乱文件顺序+校验图片格式 self.img_path = img_path self.img_list = os.listdir(img_path) # 校验格式 if verify: self.confirm_image_suffix() # 打乱文件顺序 random.seed(time.time()) random.shuffle(self.img_list) # 获得图片宽高和字符长度基本信息 label, captcha_array = self.gen_captcha_text_image(self.img_list[0]) captcha_shape = captcha_array.shape captcha_shape_len = len(captcha_shape) if captcha_shape_len == 3: image_height, image_width, channel = captcha_shape self.channel = channel elif captcha_shape_len == 2: image_height, image_width = captcha_shape else: raise TrainError("图片转换为矩阵时出错,请检查图片格式") # 初始化变量 # 图片尺寸 self.image_height = image_height self.image_width = image_width # 验证码长度(位数) self.max_captcha = len(label) # 验证码字符类别 self.char_set = char_set self.char_set_len = len(char_set) # 相关信息打印 print("-->图片尺寸: {} X {}".format(image_height, image_width)) print("-->验证码长度: {}".format(self.max_captcha)) print("-->验证码共{}类 {}".format(self.char_set_len, char_set)) print("-->使用测试集为 {}".format(img_path)) # tf初始化占位符 self.X = tf.placeholder(tf.float32, [None, image_height * image_width]) # 特征向量 self.Y = tf.placeholder(tf.float32, [None, self.max_captcha * self.char_set_len]) # 标签 self.keep_prob = tf.placeholder(tf.float32) # dropout值 self.w_alpha = 0.01 self.b_alpha = 0.1 # test model input and output print(">>> Start model test") batch_x, batch_y = self.get_batch(0, size=100) print(">>> input batch images shape: {}".format(batch_x.shape)) print(">>> input batch labels shape: {}".format(batch_y.shape)) def gen_captcha_text_image(self, img_name): """ 返回一个验证码的array形式和对应的字符串标签 :return:tuple (str, numpy.array) """ # 标签 label = img_name.split("_")[0] # 文件 img_file = os.path.join(self.img_path, img_name) captcha_image = Image.open(img_file) captcha_array = np.array(captcha_image) # 向量化 return label, captcha_array @staticmethod def convert2gray(img): """ 图片转为灰度图,如果是3通道图则计算,单通道图则直接返回 :param img: :return: """ if len(img.shape) > 2: r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2] gray = 0.2989 * r + 0.5870 * g + 0.1140 * b return gray else: return img def text2vec(self, text): """ 转标签为oneHot编码 :param text: str :return: numpy.array """ text_len = len(text) if text_len > self.max_captcha: raise ValueError('验证码最长{}个字符'.format(self.max_captcha)) vector = np.zeros(self.max_captcha * self.char_set_len) for i, ch in enumerate(text): idx = i * self.char_set_len + self.char_set.index(ch) vector[idx] = 1 return vector def get_batch(self, n, size=128): batch_x = np.zeros([size, self.image_height * self.image_width]) # 初始化 batch_y = np.zeros([size, self.max_captcha * self.char_set_len]) # 初始化 max_batch = int(len(self.img_list) / size) # print(max_batch) if max_batch - 1 < 0: raise TrainError("训练集图片数量需要大于每批次训练的图片数量") if n > max_batch - 1: n = n % max_batch s = n * size e = (n + 1) * size this_batch = self.img_list[s:e] # print("{}:{}".format(s, e)) for i, img_name in enumerate(this_batch): label, image_array = self.gen_captcha_text_image(img_name) image_array = self.convert2gray(image_array) # 灰度化图片 batch_x[i, :] = 
image_array.flatten() / 255 # flatten 转为一维 batch_y[i, :] = self.text2vec(label) # 生成 oneHot return batch_x, batch_y def confirm_image_suffix(self): # 在训练前校验所有文件格式 print("开始校验所有图片后缀") for index, img_name in enumerate(self.img_list): print("{} image pass".format(index), end='\r') if not img_name.endswith(sample_conf['image_suffix']): raise TrainError('confirm images suffix:you request [.{}] file but get file [{}]' .format(sample_conf['image_suffix'], img_name)) print("所有图片格式校验通过") def model(self): x = tf.reshape(self.X, shape=[-1, self.image_height, self.image_width, 1]) print(">>> input x: {}".format(x)) # 卷积层1 wc1 = tf.get_variable(name='wc1', shape=[3, 3, 1, 32], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer()) bc1 = tf.Variable(self.b_alpha * tf.random_normal([32])) conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='SAME'), bc1)) conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') conv1 = tf.nn.dropout(conv1, self.keep_prob) # 卷积层2 wc2 = tf.get_variable(name='wc2', shape=[3, 3, 32, 64], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer()) bc2 = tf.Variable(self.b_alpha * tf.random_normal([64])) conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, wc2, strides=[1, 1, 1, 1], padding='SAME'), bc2)) conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') conv2 = tf.nn.dropout(conv2, self.keep_prob) # 卷积层3 wc3 = tf.get_variable(name='wc3', shape=[3, 3, 64, 128], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer()) bc3 = tf.Variable(self.b_alpha * tf.random_normal([128])) conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, wc3, strides=[1, 1, 1, 1], padding='SAME'), bc3)) conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') conv3 = tf.nn.dropout(conv3, self.keep_prob) print(">>> convolution 3: ", conv3.shape) next_shape = conv3.shape[1] * conv3.shape[2] * conv3.shape[3] # 全连接层1 wd1 = tf.get_variable(name='wd1', shape=[next_shape, 1024], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer()) bd1 = tf.Variable(self.b_alpha * tf.random_normal([1024])) dense = tf.reshape(conv3, [-1, wd1.get_shape().as_list()[0]]) dense = tf.nn.relu(tf.add(tf.matmul(dense, wd1), bd1)) dense = tf.nn.dropout(dense, self.keep_prob) # 全连接层2 wout = tf.get_variable('name', shape=[1024, self.max_captcha * self.char_set_len], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer()) bout = tf.Variable(self.b_alpha * tf.random_normal([self.max_captcha * self.char_set_len])) y_predict = tf.add(tf.matmul(dense, wout), bout) return y_predict def train_cnn(self): y_predict = self.model() print(">>> input batch predict shape: {}".format(y_predict.shape)) print(">>> End model test") # 计算概率 损失 cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_predict, labels=self.Y)) # 梯度下降 optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost) # 计算准确率 predict = tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len]) # 预测结果 max_idx_p = tf.argmax(predict, 2) # 预测结果 max_idx_l = tf.argmax(tf.reshape(self.Y, [-1, self.max_captcha, self.char_set_len]), 2) # 标签 # 计算准确率 correct_pred = tf.equal(max_idx_p, max_idx_l) accuracy = tf.reduce_mean(tf.reduce_min(tf.cast(correct_pred, tf.float32), axis=1)) # 模型保存对象 saver = tf.train.Saver() with tf.Session() as sess: init = tf.global_variables_initializer() sess.run(init) # 恢复模型 if os.path.exists(self.model_save_dir): try: saver.restore(sess, 
self.model_save_dir) # 判断捕获model文件夹中没有模型文件的错误 except NotFoundError: print("model文件夹为空,将创建新模型") else: pass step = 1 for i in range(3000): batch_x, batch_y = self.get_batch(i, size=128) _, cost_ = sess.run([optimizer, cost], feed_dict={self.X: batch_x, self.Y: batch_y, self.keep_prob: 0.75}) if step % 10 == 0: batch_x_test, batch_y_test = self.get_batch(i, size=100) acc = sess.run(accuracy, feed_dict={self.X: batch_x_test, self.Y: batch_y_test, self.keep_prob: 1.}) print("第{}次训练 >>> 准确率为 {} >>> loss {}".format(step, acc, cost_)) # 准确率达到99%后保存并停止 if acc > 0.99: saver.save(sess, self.model_save_dir) break # 每训练500轮就保存一次 if i % 500 == 0: saver.save(sess, self.model_save_dir) step += 1 saver.save(sess, self.model_save_dir) def recognize_captcha(self): label, captcha_array = self.gen_captcha_text_image(random.choice(self.img_list)) f = plt.figure() ax = f.add_subplot(111) ax.text(0.1, 0.9, "origin:" + label, ha='center', va='center', transform=ax.transAxes) plt.imshow(captcha_array) # 预测图片 image = self.convert2gray(captcha_array) image = image.flatten() / 255 y_predict = self.model() saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, self.model_save_dir) predict = tf.argmax(tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len]), 2) text_list = sess.run(predict, feed_dict={self.X: [image], self.keep_prob: 1.}) predict_text = text_list[0].tolist() print("正确: {} 预测: {}".format(label, predict_text)) # 显示图片和预测结果 p_text = "" for p in predict_text: p_text += str(self.char_set[p]) print(p_text) plt.text(20, 1, 'predict:{}'.format(p_text)) plt.show() def main(): train_image_dir = sample_conf["train_image_dir"] char_set = sample_conf["char_set"] model_save_dir = sample_conf["model_save_dir"] tm = TrainModel(train_image_dir, char_set, model_save_dir, verify=False) tm.train_cnn() # 开始训练模型 # tm.recognize_captcha() # 识别图片示例 if __name__ == '__main__': main()
train_model.py
12,141
图片转为灰度图,如果是3通道图则计算,单通道图则直接返回 :param img: :return: 返回一个验证码的array形式和对应的字符串标签 :return:tuple (str, numpy.array) 转标签为oneHot编码 :param text: str :return: numpy.array -*- coding: utf-8 -*- 设置以下环境变量可开启CPU识别 os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "-1" 模型路径 打乱文件顺序+校验图片格式 校验格式 打乱文件顺序 获得图片宽高和字符长度基本信息 初始化变量 图片尺寸 验证码长度(位数) 验证码字符类别 相关信息打印 tf初始化占位符 特征向量 标签 dropout值 test model input and output 标签 文件 向量化 初始化 初始化 print(max_batch) print("{}:{}".format(s, e)) 灰度化图片 flatten 转为一维 生成 oneHot 在训练前校验所有文件格式 卷积层1 卷积层2 卷积层3 全连接层1 全连接层2 计算概率 损失 梯度下降 计算准确率 预测结果 预测结果 标签 计算准确率 模型保存对象 恢复模型 判断捕获model文件夹中没有模型文件的错误 准确率达到99%后保存并停止 每训练500轮就保存一次 预测图片 显示图片和预测结果 开始训练模型 tm.recognize_captcha() 识别图片示例
712
zh
0.903385
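The label encoding in train_model.py (TrainModel.text2vec) packs one one-hot block per captcha character into a single flat vector. The standalone sketch below reproduces that layout for a 4-character captcha over the digit charset and adds a small vec2text inverse that is not in the file itself; the charset and label are made-up illustration values.

import numpy as np

char_set = list("0123456789")
max_captcha, char_set_len = 4, len(char_set)

def text2vec(text):
    # same indexing as TrainModel.text2vec: block i holds character i
    vector = np.zeros(max_captcha * char_set_len)
    for i, ch in enumerate(text):
        vector[i * char_set_len + char_set.index(ch)] = 1
    return vector

def vec2text(vector):
    # inverse mapping (not in the file): argmax inside each character block
    idx = vector.reshape(max_captcha, char_set_len).argmax(axis=1)
    return "".join(char_set[i] for i in idx)

v = text2vec("2083")
print(v.shape)      # (40,)
print(vec2text(v))  # 2083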
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst # The idea for this module (but no code) was borrowed from the # quantities (http://pythonhosted.org/quantities/) package. """Helper functions for Quantity. In particular, this implements the logic that determines scaling and result units for a given ufunc, given input units. """ from fractions import Fraction import numpy as np from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS from ..core import (UnitsError, UnitConversionError, UnitTypeError, dimensionless_unscaled, get_current_unit_registry) def _d(unit): if unit is None: return dimensionless_unscaled else: return unit def get_converter(from_unit, to_unit): """Like Unit._get_converter, except returns None if no scaling is needed, i.e., if the inferred scale is unity.""" try: scale = from_unit._to(to_unit) except UnitsError: return from_unit._apply_equivalencies( from_unit, to_unit, get_current_unit_registry().equivalencies) except AttributeError: raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'" .format(from_unit, to_unit)) if scale == 1.: return None else: return lambda val: scale * val def get_converters_and_unit(f, unit1, unit2): converters = [None, None] # By default, we try adjusting unit2 to unit1, so that the result will # be unit1 as well. But if there is no second unit, we have to try # adjusting unit1 (to dimensionless, see below). if unit2 is None: if unit1 is None: # No units for any input -- e.g., np.add(a1, a2, out=q) return converters, dimensionless_unscaled changeable = 0 # swap units. unit2 = unit1 unit1 = None elif unit2 is unit1: # ensure identical units is fast ("==" is slow, so avoid that). return converters, unit1 else: changeable = 1 # Try to get a converter from unit2 to unit1. if unit1 is None: try: converters[changeable] = get_converter(unit2, dimensionless_unscaled) except UnitsError: # special case: would be OK if unitless number is zero, inf, nan converters[1-changeable] = False return converters, unit2 else: return converters, dimensionless_unscaled else: try: converters[changeable] = get_converter(unit2, unit1) except UnitsError: raise UnitConversionError( "Can only apply '{0}' function to quantities " "with compatible dimensions" .format(f.__name__)) return converters, unit1 # SINGLE ARGUMENT UFUNC HELPERS # # The functions below take a single argument, which is the quantity upon which # the ufunc is being used. The output of the helper function should be two # values: a list with a single converter to be used to scale the input before # it is being passed to the ufunc (or None if no conversion is needed), and # the unit the output will be in. 
def helper_onearg_test(f, unit): return ([None], None) def helper_invariant(f, unit): return ([None], _d(unit)) def helper_square(f, unit): return ([None], unit ** 2 if unit is not None else dimensionless_unscaled) def helper_reciprocal(f, unit): return ([None], unit ** -1 if unit is not None else dimensionless_unscaled) one_half = 0.5 # faster than Fraction(1, 2) one_third = Fraction(1, 3) def helper_sqrt(f, unit): return ([None], unit ** one_half if unit is not None else dimensionless_unscaled) def helper_cbrt(f, unit): return ([None], (unit ** one_third if unit is not None else dimensionless_unscaled)) def helper_modf(f, unit): if unit is None: return [None], (dimensionless_unscaled, dimensionless_unscaled) try: return ([get_converter(unit, dimensionless_unscaled)], (dimensionless_unscaled, dimensionless_unscaled)) except UnitsError: raise UnitTypeError("Can only apply '{0}' function to " "dimensionless quantities" .format(f.__name__)) def helper__ones_like(f, unit): return [None], dimensionless_unscaled def helper_dimensionless_to_dimensionless(f, unit): if unit is None: return [None], dimensionless_unscaled try: return ([get_converter(unit, dimensionless_unscaled)], dimensionless_unscaled) except UnitsError: raise UnitTypeError("Can only apply '{0}' function to " "dimensionless quantities" .format(f.__name__)) def helper_dimensionless_to_radian(f, unit): from ..si import radian if unit is None: return [None], radian try: return [get_converter(unit, dimensionless_unscaled)], radian except UnitsError: raise UnitTypeError("Can only apply '{0}' function to " "dimensionless quantities" .format(f.__name__)) def helper_degree_to_radian(f, unit): from ..si import degree, radian try: return [get_converter(unit, degree)], radian except UnitsError: raise UnitTypeError("Can only apply '{0}' function to " "quantities with angle units" .format(f.__name__)) def helper_radian_to_degree(f, unit): from ..si import degree, radian try: return [get_converter(unit, radian)], degree except UnitsError: raise UnitTypeError("Can only apply '{0}' function to " "quantities with angle units" .format(f.__name__)) def helper_radian_to_dimensionless(f, unit): from ..si import radian try: return [get_converter(unit, radian)], dimensionless_unscaled except UnitsError: raise UnitTypeError("Can only apply '{0}' function to " "quantities with angle units" .format(f.__name__)) def helper_frexp(f, unit): if not unit.is_unity(): raise UnitTypeError("Can only apply '{0}' function to " "unscaled dimensionless quantities" .format(f.__name__)) return [None], (None, None) # TWO ARGUMENT UFUNC HELPERS # # The functions below take a two arguments. The output of the helper function # should be two values: a tuple of two converters to be used to scale the # inputs before being passed to the ufunc (None if no conversion is needed), # and the unit the output will be in. 
def helper_multiplication(f, unit1, unit2): return [None, None], _d(unit1) * _d(unit2) def helper_division(f, unit1, unit2): return [None, None], _d(unit1) / _d(unit2) def helper_power(f, unit1, unit2): # TODO: find a better way to do this, currently need to signal that one # still needs to raise power of unit1 in main code if unit2 is None: return [None, None], False try: return [None, get_converter(unit2, dimensionless_unscaled)], False except UnitsError: raise UnitTypeError("Can only raise something to a " "dimensionless quantity") def helper_ldexp(f, unit1, unit2): if unit2 is not None: raise TypeError("Cannot use ldexp with a quantity " "as second argument.") else: return [None, None], _d(unit1) def helper_copysign(f, unit1, unit2): # if first arg is not a quantity, just return plain array if unit1 is None: return [None, None], None else: return [None, None], unit1 def helper_heaviside(f, unit1, unit2): try: converter2 = (get_converter(unit2, dimensionless_unscaled) if unit2 is not None else None) except UnitsError: raise UnitTypeError("Can only apply 'heaviside' function with a " "dimensionless second argument.") return ([None, converter2], dimensionless_unscaled) def helper_two_arg_dimensionless(f, unit1, unit2): try: converter1 = (get_converter(unit1, dimensionless_unscaled) if unit1 is not None else None) converter2 = (get_converter(unit2, dimensionless_unscaled) if unit2 is not None else None) except UnitsError: raise UnitTypeError("Can only apply '{0}' function to " "dimensionless quantities" .format(f.__name__)) return ([converter1, converter2], dimensionless_unscaled) # This used to be a separate function that just called get_converters_and_unit. # Using it directly saves a few us; keeping the clearer name. helper_twoarg_invariant = get_converters_and_unit def helper_twoarg_comparison(f, unit1, unit2): converters, _ = get_converters_and_unit(f, unit1, unit2) return converters, None def helper_twoarg_invtrig(f, unit1, unit2): from ..si import radian converters, _ = get_converters_and_unit(f, unit1, unit2) return converters, radian def helper_twoarg_floor_divide(f, unit1, unit2): converters, _ = get_converters_and_unit(f, unit1, unit2) return converters, dimensionless_unscaled def helper_divmod(f, unit1, unit2): converters, result_unit = get_converters_and_unit(f, unit1, unit2) return converters, (dimensionless_unscaled, result_unit) # list of ufuncs: # http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs UNSUPPORTED_UFUNCS |= { np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift, np.right_shift, np.logical_and, np.logical_or, np.logical_xor, np.logical_not} for name in 'isnat', 'gcd', 'lcm': # isnat was introduced in numpy 1.14, gcd+lcm in 1.15 ufunc = getattr(np, name, None) if isinstance(ufunc, np.ufunc): UNSUPPORTED_UFUNCS |= {ufunc} # SINGLE ARGUMENT UFUNCS # ufuncs that return a boolean and do not care about the unit onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit) for ufunc in onearg_test_ufuncs: UFUNC_HELPERS[ufunc] = helper_onearg_test # ufuncs that return a value with the same unit as the input invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative, np.spacing, np.rint, np.floor, np.ceil, np.trunc, np.positive) for ufunc in invariant_ufuncs: UFUNC_HELPERS[ufunc] = helper_invariant # ufuncs that require dimensionless input and and give dimensionless output dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log, np.log10, np.log2, np.log1p) # As found out in 
gh-7058, some numpy 1.13 conda installations also provide # np.erf, even though upstream doesn't have it. We include it if present. if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc): dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,) for ufunc in dimensionless_to_dimensionless_ufuncs: UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless # ufuncs that require dimensionless input and give output in radians dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh, np.arcsinh, np.arctanh) for ufunc in dimensionless_to_radian_ufuncs: UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian # ufuncs that require input in degrees and give output in radians degree_to_radian_ufuncs = (np.radians, np.deg2rad) for ufunc in degree_to_radian_ufuncs: UFUNC_HELPERS[ufunc] = helper_degree_to_radian # ufuncs that require input in radians and give output in degrees radian_to_degree_ufuncs = (np.degrees, np.rad2deg) for ufunc in radian_to_degree_ufuncs: UFUNC_HELPERS[ufunc] = helper_radian_to_degree # ufuncs that require input in radians and give dimensionless output radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh, np.tanh) for ufunc in radian_to_dimensionless_ufuncs: UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless # ufuncs handled as special cases UFUNC_HELPERS[np.sqrt] = helper_sqrt UFUNC_HELPERS[np.square] = helper_square UFUNC_HELPERS[np.reciprocal] = helper_reciprocal UFUNC_HELPERS[np.cbrt] = helper_cbrt UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like UFUNC_HELPERS[np.modf] = helper_modf UFUNC_HELPERS[np.frexp] = helper_frexp # TWO ARGUMENT UFUNCS # two argument ufuncs that require dimensionless input and and give # dimensionless output two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2) for ufunc in two_arg_dimensionless_ufuncs: UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless # two argument ufuncs that return a value with the same unit as the input twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum, np.minimum, np.fmin, np.fmax, np.nextafter, np.remainder, np.mod, np.fmod) for ufunc in twoarg_invariant_ufuncs: UFUNC_HELPERS[ufunc] = helper_twoarg_invariant # two argument ufuncs that need compatible inputs and return a boolean twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal) for ufunc in twoarg_comparison_ufuncs: UFUNC_HELPERS[ufunc] = helper_twoarg_comparison # two argument ufuncs that do inverse trigonometry twoarg_invtrig_ufuncs = (np.arctan2,) # another private function in numpy; use getattr in case it disappears if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc): twoarg_invtrig_ufuncs += (np.core.umath._arg,) for ufunc in twoarg_invtrig_ufuncs: UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig # ufuncs handled as special cases UFUNC_HELPERS[np.multiply] = helper_multiplication UFUNC_HELPERS[np.divide] = helper_division UFUNC_HELPERS[np.true_divide] = helper_division UFUNC_HELPERS[np.power] = helper_power UFUNC_HELPERS[np.ldexp] = helper_ldexp UFUNC_HELPERS[np.copysign] = helper_copysign UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide UFUNC_HELPERS[np.heaviside] = helper_heaviside UFUNC_HELPERS[np.float_power] = helper_power UFUNC_HELPERS[np.divmod] = helper_divmod
astropy/units/quantity_helper/helpers.py
14,433
Like Unit._get_converter, except returns None if no scaling is needed, i.e., if the inferred scale is unity. Helper functions for Quantity. In particular, this implements the logic that determines scaling and result units for a given ufunc, given input units. -*- coding: utf-8 -*- Licensed under a 3-clause BSD style license - see LICENSE.rst The idea for this module (but no code) was borrowed from the quantities (http://pythonhosted.org/quantities/) package. By default, we try adjusting unit2 to unit1, so that the result will be unit1 as well. But if there is no second unit, we have to try adjusting unit1 (to dimensionless, see below). No units for any input -- e.g., np.add(a1, a2, out=q) swap units. ensure identical units is fast ("==" is slow, so avoid that). Try to get a converter from unit2 to unit1. special case: would be OK if unitless number is zero, inf, nan SINGLE ARGUMENT UFUNC HELPERS The functions below take a single argument, which is the quantity upon which the ufunc is being used. The output of the helper function should be two values: a list with a single converter to be used to scale the input before it is being passed to the ufunc (or None if no conversion is needed), and the unit the output will be in. faster than Fraction(1, 2) TWO ARGUMENT UFUNC HELPERS The functions below take a two arguments. The output of the helper function should be two values: a tuple of two converters to be used to scale the inputs before being passed to the ufunc (None if no conversion is needed), and the unit the output will be in. TODO: find a better way to do this, currently need to signal that one still needs to raise power of unit1 in main code if first arg is not a quantity, just return plain array This used to be a separate function that just called get_converters_and_unit. Using it directly saves a few us; keeping the clearer name. list of ufuncs: http://docs.scipy.org/doc/numpy/reference/ufuncs.htmlavailable-ufuncs isnat was introduced in numpy 1.14, gcd+lcm in 1.15 SINGLE ARGUMENT UFUNCS ufuncs that return a boolean and do not care about the unit ufuncs that return a value with the same unit as the input ufuncs that require dimensionless input and and give dimensionless output As found out in gh-7058, some numpy 1.13 conda installations also provide np.erf, even though upstream doesn't have it. We include it if present. ufuncs that require dimensionless input and give output in radians ufuncs that require input in degrees and give output in radians ufuncs that require input in radians and give output in degrees ufuncs that require input in radians and give dimensionless output ufuncs handled as special cases TWO ARGUMENT UFUNCS two argument ufuncs that require dimensionless input and and give dimensionless output two argument ufuncs that return a value with the same unit as the input two argument ufuncs that need compatible inputs and return a boolean two argument ufuncs that do inverse trigonometry another private function in numpy; use getattr in case it disappears ufuncs handled as special cases
3,062
en
0.79164
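The helpers in astropy/units/quantity_helper/helpers.py are internal plumbing; they are exercised whenever a numpy ufunc is applied to a Quantity. A short illustration through the public astropy.units API (assuming astropy is installed; the values are arbitrary):

import numpy as np
import astropy.units as u

print(np.radians(90 * u.deg))       # degree_to_radian helper: result in rad
print(np.sin(90 * u.deg))           # radian_to_dimensionless helper: dimensionless 1.0
print(np.add(1 * u.m, 50 * u.cm))   # twoarg invariant helper converts cm to m -> 1.5 m

try:
    np.add(1 * u.m, 1 * u.s)        # incompatible units
except u.UnitConversionError as err:
    print(err)                      # raised via get_converters_and_unit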
# Uses python3
import sys

def get_change(money, coins):
    t = [j+1 for j in range(money+1)]
    # boundary condition
    t[0] = 0
    for j in range(1, money+1):
        for c in coins:
            if c <= j:
                t[j] = min(t[j], 1+t[j-c])
    return t[money]

if __name__ == '__main__':
    coins = [1, 3, 4]
    money = int(input())
    print(get_change(money, coins))
1. Algorithmic Toolbox/week5_dynamic_programming1/1_money_change_again.py
393
Uses python3 boundary condition
31
en
0.68529
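Coins {1, 3, 4} are the classic case where a greedy largest-coin-first strategy fails (6 = 4 + 1 + 1 uses three coins, while 3 + 3 uses two), which is exactly what the bottom-up table in 1_money_change_again.py handles correctly. A self-contained check of the same recurrence, under a different name to keep it separate from the file's get_change:

def min_coins(money, coins=(1, 3, 4)):
    # same table as get_change above: t[j] = fewest coins that sum to j
    t = [j + 1 for j in range(money + 1)]
    t[0] = 0
    for j in range(1, money + 1):
        for c in coins:
            if c <= j:
                t[j] = min(t[j], 1 + t[j - c])
    return t[money]

assert min_coins(6) == 2    # 3 + 3, not the greedy 4 + 1 + 1
assert min_coins(34) == 9   # seven 4s and two 3s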
from sepal_ui import sepalwidgets as sw from ipywidgets import dlink from component import parameter as cp class ParamTile(sw.Card): def __init__(self, model): # read the model self.model = model # add the base widgets self.close = sw.Icon(children=["mdi-close"], small=True) self.title = sw.CardTitle( class_="pa-0 ma-0", children=[sw.Spacer(), self.close] ) # create the widgets self.w_target = sw.Select( small=True, items=[{"text": f"{i+1}0%", "value": i + 1} for i in range(cp.nb_target)], v_model=model.target, label="target", dense=True, ) self.w_weight = sw.Select( small=True, items=[i + 1 for i in range(cp.nb_weight)], v_model=model.weight, label="weight", dense=True, ) # link the widgets to the model self.model.bind(self.w_target, "target").bind(self.w_weight, "weight") # create the object super().__init__( max_width="500px", class_="pa-1", children=[self.title, self.w_target, self.w_weight], viz=False, disabled=False, ) # add javascript events self.close.on_event("click", lambda *args: self.hide()) dlink((self, "disabled"), (self, "loading")) def reset(self): self.w_target.v_model = None self.w_weight.v_model = None self.hide() return
component/tile/param_tile.py
1,555
read the model add the base widgets create the widgets link the widgets to the model create the object add javascript events
124
en
0.582046
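The dlink call at the end of ParamTile.__init__ is a one-way trait link: whenever the tile's disabled trait changes, its loading trait follows, but not the other way around. A minimal sketch of that behaviour using traitlets' directional link; the Demo class and its trait names are illustrative stand-ins, not sepal_ui API.

from traitlets import Bool, HasTraits, dlink

class Demo(HasTraits):
    # stand-ins for the tile's disabled/loading traits
    disabled = Bool(False)
    loading = Bool(False)

d = Demo()
dlink((d, "disabled"), (d, "loading"))  # one-way: disabled drives loading

d.disabled = True
print(d.loading)    # True, pushed by the directional link
d.loading = False   # changing the target does not touch the source
print(d.disabled)   # still True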
#! /usr/bin/env python """Functions for working with the DLRN API""" import csv import os.path import requests from toolchest import yaml from atkinson.config.manager import ConfigManager from atkinson.logging.logger import getLogger def _raw_fetch(url, logger): """ Fetch remote data and return the text output. :param url: The URL to fetch the data from :param logger: A logger instance to use. :return: Raw text data, None otherwise """ ret_data = None try: req = requests.get(url) if req.status_code == requests.codes.ok: ret_data = req.text except requests.exceptions.ConnectionError as error: logger.warning(error.request) return ret_data def _fetch_yaml(url, logger): """ Fetch remote data and process the text as yaml. :param url: The URL to fetch the data from :param logger: A logger instance to use. :return: Parsed yaml data in the form of a dictionary """ ret_data = None raw_data = _raw_fetch(url, logger) if raw_data is not None: ret_data = yaml.parse(raw_data) return ret_data def dlrn_http_factory(host, config_file=None, link_name=None, logger=getLogger()): """ Create a DlrnData instance based on a host. :param host: A host name string to build instances :param config_file: A dlrn config file(s) to use in addition to the default. :param link_name: A dlrn symlink to use. This overrides the config files link parameter. :param logger: An atkinson logger to use. Default is the base logger. :return: A DlrnData instance """ manager = None files = ['dlrn.yml'] if config_file is not None: if isinstance(config_file, list): files.extend(config_file) else: files.append(config_file) local_path = os.path.realpath(os.path.dirname(__file__)) manager = ConfigManager(filenames=files, paths=local_path) if manager is None: return None config = manager.config if host not in config: return None link = config[host]['link'] if link_name is not None: link = link_name return DlrnHttpData(config[host]['url'], config[host]['release'], link_name=link, logger=logger) class DlrnHttpData(): """A class used to interact with the dlrn API""" def __init__(self, url, release, link_name='current', logger=getLogger()): """ Class constructor :param url: The URL to the host to obtain data. :param releases: The release name to use for lookup. :param link_name: The name of the dlrn symlink to fetch data from. :param logger: An atkinson logger to use. Default is the base logger. """ self.url = os.path.join(url, release) self.release = release self._logger = logger self._link_name = link_name self._commit_data = {} self._fetch_commit() def _fetch_commit(self): """ Fetch the commit data from dlrn """ full_url = os.path.join(self.url, self._link_name, 'commit.yaml') data = _fetch_yaml(full_url, self._logger) if data is not None and 'commits' in data: pkg = data['commits'][0] if pkg['status'] == 'SUCCESS': self._commit_data = {'name': pkg['project_name'], 'dist_hash': pkg['distro_hash'], 'commit_hash': pkg['commit_hash'], 'extended_hash': pkg.get('extended_hash')} else: msg = '{0} has a status of error'.format(str(pkg)) self._logger.warning(msg) def _build_url(self): """ Generate a url given a commit hash and distgit hash to match the format base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987 is a portion of the distgit hash. :return: A string with the full URL. 
""" first = self._commit_data['commit_hash'][0:2] second = self._commit_data['commit_hash'][2:4] third = self._commit_data['commit_hash'] for key in ['dist_hash', 'extended_hash']: if self._commit_data.get(key, 'None') != 'None': third += '_' + self._commit_data[key][0:8] return os.path.join(self.url, first, second, third) @property def commit(self): """ Get the dlrn commit information :return: A dictionary of name, dist-git hash, commit hash and extended hash. An empty dictionary is returned otherwise. """ return self._commit_data @property def versions(self): """ Get the version data for the versions.csv file and return the data in a dictionary :return: A dictionary of packages with commit and dist-git hashes """ ret_dict = {} full_url = os.path.join(self._build_url(), 'versions.csv') data = _raw_fetch(full_url, self._logger) if data is not None: data = data.replace(' ', '_') split_data = data.split() reader = csv.DictReader(split_data) for row in reader: ret_dict[row['Project']] = {'source': row['Source_Sha'], 'state': row['Status'], 'distgit': row['Dist_Sha'], 'nvr': row['Pkg_NVR']} else: msg = 'Could not fetch {0}'.format(full_url) self._logger.error(msg) return ret_dict
atkinson/dlrn/http_data.py
5,950
A class used to interact with the dlrn API Class constructor :param url: The URL to the host to obtain data. :param releases: The release name to use for lookup. :param link_name: The name of the dlrn symlink to fetch data from. :param logger: An atkinson logger to use. Default is the base logger. Generate a url given a commit hash and distgit hash to match the format base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987 is a portion of the distgit hash. :return: A string with the full URL. Fetch the commit data from dlrn Fetch remote data and process the text as yaml. :param url: The URL to fetch the data from :param logger: A logger instance to use. :return: Parsed yaml data in the form of a dictionary Fetch remote data and return the text output. :param url: The URL to fetch the data from :param logger: A logger instance to use. :return: Raw text data, None otherwise Get the dlrn commit information :return: A dictionary of name, dist-git hash, commit hash and extended hash. An empty dictionary is returned otherwise. Create a DlrnData instance based on a host. :param host: A host name string to build instances :param config_file: A dlrn config file(s) to use in addition to the default. :param link_name: A dlrn symlink to use. This overrides the config files link parameter. :param logger: An atkinson logger to use. Default is the base logger. :return: A DlrnData instance Get the version data for the versions.csv file and return the data in a dictionary :return: A dictionary of packages with commit and dist-git hashes Functions for working with the DLRN API ! /usr/bin/env python
1,685
en
0.7002
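The layout documented in DlrnHttpData._build_url splits the commit hash into two two-character directory levels, then appends the full commit hash joined with the first eight characters of the distro hash (and of the extended hash, when one is present). A worked example with a made-up base URL and made-up hashes:

import os.path

base = 'https://example.org/dlrn/centos9-master'     # stand-in for self.url
commit_hash = 'abcdef1234567890abcdef1234567890abcdef12'
distro_hash = '9876543210fedcba9876543210fedcba98765432'

third = commit_hash + '_' + distro_hash[0:8]
path = os.path.join(base, commit_hash[0:2], commit_hash[2:4], third)
print(path)
# .../centos9-master/ab/cd/abcdef1234567890abcdef1234567890abcdef12_98765432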
""" Module for the selection of machine learning models. There are several different functions which can perform the model selection: all of them have an intuitive interface, but are also powerful and flexible. In addition, almost all these functions can optionally make plots, which sum up the performed selection in a visual way. These different functions perform the model selection in different contexts, i.e. each function is specifically meant for a specific scenario. Certain contexts are more specific, and other are more general. On the whole, there are six different model selection functions, divided into two main groups: 1. functions that perform the model selection with respect to a **single dataset**; 2. functions that perform the model selection with respect to **multiple datasets**. The six functions, sorted from the most specific context to the most general one, are: - *hyperparameter_validation*, *hyperparameters_validation*, *models_validation* (single dataset); - *datasets_hyperparameter_validation*, *datasets_hyperparameters_validation*, *datasets_models_validation* (multiple datasets). This module deeply uses the **numpy** library. It is built on the top of it. In fact, the datasets are represented as np.array. Moreover, the plots are made using the **matplotlib** library. In addition, it is built on the top of the **sklearn** module: - the machine learning models are represented as sklearn models (i.e. sklearn estimators); - under the hood, the selection is performed using the grid search cross validation provided by sklearn (i.e. GridSearchCV); - several other operations are done using the functionalities provided by sklearn. This module, besides the model selection functions, contains also some utilities: - the PolynomialRegression class; - some utility functions. """ import matplotlib.pyplot as plt import numpy as np from sklearn.utils import resample from sklearn.model_selection import train_test_split, cross_val_score, TimeSeriesSplit, GridSearchCV from sklearn.metrics import mean_squared_error, accuracy_score from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures from sklearn.base import BaseEstimator from sklearn.linear_model import LinearRegression #---------------------------------------------------------------------------------------------------------------------------- # POLYNOMIAL REGRESSOR MODEL class PolynomialRegression(BaseEstimator): """ Polynomial regression model. It's a sklearn model: it's compliant to the sklearn estimators interface. `Example <https://scikit-learn.org/stable/developers/develop.html>`_ Parameters ---------- degree: int Degree to apply for the polynomial transformation. Notes ---------- The polynomial transformation is performed using the sklearn PolynomialFeatures. 
""" def __init__(self, degree=1): self.degree=degree def fit(self, X, y): self.poly_transformer = PolynomialFeatures(self.degree, include_bias=False) self.poly_transformer.fit(X) X = self.poly_transformer.transform(X) self.model = LinearRegression(fit_intercept=True) self.model.fit(X,y) return self def predict(self, X): X = self.poly_transformer.transform(X) return self.model.predict(X) def get_params(self, deep=True): return {"degree": self.degree} def set_params(self, **parameters): for parameter, value in parameters.items(): setattr(self, parameter, value) return self #---------------------------------------------------------------------------------------------------------------------------- # UTILITY FUNCTIONS def compute_train_val_test(X, y, model, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True): """ Compute the training-validation-test scores for the given model on the given dataset. The training and test scores are simply computed by splitting the dataset into the training and test sets. The validation score is performed applying the cross validation on the training set. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model to evaluate. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. datasets indexed by days). (This affects the computing of the scores). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. Returns ---------- train_score: float val_score: float test_score: float Notes ---------- - If `regr` is True, the returned scores are errors, computed using the MSE formula (i.e. Mean Squared Error). Otherwise, the returned scores are accuracy measures. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are obtained simply by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. """ if regr: scoring="neg_mean_squared_error" else: scoring="accuracy" # Split into training e test. 
if not time_series : # Random splitting (not time series) X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state) else: # time series splitting train_len = int(X.shape[0]*(1-test_size)) X_train_80 = X[:train_len] y_train_80 = y[:train_len] X_test = X[train_len:] y_test = y[train_len:] if(scale): # Scale the features in X scaler = MinMaxScaler() scaler.fit(X_train_80) X_train_80 = scaler.transform(X_train_80) X_test = scaler.transform(X_test) # Cross validation if not time_series: # k-fold cross validation cv = n_folds else: # cross validation for time series cv = TimeSeriesSplit(n_splits = n_folds) scores = cross_val_score(model, X_train_80, y_train_80, cv=cv, scoring=scoring) val_score = scores.mean() # validation score if regr: val_score = -val_score model.fit(X_train_80,y_train_80) # Fit the model using all the training # Compute training and test scores train_score=0 test_score=0 if regr: train_score = mean_squared_error(y_true=y_train_80, y_pred=model.predict(X_train_80)) test_score = mean_squared_error(y_true=y_test, y_pred=model.predict(X_test)) else: train_score = accuracy_score(y_true=y_train_80, y_pred=model.predict(X_train_80)) test_score = accuracy_score(y_true=y_test, y_pred=model.predict(X_test)) return train_score, val_score, test_score # Return a triple def compute_bias_variance_error(X, y, model, scale=False, N_TESTS = 20, sample_size=0.67): """ Compute the bias^2-variance-error scores for the given model on the given dataset. These measures are computed in an approximate way, using `N_TESTS` random samples of size `sample_size` from the dataset. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model to evaluate. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). N_TESTS: int Number of samples that are made in order to compute the measures. sample_size: float Decimal number between 0 and 1, which indicates the proportion of the sample. Returns ---------- bias: float variance: float error: float """ # Scale the features in `X` if(scale): scaler = MinMaxScaler() scaler.fit(X) X = scaler.transform(X) # Vector 'vector_ypred': at the beginning is a list of lists (i.e. two dimensional list). # In the end it will be a matrix which has as many rows as `N_TESTS` (each row corresponds to a sample) and as many # columns as the number of instances in `X` (each column is a point of the dataset). # Row 'i' --> there are the predictions made by the model on the sample 'i' using all the dataset points. # Column 'j' --> there are the predictions made by the model on the point 'j' using all the `N_TESTS` samples. vector_ypred = [] # Iterate through N_TESTS. At each iteration extract a new sample and fit the model on it. for i in range(N_TESTS): # Extract a new sample (sample 'i') Xs, ys = resample(X,y, n_samples=int(sample_size*len(y)) ) # Fit the model on this sample 'i' model.fit(Xs,ys) # Add the predictions made by the model on all the dataset points vector_ypred.append(list(model.predict(X))) vector_ypred = np.array(vector_ypred) # Transform into numpy array # Vector that has as many elements as the dataset points, and for each of them it has the associated bias^2 computed on # the `N_TEST` samples. 
vector_bias = (y - np.mean(vector_ypred, axis=0))**2 # Vector that has as many elements as the dataset points, and for each of them it has the associated variance computed on # the `N_TEST` samples. vector_variance = np.var(vector_ypred, axis=0) # Vector that has as many elements as the dataset points, and for each of them it has the associated error computed on # the `N_TEST` samples. vector_error = np.sum((vector_ypred - y)**2, axis=0)/N_TESTS bias = np.mean(vector_bias) # Total bias^2 of the model variance = np.mean(vector_variance) # Total variance of the model error = np.mean(vector_error) # Total error of the model return bias,variance,error # Return a triple def plot_predictions(X, y, model, scale=False, test_size=0.2, plot_type=0, xvalues=None, xlabel="Index", title="Actual vs Predicted values", figsize=(6,6)): """ Plot the predictions made by the given model on the given dataset, versus its actual values. The dataset is split into training-test sets: the former is used to train the `model`, on the latter the predictions are made. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model used to make the predictions. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. plot_type: int Indicates the type of the plot. - 0 -> In the same plot two different curves are drawn: the first has on the x axis `xvalues` and on the y axis the actual values (i.e. `y`); the second has on the x axis `xvalues` and on the y axis the computed predicted values. - 1 -> On the x axis the actual values are put, on the y axis the predicted ones. xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. (It's used only if `plot_type` is 0). xlabel: str Label of the x axis of the plot. (It's used only if `plot_type` is 0). title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- matplotlib.axes.Axes The matplotlib Axes where the plot has been made. Notes ---------- The splitting of the datasets into the training-test sets is simply made by dividing the dataset into two contiguous sequences. I.e. it is the same technique used usually when the dataset is a time series dataset. (This is done in order to simplify the visualization). For this reason, typically this function is applied on time series datasets. 
""" train_len = int(X.shape[0]*(1-test_size)) X_train_80 = X[:train_len] y_train_80 = y[:train_len] X_test = X[train_len:] y_test = y[train_len:] if(scale): # Scale the features in X scaler = MinMaxScaler() scaler.fit(X_train_80) X_train_80 = scaler.transform(X_train_80) X_test = scaler.transform(X_test) model.fit(X_train_80,y_train_80) # Fit using all the training set predictions = model.predict(X_test) fig, ax = plt.subplots(figsize=figsize) if plot_type==0: if xvalues is None: xvalues=range(len(X)) ax.plot(xvalues,y, 'o:', label='actual values') ax.plot(xvalues[train_len:],predictions, 'o:', label='predicted values') ax.legend() elif plot_type==1: ax.plot(y[train_len:],predictions,'o') ax.plot([0, 1], [0, 1], 'r-',transform=ax.transAxes) xlabel="Actual values" ax.set_ylabel("Predicted values") ax.set_xlabel(xlabel) ax.set_title(title) ax.grid() return ax def _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize=(6,6), bar=False): """ Plot the given list of training-validation scores. This function is an auxiliary function for the model selection functions. It's meant to be private in the module. Parameters ---------- xvalues: list (in general iterable) Values to put in the x axis of the plot. train_val_scores: np.array Two dimensional np.array, containing two columns: the first contains the trainining scores, the second the validation scores. Basically, it is a list of training-validation scores. plot_train: bool Indicates whether to plot also the training scores or to plot only the validation ones. xlabel: str Label of the x axis. title: str Title of the plot. figsize: tuple Two dimensions of the plot. bar: bool Indicates whether to plot the scores using bars or using points. If `bar` it's True, `xvalues` must contain string (i.e. labels). Returns ---------- matplotlib.axes.Axes The matplotlib Axes where the plot has been made. """ fig, ax = plt.subplots(figsize=figsize) if not bar: # Points if plot_train: # Plot also the training scores ax.plot(xvalues,train_val_scores[:,0], 'o:', label='Train') ax.plot(xvalues,train_val_scores[:,1], 'o:', label='Validation') # Validation scores else: # Bars if plot_train: # Plot also the training scores x = np.arange(len(xvalues)) # The label locations width = 0.35 # The width of the bars ax.bar(x-width/2,train_val_scores[:,0], width=width, label='Train') ax.bar(x+width/2,train_val_scores[:,1], width=width, label='Validation') # Validation scores ax.set_xticks(x) ax.set_xticklabels(xvalues) else: ax.bar(xvalues,train_val_scores[:,1],label='Validation') ax.set_xlabel(xlabel) ax.set_title(title) ax.grid() ax.legend() return ax #---------------------------------------------------------------------------------------------------------------------------- # FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO A SINGLE DATASET def hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel=None, title="Hyperparameter validation", figsize=(6,6)): """ Select the best value for the specified hyperparameter of the specified model on the given dataset. In other words, perform the tuning of the `hyperparameter` among the values in `hyperparameter_values`. This selection is made using the validation score (i.e. the best hyperparameter value is the one with the best validation score). 
The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the `hyperparameter_values` can be plotted, making a graphical visualization of the selection. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified `hyperparameter`. hyperparameter: str The name of the hyperparameter that has to be validated. hyperparameter_values: list List of values for `hyperparameter` that have to be taken into account in the selection. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values. plot_train: bool Indicates whether to plot also the training scores. (It's considered only if `plot` is True). xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. xlabel: str Label of the x axis of the plot. title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- train_val_scores: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of values in `hyperparameter_values` (i.e. number of values to be tested). best_index: int Index of `hyperparameter_values` that indicates which is the best hyperparameter value. test_score: float Test score associated with the best hyperparameter value. ax: matplotlib.axes.Axes The matplotlib Axes where the plot has been made. If `plot` is False, then it is None. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best hyperparameter value is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best hyperparameter value is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. 
""" param_grid = {hyperparameter:hyperparameter_values} # Create the hyperparameter grid # Call the function for the validation of an arbitrary number of hyperparameters params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr) ax = None if(plot): # Make the plot if not xvalues: # Default values on the x axis xvalues = hyperparameter_values if not xlabel: # Default label on the x axis xlabel = hyperparameter ax = _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize) return train_val_scores, best_index, test_score, ax def hyperparameters_validation(X, y, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True): """ Select the best combination of values for the specified hyperparameters of the specified model on the given dataset. In other words, perform the tuning of multiple hyperparameters. The parameter `param_grid` is a dictionary that indicates which are the specified hyperparameters and what are the associated values to test. All the possible combinations of values are tested, in an exhaustive way (i.e. grid search). This selection is made using the validation score (i.e. the best combination of hyperparameters values is the one with the best validation score). The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified hyperparameters. param_grid: dict Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of values to test. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. dataframe indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. Returns ---------- params: list List which enumerates all the possible combinations of hyperparameters values. It's a list of dictionaries: each dictionary represents a specific combination of hyperparameters values. (It's a dictionary which has as keys the hyperparameters names and as values the specific associated values of that combination). train_val_scores: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of possible combinations of the hyperparameters values. (It has as many rows as the elements of `params`). best_index: int Index of `params` that indicates which is the best combination of hyperparameters values. 
test_score: float Test score associated with the best combination of hyperparameters values. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best combination of hyperparameters values is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best combination of hyperparameters values is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. """ if regr: scoring="neg_mean_squared_error" else: scoring="accuracy" # Split into training-test sets if not time_series : # Random splitting X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state) else: # Time series splitting train_len = int(X.shape[0]*(1-test_size)) X_train_80 = X[:train_len] y_train_80 = y[:train_len] X_test = X[train_len:] y_test = y[train_len:] if(scale): # Scale the features in `X` scaler = MinMaxScaler() scaler.fit(X_train_80) X_train_80 = scaler.transform(X_train_80) X_test = scaler.transform(X_test) # Cross validation strategy if not time_series: # The strategy is the classic k-fold cross validation cv = n_folds else: # Time series cross validation strategy cv = TimeSeriesSplit(n_splits = n_folds) # Grid search grid_search = GridSearchCV(model,param_grid,scoring=scoring,cv=cv,return_train_score=True) grid_search.fit(X_train_80,y_train_80) params = grid_search.cv_results_["params"] # List of all the possible combinations of hyperparameters values # List where for all the possible combinations of hyperparameters values there is the associated training score train_scores = grid_search.cv_results_["mean_train_score"] # List where for all the possible combinations of hyperparameters values there is the associated validation score val_scores = grid_search.cv_results_["mean_test_score"] # Index of `params`, corresponding to the best combination of hyperparameters values best_index = grid_search.best_index_ # Model with the best combination of hyperparameters values best_model = grid_search.best_estimator_ if regr: # The scores are negative: multiply by -1 train_scores = train_scores*(-1) val_scores = val_scores*(-1) train_val_scores = np.concatenate((train_scores.reshape(-1,1), val_scores.reshape(-1,1)), axis=1) # Fit the best model on the whole training set best_model.fit(X_train_80,y_train_80) # Compute the test score of the best model test_score=0 if regr: test_score = mean_squared_error(y_true=y_test, y_pred=best_model.predict(X_test)) else: test_score = accuracy_score(y_true=y_test, y_pred=best_model.predict(X_test)) return params, train_val_scores, best_index, test_score def models_validation(X, y, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel="Models", title="Models validation", figsize=(6,6)): """ Select the best model on the given dataset. The parameter `model_paramGrid_list` is the list of the models to test.
It also contains, for each model, the grid of hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each specified hyperparameter of the model). (That grid has the same structure as the `param_grid` parameter of the function `hyperparameters_validation`. See `hyperparameters_validation`). For each specified model, the best combination of hyperparameters values is selected in an exhaustive way (i.e. grid search). Actually, the function `hyperparameters_validation` is used. (See `hyperparameters_validation`). The selection of the best model is made using the validation score (i.e. the best model is the one with the best validation score). The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the different models can be plotted, making a graphical visualization of the selection. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model_paramGrid_list: list List that specifies the models and the relative grids of hyperparameters to be tested. It's a list of triples (i.e. tuples), where each triple represents a model: - the first element is a string, which is a mnemonic name of that model; - the second element is the sklearn model; - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same structure of the parameter `param_grid` of the function `hyperparameters_validation`. scale_list: list or bool List of booleans, which has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list). This list indicates, for each different model, if the features in `X` have to be scaled or not. `scale_list` can be None or False: in this case the `X` features aren't scaled for any model. `scale_list` can be True: in this case the `X` features are scaled for all the models. test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values. plot_train: bool Indicates whether to plot also the training scores. (It's considered only if `plot` is True). xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. xlabel: str Label of the x axis of the plot. title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- models_train_val_score: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of models to test (i.e. number of elements in the `model_paramGrid_list` list). models_best_params: list List which indicates, for each model, the best combination of the hyperparameters values for that model. 
It has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list), and it contains dictionaries: each dictionary represents the best combination of the hyperparameters values for the associated model. best_index: int Index of `model_paramGrid_list` that indicates which is the best model. test_score: float Test score associated with the best model. ax: matplotlib.axes.Axes The matplotlib Axes where the plot has been made. If `plot` is False, then it is None. See also ---------- hyperparameters_validation: select the best combination of values for the specified hyperparameters of the specified model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best model is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best model is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. """ if not scale_list: # `scale_list` is either None or False scale_list = [False]*len(model_paramGrid_list) elif scale_list is True: # `scale_list` is True scale_list = [True]*len(model_paramGrid_list) # Numpy matrix (np.array) which has as many rows as the models and which has two columns, one for the training scores and # the other for the validation scores. At the beginning it is a list of tuples. models_train_val_score = [] # List which has as many elements as the models: for each model there is the dictionary of the best combination of # hyperparameters values. models_best_params = [] # List which has as many elements as the models: for each model there is the test score (associated with the best # combination of hyperparameters values). models_test_score = [] for i,triple in enumerate(model_paramGrid_list): # Iterate through all the couples model-param_grid model,param_grid = triple[1:] # Apply the grid search on model-param_grid params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale_list[i], test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr) models_train_val_score.append(tuple(train_val_scores[best_index])) # Add the row for that model models_best_params.append(params[best_index]) # Add the element for that model models_test_score.append(test_score) # Add the element for that model models_train_val_score = np.array(models_train_val_score) # Transform into numpy matrix (i.e. np.array) # Find the best index (i.e.
the best model) if regr: best_index = np.argmin(models_train_val_score,axis=0)[1] else: best_index = np.argmax(models_train_val_score,axis=0)[1] # Test score of the best model test_score = models_test_score[best_index] ax = None if(plot): # Make the plot if not xvalues: # Default values for the x axis xvalues = [model_paramGrid_list[i][0] for i in range(len(model_paramGrid_list))] ax = _plot_TrainVal_values(xvalues, models_train_val_score, plot_train, xlabel, title, figsize, bar=True) return models_train_val_score, models_best_params, best_index, test_score, ax #---------------------------------------------------------------------------------------------------------------------------- # FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO MULTIPLE DATASETS def datasets_hyperparameter_validation(dataset_list, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel="Datasets", title="Datasets validation", figsize=(6,6) ,verbose=False, figsize_verbose=(6,6)): """ Select the best dataset and the best value for the specified hyperparameter of the specified model (i.e. select the best couple dataset-hyperparameter value). For each dataset in `dataset_list`, all the specified values `hyperparameter_values` are tested for the specified `hyperparameter` of `model`. In other words, on each dataset the tuning of `hyperparameter` is performed: in fact, on each dataset, the function `hyperparameter_validation` is applied. (See `hyperparameter_validation`). In the end, the best couple dataset-hyperparameter value is selected. Despite the fact that a couple dataset-hyperparameter value is selected, the main viewpoint is focused with respect to the datasets. It's a validation focused on the datasets. In fact, first of all, for each dataset the hyperparameter tuning is performed: in this way the best value is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each dataset the function `hyperparameter_validation` is applied). Finally, after that, the best dataset is selected. It's a two-levels selection. This selection is made using the validation score (i.e. the best couple dataset-hyperparameter value is the one with the best validation score). The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset selection. This is the 'main' plot. Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the `hyperparameter_values` are plotted, making a graphical visualization of the hyperparameter tuning on that dataset. (As the plot made by the `hyperparameter_validation` function). Parameters ---------- dataset_list: list List of couples, where each couple is a dataset. - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset. - The second element is y, the mono dimensional np.array containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified `hyperparameter`. hyperparameter: str The name of the hyperparameter that has to be validated. 
hyperparameter_values: list List of values for `hyperparameter` that have to be taken into account in the selection. scale: bool Indicates whether to scale or not the features in 'X' (for all the datasets). (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset). time_series: bool Indicates if the given datasets are time series dataset (i.e. datasets indexed by days). (This affects the computing of the validation scores). random_state: int Used in the training-test splitting of the datasets. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot). plot_train: bool Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots). xvalues: list (in general, iterable) Values that have to be put in the x axis of the 'main' plot. xlabel: str Label of the x axis of the 'main' plot. title: str Title of the 'main' plot. figsize: tuple Two dimensions of the 'main' plot. verbose: bool If True, for each dataset are plotted the validation scores of the hyperparameter tuning (these are the 'secondary' plots). (See 'hyperparameter_validation'). figsize_verbose: tuple Two dimensions of the 'secondary' plots. Returns ---------- datasets_train_val_score: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`. datasets_best_hyperparameter_value: list List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For each dataset, it contains the best `hyperparameter` value on that dataset. best_index: int Index of `dataset_list` that indicates which is the best dataset. test_score: float Test score associated with the best couple dataset-hyperparameter value. axes: list List of the matplotlib Axes where the plots have been made. Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any). If no plot has been made, `axes` is an empty list. See also ---------- hyperparameter_validation: select the best value for the specified hyperparameter of the specified model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best couple dataset-hyperparameter value is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. """ # numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as # columns. 
At the beginning it is a list. datasets_train_val_score = [] # List which contains, for each dataset, the best hyperparameter value datasets_best_hyperparameter_value = [] # List which contains, for each dataset, its test score (associated with the best hyperparameter value) datasets_test_score = [] # List of axes axes = [] for i,dataset in enumerate(dataset_list): # Iterate through all the datasets X,y = dataset # Perform the hyperparameter tuning on the current dataset train_val_scores, best_index, test_score, ax = hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=scale, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train, xvalues=hyperparameter_values, xlabel=hyperparameter, title="Dataset "+str(i)+" : hyperparameter validation", figsize=figsize_verbose) datasets_train_val_score.append(tuple(train_val_scores[best_index,:])) # Add the row related to that dataset datasets_best_hyperparameter_value.append(hyperparameter_values[best_index]) # Add the element related to that dataset datasets_test_score.append(test_score) # Add the row related to that dataset if ax: axes.append(ax) datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy # Find the best index, i.e. the best dataset (more precisely, the best couple dataset-hyperparameter value) if regr: best_index = np.argmin(datasets_train_val_score,axis=0)[1] else: best_index = np.argmax(datasets_train_val_score,axis=0)[1] # Test score of the best couple dataset-hyperparameter value test_score = datasets_test_score[best_index] if(plot): # Make the plot if not xvalues: # Default values on the x axis xvalues = range(len(dataset_list)) ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True) axes.append(ax) return datasets_train_val_score, datasets_best_hyperparameter_value, best_index, test_score, axes def datasets_hyperparameters_validation(dataset_list, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel="Datasets", title="Datasets validation",figsize=(6,6)): """ Select the best dataset and the best combination of values for the specified hyperparameters of the specified model (i.e. select the best couple dataset-combination of hyperparameters values). For each dataset in `dataset_list`, all the possible combinations of the hyperparameters values for `model` (specified with `param_grid`) are tested. In other words, on each dataset the tuning of the specified hyperparameters is performed in an exhaustive way: in fact, on each dataset, the function `hyperparameters_validation` is applied. (See `hyperparameters_validation`). In the end, the best couple dataset-combination of hyperparameters values is selected. Despite the fact that a couple dataset-combination of hyperparameters values is selected, the main viewpoint is focused with respect to the datasets. It's a validation focused on the datasets. In fact, first of all, for each dataset the hyperparameters tuning is performed: in this way the best combination of values is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each dataset the function `hyperparameters_validation` is applied). Finally, after that, the best dataset is selected. It's a two-levels selection. This selection is made using the validation score (i.e. 
the best couple dataset-combination of hyperparameters values, is the one with best validation score). The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset selection. Parameters ---------- dataset_list: list List of couple, where each couple is a dataset. - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset. - The second element is y, the mono dimensional np.array containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified hyperparameters. param_grid: dict Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of values to test. scale: bool Indicates whether to scale or not the features in 'X' (for all the datasets). (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset). time_series: bool Indicates if the given datasets are time series datasets (i.e. datasets indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the datasets. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values of the datasets. plot_train: bool Indicates whether to plot also the training scores. (It's considered only if `plot` is True). xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. xlabel: str Label of the x axis of the plot. title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- datasets_train_val_score: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`. datasets_best_params: list List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For each dataset, it contains the best combination of hyperparameters values on that dataset. Each combination is represented as a dictionary, with keys the hyperparameters names and values the associated values. best_index: int Index of `dataset_list` that indicates which is the best dataset. test_score: float Test score associated with the best couple dataset-combination of hyperparameters values. ax: matplotlib.axes.Axes The matplotlib Axes where the plot has been made. See also ---------- hyperparameters_validation: select the best combination of values for the specified hyperparameters of the specified model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best couple dataset-combination of hyperparameters values is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the maximum validation score. 
- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. """ # numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as # columns. At the beginning it is a list. datasets_train_val_score = [] # List which contains, for each dataset, the best combination of hyperparameters values (i.e. a dictionary) datasets_best_params = [] # List which contains, for each dataset, its test score (associated with the best combination of hyperparameters values) datasets_test_score = [] for X,y in dataset_list: # Iterate through all the datasets # Perform the exhaustive hyperparameters tuning on the current dataset params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr) datasets_train_val_score.append(tuple(train_val_scores[best_index,:])) # Add the row related to that dataset datasets_best_params.append(params[best_index]) # Add the element related to that dataset datasets_test_score.append(test_score) # Add the element related to that dataset datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy # Find the best index, i.e. the best dataset (more precisely, the best couple dataset-combination of hyperparameters # values) if regr: best_index = np.argmin(datasets_train_val_score,axis=0)[1] else: best_index = np.argmax(datasets_train_val_score,axis=0)[1] # Test score of the best couple dataset-combination of hyperparameters values test_score = datasets_test_score[best_index] ax = None if(plot): # Make the plot if not xvalues: # Default values on the x axis xvalues = range(len(dataset_list)) ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True) return datasets_train_val_score, datasets_best_params, best_index, test_score, ax def datasets_models_validation(dataset_list, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel="Datasets", title="Datasets validation", figsize=(6,6) ,verbose=False, figsize_verbose=(6,6)): """ Select the best dataset and the best model (i.e. select the best couple dataset-model). For each dataset in `dataset_list`, all the models in `model_paramGrid_list` are tested: each model is tested performing an exhaustive tuning of the specified hyperparameters. In fact, `model_paramGrid_list` also contains, for each model, the grid of the hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each specified hyperparameter of the model). In other words, on each dataset the selection of the best model is performed: in fact, on each dataset, the function `models_validation` is applied. (See `models_validation`). In the end, the best couple dataset-model is selected. Despite the fact that a couple dataset-model is selected, the main viewpoint is focused with respect to the datasets. It's a validation focused on the datasets.
In fact, first of all, for each dataset the model selection is performed: in this way the best model is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each dataset the function `models_validation` is applied). Finally, after that, the best dataset is selected. It's a two-levels selection. This selection is made using the validation score (i.e. the best couple dataset-model is the one with best validation score). The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset selection. This is the 'main' plot. Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the models are plotted, making a graphical visualization of the models selection on that dataset. (As the plot made by the `models_validation` function). Parameters ---------- dataset_list: list List of couples, where each couple is a dataset. - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset. - The second element is y, the mono dimensional np.array containing the response feature of the dataset. model_paramGrid_list: list List that specifies the models and the relative grid of hyperparameters to be tested. It's a list of triples (i.e. tuples), where each triple represents a model: - the first element is a string, which is a mnemonic name of that model; - the second element is the sklearn model; - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same structure of parameter `param_grid` of the function `hyperparameters_validation`. scale_list: list or bool List of booleans, which has as many elements as the number of models to test (i.e. number of elements in the `model_paramGrid_list` list). This list indicates, for each different model, if the features in 'X' have to be scaled or not (for all the datasets). `scale_list` can be None or False: in this case the 'X' features aren't scaled for any model. `scale_list` can be True: in this case the 'X' features are scaled for all the models. test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset). time_series: bool Indicates if the given datasets are time series dataset (i.e. datasets indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the datasets. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot). plot_train: bool Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots). xvalues: list (in general, iterable) Values that have to be put in the x axis of the 'main' plot. xlabel: str Label of the x axis of the 'main' plot. title: str Title of the 'main' plot. figsize: tuple Two dimensions of the 'main' plot. verbose: bool If True, for each dataset the validation scores of the models are plotted (i.e. these are the 'secondary' plots). 
(See 'models_validation'). figsize_verbose: tuple Two dimensions of the 'secondary' plots. Returns ---------- datasets_train_val_score: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`. datasets_best_model: list List which has as many elements as the number of the datasets (i.e. number of elements in `dataset_list`). For each dataset, it contains the best model for that dataset. More precisely, it is a list of triple: - the first element is the index of `model_paramGrid_list` which indicates the best model; - the second element is the mnemonic name of the best model; - the third element is the best combination of hyperparameters values on that best model (i.e. it's a dictionary which has as keys the hyperparameters names and as values their associated values). best_index: int Index of `dataset_list` that indicates which is the best dataset. test_score: float Test score associated with the best couple dataset-model. axes: list List of the matplotlib Axes where the plots have been made. Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any). If no plot has been made, `axes` is an empty list. See also ---------- models_validation: select the best model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best couple dataset-model is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. """ # numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as # columns. At the beginning it is a list. datasets_train_val_score = [] # List which contains, for each dataset, the best model. I.e. 
there is the triple index-model name-best combination of # hyperparameters values datasets_best_model = [] # List which contains, for each dataset, its test score (associated to the best model) datasets_test_score = [] # List of axes axes = [] for i,dataset in enumerate(dataset_list): # Iterate through all the datasets X,y = dataset # Perform the models validation on the current dataset models_train_val_score, models_best_params, best_index, test_score, ax = models_validation(X, y, model_paramGrid_list, scale_list=scale_list, test_size=test_size, time_series=time_series, random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train, xlabel="Models", title=("Dataset "+str(i)+ " : models validation"), figsize=figsize_verbose) datasets_train_val_score.append(tuple(models_train_val_score[best_index,:])) # Add the row related to that dataset # Add the element related to that dataset datasets_best_model.append((best_index,model_paramGrid_list[best_index][0],models_best_params[best_index])) datasets_test_score.append(test_score) # Add the element related to that dataset if ax: axes.append(ax) datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy # Find the best index, i.e. the best dataset (more precisely, the best couple dataset-model) if regr: best_index = np.argmin(datasets_train_val_score,axis=0)[1] else: best_index = np.argmax(datasets_train_val_score,axis=0)[1] # Test score of the best couple dataset-model test_score = datasets_test_score[best_index] if(plot): # Make the plot if not xvalues: # Default values on the x axis xvalues = range(len(dataset_list)) ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True) axes.append(ax) return datasets_train_val_score, datasets_best_model, best_index, test_score, axes
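#----------------------------------------------------------------------------------------------------------------------------
# USAGE EXAMPLE
# Minimal illustrative sketch of how the selection functions above can be combined. The synthetic data and the choice of
# Ridge/KNeighborsRegressor with their small grids are assumptions made only for demonstration; replace them with the
# actual datasets, models and hyperparameter grids of interest.

if __name__ == "__main__":
    from sklearn.linear_model import Ridge
    from sklearn.neighbors import KNeighborsRegressor

    rng = np.random.RandomState(0)  # `np` is the module-level numpy import
    X1 = rng.rand(150, 4)                                                    # first synthetic dataset
    y1 = X1 @ np.array([1.0, -2.0, 0.5, 0.0]) + 0.1 * rng.randn(150)
    X2 = rng.rand(150, 4)                                                    # second synthetic dataset
    y2 = X2 @ np.array([0.0, 1.0, 1.0, -1.0]) + 0.1 * rng.randn(150)

    # Models to compare, each with its own grid of hyperparameters values
    model_paramGrid_list = [
        ("ridge", Ridge(), {"alpha": [0.01, 0.1, 1, 10]}),
        ("knn", KNeighborsRegressor(), {"n_neighbors": [3, 5, 7]}),
    ]

    # Select the best model on a single dataset
    train_val, best_params, best_model_index, test_score, _ = models_validation(X1, y1, model_paramGrid_list,
                                                                                scale_list=True, regr=True)
    print("Best model:", model_paramGrid_list[best_model_index][0], "- test MSE:", test_score)

    # Select the best couple dataset-model among several datasets
    dataset_list = [(X1, y1), (X2, y2)]
    train_val, best_models, best_dataset_index, test_score, _ = datasets_models_validation(dataset_list,
                                                                                           model_paramGrid_list,
                                                                                           scale_list=True, regr=True)
    print("Best dataset index:", best_dataset_index, "- test MSE:", test_score)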
Polynomial regression model. It's a sklearn model: it's compliant to the sklearn estimators interface. `Example <https://scikit-learn.org/stable/developers/develop.html>`_ Parameters ---------- degree: int Degree to apply for the polynomial transformation. Notes ---------- The polynomial transformation is performed using the sklearn PolynomialFeatures. Plot the given list of training-validation scores. This function is an auxiliary function for the model selection functions. It's meant to be private in the module. Parameters ---------- xvalues: list (in general iterable) Values to put in the x axis of the plot. train_val_scores: np.array Two dimensional np.array, containing two columns: the first contains the trainining scores, the second the validation scores. Basically, it is a list of training-validation scores. plot_train: bool Indicates whether to plot also the training scores or to plot only the validation ones. xlabel: str Label of the x axis. title: str Title of the plot. figsize: tuple Two dimensions of the plot. bar: bool Indicates whether to plot the scores using bars or using points. If `bar` it's True, `xvalues` must contain string (i.e. labels). Returns ---------- matplotlib.axes.Axes The matplotlib Axes where the plot has been made. Compute the bias^2-variance-error scores for the given model on the given dataset. These measures are computed in an approximate way, using `N_TESTS` random samples of size `sample_size` from the dataset. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model to evaluate. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). N_TESTS: int Number of samples that are made in order to compute the measures. sample_size: float Decimal number between 0 and 1, which indicates the proportion of the sample. Returns ---------- bias: float variance: float error: float Compute the training-validation-test scores for the given model on the given dataset. The training and test scores are simply computed by splitting the dataset into the training and test sets. The validation score is performed applying the cross validation on the training set. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model to evaluate. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. datasets indexed by days). (This affects the computing of the scores). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. Returns ---------- train_score: float val_score: float test_score: float Notes ---------- - If `regr` is True, the returned scores are errors, computed using the MSE formula (i.e. Mean Squared Error). Otherwise, the returned scores are accuracy measures. 
- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are obtained simply by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. Select the best dataset and the best value for the specified hyperparameter of the specified model (i.e. select the best couple dataset-hyperparameter value). For each dataset in `dataset_list`, all the specified values `hyperparameter_values` are tested for the specified `hyperparameter` of `model`. In other words, on each dataset the tuning of `hyperparameter` is performed: in fact, on each dataset, the function `hyperparameter_validation` is applied. (See `hyperparameter_validation`). In the end, the best couple dataset-hyperparameter value is selected. Despite the fact that a couple dataset-hyperparameter value is selected, the main viewpoint is focused with respect to the datasets. It's a validation focused on the datasets. In fact, first of all, for each dataset the hyperparameter tuning is performed: in this way the best value is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each dataset the function `hyperparameter_validation` is applied). Finally, after that, the best dataset is selected. It's a two-levels selection. This selection is made using the validation score (i.e. the best couple dataset-hyperparameter value is the one with the best validation score). The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset selection. This is the 'main' plot. Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the `hyperparameter_values` are plotted, making a graphical visualization of the hyperparameter tuning on that dataset. (As the plot made by the `hyperparameter_validation` function). Parameters ---------- dataset_list: list List of couples, where each couple is a dataset. - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset. - The second element is y, the mono dimensional np.array containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified `hyperparameter`. hyperparameter: str The name of the hyperparameter that has to be validated. hyperparameter_values: list List of values for `hyperparameter` that have to be taken into account in the selection. scale: bool Indicates whether to scale or not the features in 'X' (for all the datasets). (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset). time_series: bool Indicates if the given datasets are time series dataset (i.e. datasets indexed by days). (This affects the computing of the validation scores). random_state: int Used in the training-test splitting of the datasets. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. 
(It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot). plot_train: bool Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots). xvalues: list (in general, iterable) Values that have to be put in the x axis of the 'main' plot. xlabel: str Label of the x axis of the 'main' plot. title: str Title of the 'main' plot. figsize: tuple Two dimensions of the 'main' plot. verbose: bool If True, for each dataset are plotted the validation scores of the hyperparameter tuning (these are the 'secondary' plots). (See 'hyperparameter_validation'). figsize_verbose: tuple Two dimensions of the 'secondary' plots. Returns ---------- datasets_train_val_score: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`. datasets_best_hyperparameter_value: list List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For each dataset, it contains the best `hyperparameter` value on that dataset. best_index: int Index of `dataset_list` that indicates which is the best dataset. test_score: float Test score associated with the best couple dataset-hyperparameter value. axes: list List of the matplotlib Axes where the plots have been made. Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any). If no plot has been made, `axes` is an empty list. See also ---------- hyperparameter_validation: select the best value for the specified hyperparameter of the specified model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best couple dataset-hyperparameter value is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. Select the best dataset and the best combination of values for the specified hyperparameters of the specified model (i.e. select the best couple dataset-combination of hyperparameters values). For each dataset in `dataset_list`, all the possible combinations of the hyperparameters values for `model` (specified with `param_grid`) are tested. In other words, on each dataset the tuning of the specified hyperparameters is performed in an exhaustive way: in fact, on each dataset, the function `hyperparameters_validation` is applied. (See `hyperparameters_validation`). In the end, the best couple dataset-combination of hyperparameters values is selected. Despite the fact that a couple dataset-combination of hyperparameters values is selected, the main viewpoint is focused with respect to the datasets. It's a validation focused on the datasets. 
In fact, first of all, for each dataset the hyperparameters tuning is performed: in this way the best combination of values is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each dataset the function `hyperparameters_validation` is applied). Finally, after that, the best dataset is selected. It's a two-levels selection. This selection is made using the validation score (i.e. the best couple dataset-combination of hyperparameters values, is the one with best validation score). The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset selection. Parameters ---------- dataset_list: list List of couple, where each couple is a dataset. - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset. - The second element is y, the mono dimensional np.array containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified hyperparameters. param_grid: dict Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of values to test. scale: bool Indicates whether to scale or not the features in 'X' (for all the datasets). (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset). time_series: bool Indicates if the given datasets are time series datasets (i.e. datasets indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the datasets. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values of the datasets. plot_train: bool Indicates whether to plot also the training scores. (It's considered only if `plot` is True). xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. xlabel: str Label of the x axis of the plot. title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- datasets_train_val_score: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`. datasets_best_params: list List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For each dataset, it contains the best combination of hyperparameters values on that dataset. Each combination is represented as a dictionary, with keys the hyperparameters names and values the associated values. best_index: int Index of `dataset_list` that indicates which is the best dataset. test_score: float Test score associated with the best couple dataset-combination of hyperparameters values. ax: matplotlib.axes.Axes The matplotlib Axes where the plot has been made. 
See also ---------- hyperparameters_validation: select the best combination of values for the specified hyperparameters of the specified model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best couple dataset-combination of hyperparameters values is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. Select the best dataset and the best model (i.e. select the best couple dataset-model). For each dataset in `dataset_list`, all the models in `model_paramGrid_list` are tested: each model is tested performing an exhaustive tuning of the specified hyperparameters. In fact, `model_paramGrid_list` also contains, for each model, the grid of the hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each specified hyperparameter of the model). In other words, on each dataset the selection of the best model is performed: in fact, on each dataset, the function `models_validation` is applied. (See `models_validation`). In the end, the best couple dataset-model is selected. Despite the fact that a couple dataset-model is selected, the main viewpoint is focused with respect to the datasets. It's a validation focused on the datasets. In fact, first of all, for each dataset the model selection is performed: in this way the best model is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each dataset the function `models_validation` is applied). Finally, after that, the best dataset is selected. It's a two-levels selection. This selection is made using the validation score (i.e. the best couple dataset-model is the one with best validation score). The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset selection. This is the 'main' plot. Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the models are plotted, making a graphical visualization of the models selection on that dataset. (As the plot made by the `models_validation` function). Parameters ---------- dataset_list: list List of couples, where each couple is a dataset. - The first element is X, the two-dimensional np.array containing the explanatory features of the dataset. - The second element is y, the mono dimensional np.array containing the response feature of the dataset. model_paramGrid_list: list List that specifies the models and the relative grid of hyperparameters to be tested. It's a list of triples (i.e. 
tuples), where each triple represents a model: - the first element is a string, which is a mnemonic name of that model; - the second element is the sklearn model; - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same structure of parameter `param_grid` of the function `hyperparameters_validation`. scale_list: list or bool List of booleans, which has as many elements as the number of models to test (i.e. number of elements in the `model_paramGrid_list` list). This list indicates, for each different model, if the features in 'X' have to be scaled or not (for all the datasets). `scale_list` can be None or False: in this case the 'X' features aren't scaled for any model. `scale_list` can be True: in this case the 'X' features are scaled for all the models. test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset). time_series: bool Indicates if the given datasets are time series dataset (i.e. datasets indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the datasets. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot). plot_train: bool Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots). xvalues: list (in general, iterable) Values that have to be put in the x axis of the 'main' plot. xlabel: str Label of the x axis of the 'main' plot. title: str Title of the 'main' plot. figsize: tuple Two dimensions of the 'main' plot. verbose: bool If True, for each dataset the validation scores of the models are plotted (i.e. these are the 'secondary' plots). (See 'models_validation'). figsize_verbose: tuple Two dimensions of the 'secondary' plots. Returns ---------- datasets_train_val_score: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`. datasets_best_model: list List which has as many elements as the number of the datasets (i.e. number of elements in `dataset_list`). For each dataset, it contains the best model for that dataset. More precisely, it is a list of triple: - the first element is the index of `model_paramGrid_list` which indicates the best model; - the second element is the mnemonic name of the best model; - the third element is the best combination of hyperparameters values on that best model (i.e. it's a dictionary which has as keys the hyperparameters names and as values their associated values). best_index: int Index of `dataset_list` that indicates which is the best dataset. test_score: float Test score associated with the best couple dataset-model. axes: list List of the matplotlib Axes where the plots have been made. Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any). If no plot has been made, `axes` is an empty list. See also ---------- models_validation: select the best model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. 
Mean Squared Errors): this means that the best couple dataset-model is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. Select the best value for the specified hyperparameter of the specified model on the given dataset. In other words, perform the tuning of the `hyperparameter` among the values in `hyperparameter_values`. This selection is made using the validation score (i.e. the best hyperparameter value is the one with the best validation score). The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the `hyperparameter_values` can be plotted, making a graphical visualization of the selection. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified `hyperparameter`. hyperparameter: str The name of the hyperparameter that has to be validated. hyperparameter_values: list List of values for `hyperparameter` that have to be taken into account in the selection. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values. plot_train: bool Indicates whether to plot also the training scores. (It's considered only if `plot` is True). xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. xlabel: str Label of the x axis of the plot. title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- train_val_scores: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of values in `hyperparameter_values` (i.e. number of values to be tested). best_index: int Index of `hyperparameter_values` that indicates which is the best hyperparameter value. test_score: float Test score associated with the best hyperparameter value. ax: matplotlib.axes.Axes The matplotlib Axes where the plot has been made. If `plot` is False, then it is None. 
Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best hyperparameter value is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best hyperparameter value is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. Select the best combination of values for the specified hyperparameters of the specified model on the given dataset. In other words, perform the tuning of multiple hyperparameters. The parameter `param_grid` is a dictionary that indicates which are the specified hyperparameters and what are the associated values to test. All the possible combinations of values are tested, in an exhaustive way (i.e. grid search). This selection is made using the validation score (i.e. the best combination of hyperparameters values is the one with the best validation score). The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model which has the specified hyperparameters. param_grid: dict Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of values to test. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. dataframe indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. (It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. Returns ---------- params: list List which enumerates all the possible combinations of hyperparameters values. It's a list of dictionaries: each dictionary represents a specific combination of hyperparameters values. (It's a dictionary which has as keys the hyperparameters names and as values the specific associated values of that combination). train_val_scores: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of possible combinations of the hyperparameters values. (It has as many rows as the elements of `params`). best_index: int Index of `params` that indicates which is the best combination of hyperparameters values. test_score: float Test score associated with the best combination of hyperparameters values. 
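Since the module docstring states that the selection is performed with sklearn's GridSearchCV under the hood, an exhaustive `param_grid` validation like the one documented here can be sketched as follows. This is a hedged reconstruction of the described behaviour, not the actual function body; the name and the exact return shapes are assumptions.

import numpy as np
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit, train_test_split


def hyperparameters_validation_sketch(X, y, model, param_grid, test_size=0.2,
                                      time_series=False, random_state=123,
                                      n_folds=5, regr=True):
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, shuffle=not time_series,
        random_state=None if time_series else random_state)

    cv = TimeSeriesSplit(n_splits=n_folds) if time_series else n_folds
    scoring = "neg_mean_squared_error" if regr else "accuracy"

    # Exhaustive search over every combination of hyperparameter values.
    gs = GridSearchCV(model, param_grid, scoring=scoring, cv=cv,
                      return_train_score=True, refit=True)
    gs.fit(X_train, y_train)

    params = gs.cv_results_["params"]
    train_scores = gs.cv_results_["mean_train_score"]
    val_scores = gs.cv_results_["mean_test_score"]
    if regr:
        # Negative MSE back to positive errors.
        train_scores, val_scores = -train_scores, -val_scores
    train_val_scores = np.column_stack([train_scores, val_scores])

    best_index = gs.best_index_
    test_score = gs.score(X_test, y_test)  # score of the refit best estimator
    if regr:
        test_score = -test_score
    return params, train_val_scores, best_index, test_score

A typical `param_grid`, for instance `{"n_estimators": [50, 100], "max_depth": [3, 5]}` for a random forest, produces one row of `train_val_scores` for each of the four combinations.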
Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best combination of hyperparameters values is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best combination of hyperparameters values is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. Select the best model on the given dataset. The parameter `model_paramGrid_list` is the list of the models to test. It also contains, for each model, the grid of hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each specified hyperparameter of the model). (That grid has the same structure as the `param_grid` parameter of the function `hyperparameters_validation`. See `hyperparameters_validation`). For each specified model, the best combination of hyperparameters values is selected in an exhaustive way (i.e. grid search). Actually, the function `hyperparameters_validation` is used. (See `hyperparameters_validation`). The selection of the best model is made using the validation score (i.e. the best model is the one with the best validation score). The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross validation on the training set. Additionally, the training and test scores are also computed. Optionally, the validation scores of the different models can be plotted, making a graphical visualization of the selection. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model_paramGrid_list: list List that specifies the models and the relative grids of hyperparameters to be tested. It's a list of triples (i.e. tuples), where each triple represents a model: - the first element is a string, which is a mnemonic name of that model; - the second element is the sklearn model; - the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same structure of the parameter `param_grid` of the function `hyperparameters_validation`. scale_list: list or bool List of booleans, which has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list). This list indicates, for each different model, if the features in `X` have to be scaled or not. `scale_list` can be None or False: in this case the `X` features aren't scaled for any model. `scale_list` can be True: in this case the `X` features are scaled for all the models. test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. time_series: bool Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days). (This affects the computing of the validation score). random_state: int Used in the training-test splitting of the dataset. n_folds: int Indicates how many folds are made in order to compute the k-fold cross validation. 
(It's used only if `time_series` is False). regr: bool Indicates if it's either a regression or a classification problem. plot: bool Indicates whether to plot or not the validation score values. plot_train: bool Indicates whether to plot also the training scores. (It's considered only if `plot` is True). xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. xlabel: str Label of the x axis of the plot. title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- models_train_val_score: np.array Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation scores. It has as many rows as the number of models to test (i.e. number of elements in the `model_paramGrid_list` list). models_best_params: list List which indicates, for each model, the best combination of the hyperparameters values for that model. It has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list), and it contains dictionaries: each dictionary represents the best combination of the hyperparameters values for the associated model. best_index: int Index of `model_paramGrid_list` that indicates which is the best model. test_score: float Test score associated with the best model. ax: matplotlib.axes.Axes The matplotlib Axes where the plot has been made. If `plot` is False, then it is None. See also ---------- hyperparameters_validation: select the best combination of values for the specified hyperparameters of the specified model on the given dataset. Notes ---------- - If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best model is the one associated with the minimum validation score. Otherwise, the validation scores are accuracies: this means that the best model is the one associated with the maximum validation score. - If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`. Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit. Plot the predictions made by the given model on the given dataset, versus its actual values. The dataset is split into training-test sets: the former is used to train the `model`, on the latter the predictions are made. Parameters ---------- X: np.array Two-dimensional np.array, containing the explanatory features of the dataset. y: np.array Mono dimensional np.array, containing the response feature of the dataset. model: sklearn.base.BaseEstimator Model used to make the predictions. scale: bool Indicates whether to scale or not the features in `X`. (The scaling is performed using the sklearn MinMaxScaler). test_size: float Decimal number between 0 and 1, which indicates the proportion of the test set. plot_type: int Indicates the type of the plot. - 0 -> In the same plot two different curves are drawn: the first has on the x axis `xvalues` and on the y axis the actual values (i.e. `y`); the second has on the x axis `xvalues` and on the y axis the computed predicted values. - 1 -> On the x axis the actual values are put, on the y axis the predicted ones. xvalues: list (in general, iterable) Values that have to be put in the x axis of the plot. 
(It's used only if `plot_type` is 0). xlabel: str Label of the x axis of the plot. (It's used only if `plot_type` is 0). title: str Title of the plot. figsize: tuple Two dimensions of the plot. Returns ---------- matplotlib.axes.Axes The matplotlib Axes where the plot has been made. Notes ---------- The splitting of the dataset into the training-test sets is simply made by dividing the dataset into two contiguous sequences, i.e. it is the same technique usually used when the dataset is a time series dataset. (This is done in order to simplify the visualization). For this reason, this function is typically applied to time series datasets. Module for the selection of machine learning models. There are several different functions which can perform the model selection: all of them have an intuitive interface, but are also powerful and flexible. In addition, almost all these functions can optionally make plots, which sum up the performed selection in a visual way. These different functions perform the model selection in different contexts, i.e. each function is specifically meant for a specific scenario. Certain contexts are more specific, and others are more general. On the whole, there are six different model selection functions, divided into two main groups: 1. functions that perform the model selection with respect to a **single dataset**; 2. functions that perform the model selection with respect to **multiple datasets**. The six functions, sorted from the most specific context to the most general one, are: - *hyperparameter_validation*, *hyperparameters_validation*, *models_validation* (single dataset); - *datasets_hyperparameter_validation*, *datasets_hyperparameters_validation*, *datasets_models_validation* (multiple datasets). This module deeply uses the **numpy** library and is built on top of it: in fact, the datasets are represented as np.array. Moreover, the plots are made using the **matplotlib** library. In addition, it is built on top of the **sklearn** library: - the machine learning models are represented as sklearn models (i.e. sklearn estimators); - under the hood, the selection is performed using the grid search cross validation provided by sklearn (i.e. GridSearchCV); - several other operations are done using the functionalities provided by sklearn. Besides the model selection functions, this module also contains some utilities: - the PolynomialRegression class; - some utility functions. ---------------------------------------------------------------------------------------------------------------------------- POLYNOMIAL REGRESSOR MODEL ---------------------------------------------------------------------------------------------------------------------------- UTILITY FUNCTIONS Split into training and test. Random splitting (not time series) Time series splitting Scale the features in X Cross validation k-fold cross validation Cross validation for time series Validation score Fit the model using all the training set Compute training and test scores Return a triple Scale the features in `X` Vector 'vector_ypred': at the beginning it is a list of lists (i.e. a two-dimensional list). In the end it will be a matrix which has as many rows as `N_TESTS` (each row corresponds to a sample) and as many columns as the number of instances in `X` (each column is a point of the dataset). Row 'i' --> the predictions made by the model fitted on sample 'i', for all the dataset points. 
Column 'j' --> the predictions made by the model on the point 'j', across all the `N_TESTS` samples. Iterate through N_TESTS. At each iteration extract a new sample and fit the model on it. Extract a new sample (sample 'i') Fit the model on this sample 'i' Add the predictions made by the model on all the dataset points Transform into numpy array Vector that has as many elements as the dataset points, and for each of them it has the associated bias^2 computed on the `N_TESTS` samples. Vector that has as many elements as the dataset points, and for each of them it has the associated variance computed on the `N_TESTS` samples. Vector that has as many elements as the dataset points, and for each of them it has the associated error computed on the `N_TESTS` samples. Total bias^2 of the model Total variance of the model Total error of the model Return a triple Scale the features in X Fit using all the training set Points Plot also the training scores Validation scores Bars Plot also the training scores The label locations The width of the bars Validation scores ---------------------------------------------------------------------------------------------------------------------------- FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO A SINGLE DATASET Create the hyperparameter grid Call the function for the validation of an arbitrary number of hyperparameters Make the plot Default values on the x axis Default label on the x axis Split into training-test sets Random splitting Time series splitting Scale the features in `X` Cross validation strategy The strategy is the classic k-fold cross validation Time series cross validation strategy Grid search List of all the possible combinations of hyperparameters values List where for all the possible combinations of hyperparameters values there is the associated training score List where for all the possible combinations of hyperparameters values there is the associated validation score Index of `params`, corresponding to the best combination of hyperparameters values Model with the best combination of hyperparameters values The scores are negative: multiply by -1 Fit the best model on all the training set Compute the test score of the best model `scale_list` is either None or False `scale_list` is True Numpy matrix (np.array) which has as many rows as the models and which has two columns, one for the training scores and the other for the validation scores. At the beginning it is a list of tuples. List which has as many elements as the models: for each model there is the dictionary of the best combination of hyperparameters values. List which has as many elements as the models: for each model there is the test score (associated with the best combination of hyperparameters values). Iterate through all the couples model-param_grid Apply the grid search on model-param_grid Add the row for that model Add the element for that model Add the element for that model Transform into numpy matrix (i.e. np.array) Find the best index (i.e. the best model) Test score of the best model Make the plot Default values for the x axis ---------------------------------------------------------------------------------------------------------------------------- FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO MULTIPLE DATASETS Numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as columns. At the beginning it is a list. 
List which contains, for each dataset, the best hyperparameter value List which contains, for each dataset, its test score (associated with the best hyperparameter value) List of axes Iterate through all the datasets Perform the hyperparameter tuning on the current dataset Add the row related to that dataset Add the element related to that dataset Add the row related to that dataset Transform into numpy Find the best index, i.e. the best dataset (more precisely, the best couple dataset-hyperparameter value) Test score of the best couple dataset-hyperparameter value Make the plot Default values on the x axis Numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as columns. At the beginning it is a list. List which contains, for each dataset, the best combination of hyperparameters values (i.e. a dictionary) List which contains, for each dataset, its test score (associated with the best combination of hyperparameters values) Iterate through all the datasets Perform the exhaustive hyperparameters tuning on the current dataset Add the row related to that dataset Add the element related to that dataset Add the row related to that dataset Transform into numpy Find the best index, i.e. the best dataset (more precisely, the best couple dataset-combination of hyperparameters values) Test score of the best couple dataset-combination of hyperparameters values Make the plot Default values on the x axis Numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as columns. At the beginning it is a list. List which contains, for each dataset, the best model, i.e. the triple index-model name-best combination of hyperparameters values List which contains, for each dataset, its test score (associated with the best model) List of axes Iterate through all the datasets Perform the models validation on the current dataset Add the row related to that dataset Add the element related to that dataset Add the element related to that dataset Transform into numpy Find the best index, i.e. the best dataset (more precisely, the best couple dataset-model) Test score of the best couple dataset-model Make the plot Default values on the x axis
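The comments above about `vector_ypred`, the per-point bias^2, variance and error vectors, and the three totals describe a resampling-based bias-variance decomposition. Below is a hedged sketch of that computation; the function name, the use of sampling with replacement and the default `N_TESTS` are assumptions drawn from those comments, not the module's actual code.

import numpy as np


def bias_variance_sketch(model, X, y, N_TESTS=100, sample_size=None, random_state=123):
    rng = np.random.default_rng(random_state)
    n = X.shape[0]
    sample_size = sample_size or n

    # vector_ypred: one row per extracted sample, one column per dataset point.
    vector_ypred = []
    for _ in range(N_TESTS):
        # Extract a new sample (here: with replacement) and fit the model on it.
        idx = rng.integers(0, n, size=sample_size)
        model.fit(X[idx], y[idx])
        # Predictions made by this fitted model on all the dataset points.
        vector_ypred.append(model.predict(X))
    vector_ypred = np.array(vector_ypred)

    # Per-point bias^2, variance and error over the N_TESTS samples.
    mean_pred = vector_ypred.mean(axis=0)
    vector_bias2 = (y - mean_pred) ** 2
    vector_variance = vector_ypred.var(axis=0)
    vector_error = ((vector_ypred - y) ** 2).mean(axis=0)

    # Total bias^2, variance and error of the model: return a triple.
    return vector_bias2.mean(), vector_variance.mean(), vector_error.mean()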
45,537
en
0.838156
#!/usr/bin/env python3
import importlib.machinery as imm
import logging
import pathlib
import re

import configargparse


class ModuleInfo:
    def __init__(self, path):
        self.path = pathlib.Path(path)
        name = str(self.path.parent / self.path.stem)
        name = name.replace("/", ".")
        self.name = re.sub(r"^[\.]+", "", name)
        self.module = imm.SourceFileLoader(self.name, path).load_module()
        if not hasattr(self.module, "get_parser"):
            raise ValueError(f"{path} does not have get_parser()")


def get_parser():
    parser = configargparse.ArgumentParser(
        description='generate RST from argparse options',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('src', type=str, nargs='+',
                        help='source python files that contain get_parser() func')
    return parser


# parser
args = get_parser().parse_args()

modinfo = []
for p in args.src:
    if "__init__.py" in p:
        continue
    modinfo.append(ModuleInfo(p))

# print refs
for m in modinfo:
    logging.info(f"processing: {m.path.name}")
    d = m.module.get_parser().description
    assert d is not None
    print(f"- :ref:`{m.path.name}`: {d}")

print()

# print argparse
for m in modinfo:
    cmd = m.path.name
    sep = "~" * len(cmd)
    print(f"""
.. _{cmd}:

{cmd}
{sep}

.. argparse::
   :module: {m.name}
   :func: get_parser
   :prog: {cmd}

""")
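The script above assumes that every source file passed on the command line defines a `get_parser()` function returning a configargparse parser. A hypothetical module satisfying that contract (file name and options invented for illustration) could look like this:

# tools/example_tool.py -- hypothetical module consumed by argparse2rst.py
import configargparse


def get_parser():
    parser = configargparse.ArgumentParser(
        description="Example tool whose options are rendered to RST",
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument("--input", type=str, required=True, help="input file")
    parser.add_argument("--verbose", action="store_true", help="enable verbose logging")
    return parser


if __name__ == "__main__":
    args = get_parser().parse_args()
    print(args)

Running `python doc/argparse2rst.py tools/example_tool.py` would then print one `- :ref:` reference line for the module followed by an `.. argparse::` directive block for it.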
doc/argparse2rst.py
1,513
!/usr/bin/env python3 parser print refs print argparse
54
de
0.080533
""" util_list module. Contains the mflist class. This classes encapsulates modflow-style list inputs away from the individual packages. The end-user should not need to instantiate this class directly. some more info """ from __future__ import division, print_function import os import warnings import numpy as np from ..datbase import DataInterface, DataListInterface, DataType from ..utils.recarray_utils import create_empty_recarray try: from numpy.lib import NumpyVersion numpy114 = NumpyVersion(np.__version__) >= "1.14.0" except ImportError: numpy114 = False class MfList(DataInterface, DataListInterface): """ a generic object for handling transient boundary condition lists Parameters ---------- package : package object The package object (of type :class:`flopy.pakbase.Package`) to which this MfList will be added. data : varies the data of the transient list (optional). (the default is None) Attributes ---------- mxact : int the max number of active bc for any stress period Methods ------- add_record(kper,index,value) : None add a record to stress period kper at index location write_transient(f) : None write the transient sequence to the model input file f check_kij() : None checks for boundaries outside of model domain - issues warnings only See Also -------- Notes ----- Examples -------- """ def __init__( self, package, data=None, dtype=None, model=None, list_free_format=None, binary=False, ): if isinstance(data, MfList): for attr in data.__dict__.items(): setattr(self, attr[0], attr[1]) if model is None: self._model = package.parent else: self._model = model self._package = package return self._package = package if model is None: self._model = package.parent else: self._model = model if dtype is None: assert isinstance(self.package.dtype, np.dtype) self.__dtype = self.package.dtype else: self.__dtype = dtype self.__binary = binary self.__vtype = {} self.__data = {} if data is not None: self.__cast_data(data) self.__df = None if list_free_format is None: if package.parent.version == "mf2k": list_free_format = False self.list_free_format = list_free_format return @property def name(self): return self.package.name @property def mg(self): return self._model.modelgrid @property def sr(self): return self.mg.sr @property def model(self): return self._model @property def package(self): return self._package @property def data_type(self): return DataType.transientlist @property def plotable(self): return True def get_empty(self, ncell=0): d = create_empty_recarray(ncell, self.dtype, default_value=-1.0e10) return d def export(self, f, **kwargs): from flopy import export return export.utils.mflist_export(f, self, **kwargs) def append(self, other): """ append the recarrays from one MfList to another Parameters ---------- other: variable: an item that can be cast in to an MfList that corresponds with self Returns ------- dict of {kper:recarray} """ if not isinstance(other, MfList): other = MfList( self.package, data=other, dtype=self.dtype, model=self._model, list_free_format=self.list_free_format, ) msg = ( "MfList.append(): other arg must be " + "MfList or dict, not {0}".format(type(other)) ) assert isinstance(other, MfList), msg other_kpers = list(other.data.keys()) other_kpers.sort() self_kpers = list(self.data.keys()) self_kpers.sort() new_dict = {} for kper in range(self._model.nper): other_data = other[kper].copy() self_data = self[kper].copy() other_len = other_data.shape[0] self_len = self_data.shape[0] if (other_len == 0 and self_len == 0) or ( kper not in self_kpers and kper not in 
other_kpers ): continue elif self_len == 0: new_dict[kper] = other_data elif other_len == 0: new_dict[kper] = self_data else: new_len = other_data.shape[0] + self_data.shape[0] new_data = np.recarray(new_len, dtype=self.dtype) new_data[:self_len] = self_data new_data[self_len : self_len + other_len] = other_data new_dict[kper] = new_data return new_dict def drop(self, fields): """drop fields from an MfList Parameters ---------- fields : list or set of field names to drop Returns ------- dropped : MfList without the dropped fields """ if not isinstance(fields, list): fields = [fields] names = [n for n in self.dtype.names if n not in fields] dtype = np.dtype( [(k, d) for k, d in self.dtype.descr if k not in fields] ) spd = {} for k, v in self.data.items(): # because np 1.9 doesn't support indexing by list of columns newarr = np.array([self.data[k][n] for n in names]).transpose() newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view( np.recarray ) for n in dtype.names: newarr[n] = self.data[k][n] spd[k] = newarr return MfList(self.package, spd, dtype=dtype) @property def data(self): return self.__data @property def df(self): if self.__df is None: self.__df = self.get_dataframe() return self.__df @property def vtype(self): return self.__vtype @property def dtype(self): return self.__dtype # Get the itmp for a given kper def get_itmp(self, kper): if kper not in list(self.__data.keys()): return None if self.__vtype[kper] is None: return -1 # If an external file, have to load it if self.__vtype[kper] == str: return self.__fromfile(self.__data[kper]).shape[0] if self.__vtype[kper] == np.recarray: return self.__data[kper].shape[0] # If not any of the above, it must be an int return self.__data[kper] @property def mxact(self): mxact = 0 for kper in list(self.__data.keys()): mxact = max(mxact, self.get_itmp(kper)) return mxact @property def fmt_string(self): """Returns a C-style fmt string for numpy savetxt that corresponds to the dtype""" if self.list_free_format is not None: use_free = self.list_free_format else: use_free = True if self.package.parent.has_package("bas6"): use_free = self.package.parent.bas6.ifrefm # mt3d list data is fixed format if "mt3d" in self.package.parent.version.lower(): use_free = False fmts = [] for field in self.dtype.descr: vtype = field[1][1].lower() if vtype in ("i", "b"): if use_free: fmts.append("%9d") else: fmts.append("%10d") elif vtype == "f": if use_free: if numpy114: # Use numpy's floating-point formatter (Dragon4) fmts.append("%15s") else: fmts.append("%15.7E") else: fmts.append("%10G") elif vtype == "o": if use_free: fmts.append("%9s") else: fmts.append("%10s") elif vtype == "s": msg = ( "MfList.fmt_string error: 'str' type found in dtype. 
" "This gives unpredictable results when " "recarray to file - change to 'object' type" ) raise TypeError(msg) else: raise TypeError( "MfList.fmt_string error: unknown vtype in " "field: {}".format(field) ) if use_free: fmt_string = " " + " ".join(fmts) else: fmt_string = "".join(fmts) return fmt_string # Private method to cast the data argument # Should only be called by the constructor def __cast_data(self, data): # If data is a list, then all we can do is try to cast it to # an ndarray, then cast again to a recarray if isinstance(data, list): # warnings.warn("MfList casting list to array") try: data = np.array(data) except Exception as e: raise Exception( "MfList error: casting list to ndarray: " + str(e) ) # If data is a dict, the we have to assume it is keyed on kper if isinstance(data, dict): if not list(data.keys()): raise Exception("MfList error: data dict is empty") for kper, d in data.items(): try: kper = int(kper) except Exception as e: raise Exception( "MfList error: data dict key " + "{0:s} not integer: ".format(kper) + str(type(kper)) + "\n" + str(e) ) # Same as before, just try... if isinstance(d, list): # warnings.warn("MfList: casting list to array at " +\ # "kper {0:d}".format(kper)) try: d = np.array(d) except Exception as e: raise Exception( "MfList error: casting list " + "to ndarray: " + str(e) ) # super hack - sick of recarrays already # if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1): # d = d.view(np.recarray) if isinstance(d, np.recarray): self.__cast_recarray(kper, d) elif isinstance(d, np.ndarray): self.__cast_ndarray(kper, d) elif isinstance(d, int): self.__cast_int(kper, d) elif isinstance(d, str): self.__cast_str(kper, d) elif d is None: self.__data[kper] = -1 self.__vtype[kper] = None else: raise Exception( "MfList error: unsupported data type: " + str(type(d)) + " at kper " + "{0:d}".format(kper) ) # A single recarray - same MfList for all stress periods elif isinstance(data, np.recarray): self.__cast_recarray(0, data) # A single ndarray elif isinstance(data, np.ndarray): self.__cast_ndarray(0, data) # A single filename elif isinstance(data, str): self.__cast_str(0, data) else: raise Exception( "MfList error: unsupported data type: " + str(type(data)) ) def __cast_str(self, kper, d): # If d is a string, assume it is a filename and check that it exists assert os.path.exists(d), ( "MfList error: dict filename (string) '" + d + "' value for " + "kper {0:d} not found".format(kper) ) self.__data[kper] = d self.__vtype[kper] = str def __cast_int(self, kper, d): # If d is an integer, then it must be 0 or -1 if d > 0: raise Exception( "MfList error: dict integer value for " "kper {0:10d} must be 0 or -1, " "not {1:10d}".format(kper, d) ) if d == 0: self.__data[kper] = 0 self.__vtype[kper] = None else: self.__data[kper] = -1 self.__vtype[kper] = None def __cast_recarray(self, kper, d): assert d.dtype == self.__dtype, ( "MfList error: recarray dtype: " + str(d.dtype) + " doesn't match " + "self dtype: " + str(self.dtype) ) self.__data[kper] = d self.__vtype[kper] = np.recarray def __cast_ndarray(self, kper, d): d = np.atleast_2d(d) if d.dtype != self.__dtype: assert d.shape[1] == len(self.dtype), ( "MfList error: ndarray " + "shape " + str(d.shape) + " doesn't match dtype " + "len: " + str(len(self.dtype)) ) # warnings.warn("MfList: ndarray dtype does not match self " +\ # "dtype, trying to cast") try: self.__data[kper] = np.core.records.fromarrays( d.transpose(), dtype=self.dtype ) except Exception as e: raise Exception( "MfList error: casting ndarray to 
recarray: " + str(e) ) self.__vtype[kper] = np.recarray def get_dataframe(self, squeeze=True): """ Cast recarrays for stress periods into single dataframe containing all stress periods. Parameters ---------- squeeze : bool Reduce number of columns in dataframe to only include stress periods where a variable changes. Returns ------- df : dataframe Dataframe of shape nrow = ncells, ncol = nvar x nper. If the squeeze option is chosen, nper is the number of stress periods where at least one cells is different, otherwise it is equal to the number of keys in MfList.data. Notes ----- Requires pandas. """ try: import pandas as pd except Exception as e: msg = "MfList.get_dataframe() requires pandas" raise ImportError(msg) # make a dataframe of all data for all stress periods names = ["k", "i", "j"] if "MNW2" in self.package.name: names += ["wellid"] # find relevant variable names # may have to iterate over the first stress period for per in range(self._model.nper): if hasattr(self.data[per], "dtype"): varnames = list( [n for n in self.data[per].dtype.names if n not in names] ) break # create list of dataframes for each stress period # each with index of k, i, j dfs = [] for per in self.data.keys(): recs = self.data[per] if recs is None or len(recs) == 0: # add an empty dataframe if a stress period is # empty (e.g. no pumping during a predevelopment # period) columns = names + list( ["{}{}".format(c, per) for c in varnames] ) dfi = pd.DataFrame(data=None, columns=columns) dfi = dfi.set_index(names) else: dfi = pd.DataFrame.from_records(recs) dfg = dfi.groupby(names) count = dfg[varnames[0]].count().rename("n") if (count > 1).values.any(): print( "Duplicated list entry locations aggregated " "for kper {}".format(per) ) for kij in count[count > 1].index.values: print(" (k,i,j) {}".format(kij)) dfi = dfg.sum() # aggregate dfi.columns = list(["{}{}".format(c, per) for c in varnames]) dfs.append(dfi) df = pd.concat(dfs, axis=1) if squeeze: keep = [] for var in varnames: diffcols = list([n for n in df.columns if var in n]) diff = df[diffcols].fillna(0).diff(axis=1) diff[ "{}0".format(var) ] = 1 # always return the first stress period changed = diff.sum(axis=0) != 0 keep.append(df.loc[:, changed.index[changed]]) df = pd.concat(keep, axis=1) df = df.reset_index() df.insert(len(names), "node", df.i * self._model.ncol + df.j) return df def add_record(self, kper, index, values): # Add a record to possible already set list for a given kper # index is a list of k,i,j or nodes. # values is a list of floats. 
# The length of index + values must be equal to the number of names # in dtype assert len(index) + len(values) == len(self.dtype), ( "MfList.add_record() error: length of index arg +" + "length of value arg != length of self dtype" ) # If we already have something for this kper, then add to it if kper in list(self.__data.keys()): if self.vtype[kper] == int: # If a 0 or -1, reset self.__data[kper] = self.get_empty(1) self.__vtype[kper] = np.recarray elif self.vtype[kper] == str: # If filename, load into recarray d = self.__fromfile(self.data[kper]) d.resize(d.shape[0], d.shape[1]) self.__data[kper] = d self.__vtype[kper] = np.recarray elif self.vtype[kper] == np.recarray: # Extend the recarray self.__data[kper] = np.append( self.__data[kper], self.get_empty(1) ) else: self.__data[kper] = self.get_empty(1) self.__vtype[kper] = np.recarray rec = list(index) rec.extend(list(values)) try: self.__data[kper][-1] = tuple(rec) except Exception as e: raise Exception( "MfList.add_record() error: adding record to " + "recarray: " + str(e) ) def __getitem__(self, kper): # Get the recarray for a given kper # If the data entry for kper is a string, # return the corresponding recarray, # but don't reset the value in the data dict # assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \ # str(kper) + " not in data.keys()" try: kper = int(kper) except Exception as e: raise Exception( "MfList error: _getitem__() passed invalid kper index:" + str(kper) ) if kper not in list(self.data.keys()): if kper == 0: return self.get_empty() else: return self.data[self.__find_last_kper(kper)] if self.vtype[kper] == int: if self.data[kper] == 0: return self.get_empty() else: return self.data[self.__find_last_kper(kper)] if self.vtype[kper] == str: return self.__fromfile(self.data[kper]) if self.vtype[kper] == np.recarray: return self.data[kper] def __setitem__(self, kper, data): if kper in list(self.__data.keys()): if self._model.verbose: print("removing existing data for kper={}".format(kper)) self.data.pop(kper) # If data is a list, then all we can do is try to cast it to # an ndarray, then cast again to a recarray if isinstance(data, list): # warnings.warn("MfList casting list to array") try: data = np.array(data) except Exception as e: raise Exception( "MfList error: casting list to ndarray: " + str(e) ) # cast data if isinstance(data, int): self.__cast_int(kper, data) elif isinstance(data, np.recarray): self.__cast_recarray(kper, data) # A single ndarray elif isinstance(data, np.ndarray): self.__cast_ndarray(kper, data) # A single filename elif isinstance(data, str): self.__cast_str(kper, data) else: raise Exception( "MfList error: unsupported data type: " + str(type(data)) ) # raise NotImplementedError("MfList.__setitem__() not implemented") def __fromfile(self, f): # d = np.fromfile(f,dtype=self.dtype,count=count) try: d = np.genfromtxt(f, dtype=self.dtype) except Exception as e: raise Exception( "MfList.__fromfile() error reading recarray " + "from file " + str(e) ) return d def get_filenames(self): kpers = list(self.data.keys()) kpers.sort() filenames = [] first = kpers[0] for kper in list(range(0, max(self._model.nper, max(kpers) + 1))): # Fill missing early kpers with 0 if kper < first: itmp = 0 kper_vtype = int elif kper in kpers: kper_vtype = self.__vtype[kper] if ( self._model.array_free_format and self._model.external_path is not None ): # py_filepath = '' # py_filepath = os.path.join(py_filepath, # self._model.external_path) filename = self.package.name[0] + "_{0:04d}.dat".format(kper) 
filenames.append(filename) return filenames def get_filename(self, kper): ext = "dat" if self.binary: ext = "bin" return self.package.name[0] + "_{0:04d}.{1}".format(kper, ext) @property def binary(self): return bool(self.__binary) def write_transient(self, f, single_per=None, forceInternal=False): # forceInternal overrides isExternal (set below) for cases where # external arrays are not supported (oh hello MNW1!) # write the transient sequence described by the data dict nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper() assert hasattr(f, "read"), ( "MfList.write() error: " + "f argument must be a file handle" ) kpers = list(self.data.keys()) kpers.sort() first = kpers[0] if single_per is None: loop_over_kpers = list(range(0, max(nper, max(kpers) + 1))) else: if not isinstance(single_per, list): single_per = [single_per] loop_over_kpers = single_per for kper in loop_over_kpers: # Fill missing early kpers with 0 if kper < first: itmp = 0 kper_vtype = int elif kper in kpers: kper_data = self.__data[kper] kper_vtype = self.__vtype[kper] if kper_vtype == str: if not self._model.array_free_format: kper_data = self.__fromfile(kper_data) kper_vtype = np.recarray itmp = self.get_itmp(kper) if kper_vtype == np.recarray: itmp = kper_data.shape[0] elif (kper_vtype == int) or (kper_vtype is None): itmp = kper_data # Fill late missing kpers with -1 else: itmp = -1 kper_vtype = int f.write( " {0:9d} {1:9d} # stress period {2:d}\n".format( itmp, 0, kper + 1 ) ) isExternal = False if ( self._model.array_free_format and self._model.external_path is not None and forceInternal is False ): isExternal = True if self.__binary: isExternal = True if isExternal: if kper_vtype == np.recarray: py_filepath = "" if self._model.model_ws is not None: py_filepath = self._model.model_ws if self._model.external_path is not None: py_filepath = os.path.join( py_filepath, self._model.external_path ) filename = self.get_filename(kper) py_filepath = os.path.join(py_filepath, filename) model_filepath = filename if self._model.external_path is not None: model_filepath = os.path.join( self._model.external_path, filename ) self.__tofile(py_filepath, kper_data) kper_vtype = str kper_data = model_filepath if kper_vtype == np.recarray: name = f.name if self.__binary or not numpy114: f.close() # switch file append mode to binary with open(name, "ab+") as f: self.__tofile(f, kper_data) # continue back to non-binary f = open(name, "a") else: self.__tofile(f, kper_data) elif kper_vtype == str: f.write(" open/close " + kper_data) if self.__binary: f.write(" (BINARY)") f.write("\n") def __tofile(self, f, data): # Write the recarray (data) to the file (or file handle) f assert isinstance(data, np.recarray), ( "MfList.__tofile() data arg " + "not a recarray" ) # Add one to the kij indices lnames = [name.lower() for name in self.dtype.names] # --make copy of data for multiple calls d = data.copy() for idx in ["k", "i", "j", "node"]: if idx in lnames: d[idx] += 1 if self.__binary: dtype2 = [] for name in self.dtype.names: dtype2.append((name, np.float32)) dtype2 = np.dtype(dtype2) d = np.array(d, dtype=dtype2) d.tofile(f) else: np.savetxt(f, d, fmt=self.fmt_string, delimiter="") def check_kij(self): names = self.dtype.names if ("k" not in names) or ("i" not in names) or ("j" not in names): warnings.warn( "MfList.check_kij(): index fieldnames 'k,i,j' " + "not found in self.dtype names: " + str(names) ) return nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper() if nl == 0: warnings.warn( "MfList.check_kij(): unable to get dis info from 
" + "model" ) return for kper in list(self.data.keys()): out_idx = [] data = self[kper] if data is not None: k = data["k"] k_idx = np.where(np.logical_or(k < 0, k >= nl)) if k_idx[0].shape[0] > 0: out_idx.extend(list(k_idx[0])) i = data["i"] i_idx = np.where(np.logical_or(i < 0, i >= nr)) if i_idx[0].shape[0] > 0: out_idx.extend(list(i_idx[0])) j = data["j"] j_idx = np.where(np.logical_or(j < 0, j >= nc)) if j_idx[0].shape[0]: out_idx.extend(list(j_idx[0])) if len(out_idx) > 0: warn_str = ( "MfList.check_kij(): warning the following " + "indices are out of bounds in kper " + str(kper) + ":\n" ) for idx in out_idx: d = data[idx] warn_str += " {0:9d} {1:9d} {2:9d}\n".format( d["k"] + 1, d["i"] + 1, d["j"] + 1 ) warnings.warn(warn_str) def __find_last_kper(self, kper): kpers = list(self.data.keys()) kpers.sort() last = 0 for kkper in kpers[::-1]: # if this entry is valid if self.vtype[kkper] != int or self.data[kkper] != -1: last = kkper if kkper <= kper: break return kkper def get_indices(self): """ a helper function for plotting - get all unique indices """ names = self.dtype.names lnames = [] [lnames.append(name.lower()) for name in names] if "k" not in lnames or "j" not in lnames: raise NotImplementedError("MfList.get_indices requires kij") kpers = list(self.data.keys()) kpers.sort() indices = [] for i, kper in enumerate(kpers): kper_vtype = self.__vtype[kper] if (kper_vtype != int) or (kper_vtype is not None): d = self.data[kper] if not indices: indices = list(zip(d["k"], d["i"], d["j"])) else: new_indices = list(zip(d["k"], d["i"], d["j"])) for ni in new_indices: if ni not in indices: indices.append(ni) return indices def attribute_by_kper(self, attr, function=np.mean, idx_val=None): assert attr in self.dtype.names if idx_val is not None: assert idx_val[0] in self.dtype.names kpers = list(self.data.keys()) kpers.sort() values = [] for kper in range(0, max(self._model.nper, max(kpers))): if kper < min(kpers): values.append(0) elif kper > max(kpers) or kper not in kpers: values.append(values[-1]) else: kper_data = self.__data[kper] if idx_val is not None: kper_data = kper_data[ np.where(kper_data[idx_val[0]] == idx_val[1]) ] # kper_vtype = self.__vtype[kper] v = function(kper_data[attr]) values.append(v) return values def plot( self, key=None, names=None, kper=0, filename_base=None, file_extension=None, mflay=None, **kwargs ): """ Plot stress period boundary condition (MfList) data for a specified stress period Parameters ---------- key : str MfList dictionary key. (default is None) names : list List of names for figure titles. (default is None) kper : int MODFLOW zero-based stress period number to return. (default is zero) filename_base : str Base file name that will be used to automatically generate file names for output image files. Plots will be exported as image files if file_name_base is not None. (default is None) file_extension : str Valid matplotlib.pyplot file extension for savefig(). Only used if filename_base is not None. (default is 'png') mflay : int MODFLOW zero-based layer number to return. If None, then all all layers will be included. (default is None) **kwargs : dict axes : list of matplotlib.pyplot.axis List of matplotlib.pyplot.axis that will be used to plot data for each layer. If axes=None axes will be generated. (default is None) pcolor : bool Boolean used to determine if matplotlib.pyplot.pcolormesh plot will be plotted. (default is True) colorbar : bool Boolean used to determine if a color bar will be added to the matplotlib.pyplot.pcolormesh. 
Only used if pcolor=True. (default is False) inactive : bool Boolean used to determine if a black overlay in inactive cells in a layer will be displayed. (default is True) contour : bool Boolean used to determine if matplotlib.pyplot.contour plot will be plotted. (default is False) clabel : bool Boolean used to determine if matplotlib.pyplot.clabel will be plotted. Only used if contour=True. (default is False) grid : bool Boolean used to determine if the model grid will be plotted on the figure. (default is False) masked_values : list List of unique values to be excluded from the plot. Returns ---------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. See Also -------- Notes ----- Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow.load('test.nam') >>> ml.wel.stress_period_data.plot(ml.wel, kper=1) """ from flopy.plot import PlotUtilities axes = PlotUtilities._plot_mflist_helper( self, key=key, names=names, kper=kper, filename_base=filename_base, file_extension=file_extension, mflay=mflay, **kwargs ) return axes def to_shapefile(self, filename, kper=None): """ Export stress period boundary condition (MfList) data for a specified stress period Parameters ---------- filename : str Shapefile name to write kper : int MODFLOW zero-based stress period number to return. (default is None) Returns ---------- None See Also -------- Notes ----- Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow.load('test.nam') >>> ml.wel.to_shapefile('test_hk.shp', kper=1) """ import warnings warnings.warn( "Deprecation warning: to_shapefile() is deprecated. use .export()" ) # if self.sr is None: # raise Exception("MfList.to_shapefile: SpatialReference not set") # import flopy.utils.flopy_io as fio # if kper is None: # keys = self.data.keys() # keys.sort() # else: # keys = [kper] # array_dict = {} # for kk in keys: # arrays = self.to_array(kk) # for name, array in arrays.items(): # for k in range(array.shape[0]): # #aname = name+"{0:03d}_{1:02d}".format(kk, k) # n = fio.shape_attr_name(name, length=4) # aname = "{}{:03d}{:03d}".format(n, k+1, int(kk)+1) # array_dict[aname] = array[k] # fio.write_grid_shapefile(filename, self.sr, array_dict) self.export(filename, kper=kper) def to_array(self, kper=0, mask=False): """ Convert stress period boundary condition (MfList) data for a specified stress period to a 3-D numpy array Parameters ---------- kper : int MODFLOW zero-based stress period number to return. (default is zero) mask : boolean return array with np.NaN instead of zero Returns ---------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. The dictionary keys are the MfList dtype names for the stress period data ('cond', 'flux', 'bhead', etc.). 
See Also -------- Notes ----- Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow.load('test.nam') >>> v = ml.wel.stress_period_data.to_array(kper=1) """ i0 = 3 unstructured = False if "inode" in self.dtype.names: raise NotImplementedError() if "node" in self.dtype.names: if "i" not in self.dtype.names and "j" not in self.dtype.names: i0 = 1 unstructured = True arrays = {} for name in self.dtype.names[i0:]: if not self.dtype.fields[name][0] == object: if unstructured: arr = np.zeros((self._model.nlay * self._model.ncpl,)) else: arr = np.zeros( (self._model.nlay, self._model.nrow, self._model.ncol) ) arrays[name] = arr.copy() # if this kper is not found if kper not in self.data.keys(): kpers = list(self.data.keys()) kpers.sort() # if this kper is before the first entry, # (maybe) mask and return if kper < kpers[0]: if mask: for name, arr in arrays.items(): arrays[name][:] = np.NaN return arrays # find the last kper else: kper = self.__find_last_kper(kper) sarr = self.data[kper] if np.isscalar(sarr): # if there are no entries for this kper if sarr == 0: if mask: for name, arr in arrays.items(): arrays[name][:] = np.NaN return arrays else: raise Exception("MfList: something bad happened") for name, arr in arrays.items(): if unstructured: cnt = np.zeros( (self._model.nlay * self._model.ncpl,), dtype=np.float ) else: cnt = np.zeros( (self._model.nlay, self._model.nrow, self._model.ncol), dtype=np.float, ) # print(name,kper) for rec in sarr: if unstructured: arr[rec["node"]] += rec[name] cnt[rec["node"]] += 1.0 else: arr[rec["k"], rec["i"], rec["j"]] += rec[name] cnt[rec["k"], rec["i"], rec["j"]] += 1.0 # average keys that should not be added if name not in ("cond", "flux"): idx = cnt > 0.0 arr[idx] /= cnt[idx] if mask: arr = np.ma.masked_where(cnt == 0.0, arr) arr[cnt == 0.0] = np.NaN arrays[name] = arr.copy() # elif mask: # for name, arr in arrays.items(): # arrays[name][:] = np.NaN return arrays @property def masked_4D_arrays(self): # get the first kper arrays = self.to_array(kper=0, mask=True) # initialize these big arrays m4ds = {} for name, array in arrays.items(): m4d = np.zeros( ( self._model.nper, self._model.nlay, self._model.nrow, self._model.ncol, ) ) m4d[0, :, :, :] = array m4ds[name] = m4d for kper in range(1, self._model.nper): arrays = self.to_array(kper=kper, mask=True) for name, array in arrays.items(): m4ds[name][kper, :, :, :] = array return m4ds def masked_4D_arrays_itr(self): # get the first kper arrays = self.to_array(kper=0, mask=True) # initialize these big arrays for name, array in arrays.items(): m4d = np.zeros( ( self._model.nper, self._model.nlay, self._model.nrow, self._model.ncol, ) ) m4d[0, :, :, :] = array for kper in range(1, self._model.nper): arrays = self.to_array(kper=kper, mask=True) for tname, array in arrays.items(): if tname == name: m4d[kper, :, :, :] = array yield name, m4d @property def array(self): return self.masked_4D_arrays @classmethod def from_4d(cls, model, pak_name, m4ds): """construct an MfList instance from a dict of (attribute_name,masked 4D ndarray Parameters ---------- model : mbase derived type pak_name : str package name (e.g GHB) m4ds : {attribute name:4d masked numpy.ndarray} Returns ------- MfList instance """ sp_data = MfList.masked4D_arrays_to_stress_period_data( model.get_package(pak_name).get_default_dtype(), m4ds ) return cls(model.get_package(pak_name), data=sp_data) @staticmethod def masked4D_arrays_to_stress_period_data(dtype, m4ds): """ convert a dictionary of 4-dim masked arrays to a stress_period_data style 
dict of recarray Parameters ---------- dtype : numpy dtype m4ds : dict {name:masked numpy 4-dim ndarray} Returns ------- dict {kper:recarray} """ assert isinstance(m4ds, dict) for name, m4d in m4ds.items(): assert isinstance(m4d, np.ndarray) assert name in dtype.names assert m4d.ndim == 4 keys = list(m4ds.keys()) for i1, key1 in enumerate(keys): a1 = np.isnan(m4ds[key1]) for i2, key2 in enumerate(keys[i1:]): a2 = np.isnan(m4ds[key2]) if not np.array_equal(a1, a2): raise Exception( "Transient2d error: masking not equal" + " for {0} and {1}".format(key1, key2) ) sp_data = {} for kper in range(m4d.shape[0]): vals = {} for name, m4d in m4ds.items(): arr = m4d[kper, :, :, :] isnan = np.argwhere(~np.isnan(arr)) v = [] for k, i, j in isnan: v.append(arr[k, i, j]) vals[name] = v kk = isnan[:, 0] ii = isnan[:, 1] jj = isnan[:, 2] spd = np.recarray(shape=isnan.shape[0], dtype=dtype) spd["i"] = ii spd["k"] = kk spd["j"] = jj for n, v in vals.items(): spd[n] = v sp_data[kper] = spd return sp_data
flopy/utils/util_list.py
44,483
a generic object for handling transient boundary condition lists Parameters ---------- package : package object The package object (of type :class:`flopy.pakbase.Package`) to which this MfList will be added. data : varies the data of the transient list (optional). (the default is None) Attributes ---------- mxact : int the max number of active bc for any stress period Methods ------- add_record(kper,index,value) : None add a record to stress period kper at index location write_transient(f) : None write the transient sequence to the model input file f check_kij() : None checks for boundaries outside of model domain - issues warnings only See Also -------- Notes ----- Examples -------- append the recarrays from one MfList to another Parameters ---------- other: variable: an item that can be cast in to an MfList that corresponds with self Returns ------- dict of {kper:recarray} drop fields from an MfList Parameters ---------- fields : list or set of field names to drop Returns ------- dropped : MfList without the dropped fields Returns a C-style fmt string for numpy savetxt that corresponds to the dtype construct an MfList instance from a dict of (attribute_name,masked 4D ndarray Parameters ---------- model : mbase derived type pak_name : str package name (e.g GHB) m4ds : {attribute name:4d masked numpy.ndarray} Returns ------- MfList instance Cast recarrays for stress periods into single dataframe containing all stress periods. Parameters ---------- squeeze : bool Reduce number of columns in dataframe to only include stress periods where a variable changes. Returns ------- df : dataframe Dataframe of shape nrow = ncells, ncol = nvar x nper. If the squeeze option is chosen, nper is the number of stress periods where at least one cells is different, otherwise it is equal to the number of keys in MfList.data. Notes ----- Requires pandas. a helper function for plotting - get all unique indices convert a dictionary of 4-dim masked arrays to a stress_period_data style dict of recarray Parameters ---------- dtype : numpy dtype m4ds : dict {name:masked numpy 4-dim ndarray} Returns ------- dict {kper:recarray} Plot stress period boundary condition (MfList) data for a specified stress period Parameters ---------- key : str MfList dictionary key. (default is None) names : list List of names for figure titles. (default is None) kper : int MODFLOW zero-based stress period number to return. (default is zero) filename_base : str Base file name that will be used to automatically generate file names for output image files. Plots will be exported as image files if file_name_base is not None. (default is None) file_extension : str Valid matplotlib.pyplot file extension for savefig(). Only used if filename_base is not None. (default is 'png') mflay : int MODFLOW zero-based layer number to return. If None, then all all layers will be included. (default is None) **kwargs : dict axes : list of matplotlib.pyplot.axis List of matplotlib.pyplot.axis that will be used to plot data for each layer. If axes=None axes will be generated. (default is None) pcolor : bool Boolean used to determine if matplotlib.pyplot.pcolormesh plot will be plotted. (default is True) colorbar : bool Boolean used to determine if a color bar will be added to the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. (default is False) inactive : bool Boolean used to determine if a black overlay in inactive cells in a layer will be displayed. 
(default is True) contour : bool Boolean used to determine if matplotlib.pyplot.contour plot will be plotted. (default is False) clabel : bool Boolean used to determine if matplotlib.pyplot.clabel will be plotted. Only used if contour=True. (default is False) grid : bool Boolean used to determine if the model grid will be plotted on the figure. (default is False) masked_values : list List of unique values to be excluded from the plot. Returns ---------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. See Also -------- Notes ----- Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow.load('test.nam') >>> ml.wel.stress_period_data.plot(ml.wel, kper=1) Convert stress period boundary condition (MfList) data for a specified stress period to a 3-D numpy array Parameters ---------- kper : int MODFLOW zero-based stress period number to return. (default is zero) mask : boolean return array with np.NaN instead of zero Returns ---------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. The dictionary keys are the MfList dtype names for the stress period data ('cond', 'flux', 'bhead', etc.). See Also -------- Notes ----- Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow.load('test.nam') >>> v = ml.wel.stress_period_data.to_array(kper=1) Export stress period boundary condition (MfList) data for a specified stress period Parameters ---------- filename : str Shapefile name to write kper : int MODFLOW zero-based stress period number to return. (default is None) Returns ---------- None See Also -------- Notes ----- Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow.load('test.nam') >>> ml.wel.to_shapefile('test_hk.shp', kper=1) util_list module. Contains the mflist class. This classes encapsulates modflow-style list inputs away from the individual packages. The end-user should not need to instantiate this class directly. some more info because np 1.9 doesn't support indexing by list of columns Get the itmp for a given kper If an external file, have to load it If not any of the above, it must be an int mt3d list data is fixed format Use numpy's floating-point formatter (Dragon4) Private method to cast the data argument Should only be called by the constructor If data is a list, then all we can do is try to cast it to an ndarray, then cast again to a recarray warnings.warn("MfList casting list to array") If data is a dict, the we have to assume it is keyed on kper Same as before, just try... warnings.warn("MfList: casting list to array at " +\ "kper {0:d}".format(kper)) super hack - sick of recarrays already if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1): d = d.view(np.recarray) A single recarray - same MfList for all stress periods A single ndarray A single filename If d is a string, assume it is a filename and check that it exists If d is an integer, then it must be 0 or -1 warnings.warn("MfList: ndarray dtype does not match self " +\ "dtype, trying to cast") make a dataframe of all data for all stress periods find relevant variable names may have to iterate over the first stress period create list of dataframes for each stress period each with index of k, i, j add an empty dataframe if a stress period is empty (e.g. no pumping during a predevelopment period) aggregate always return the first stress period Add a record to possible already set list for a given kper index is a list of k,i,j or nodes. 
values is a list of floats. The length of index + values must be equal to the number of names in dtype If we already have something for this kper, then add to it If a 0 or -1, reset If filename, load into recarray Extend the recarray Get the recarray for a given kper If the data entry for kper is a string, return the corresponding recarray, but don't reset the value in the data dict assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \ str(kper) + " not in data.keys()" If data is a list, then all we can do is try to cast it to an ndarray, then cast again to a recarray warnings.warn("MfList casting list to array") cast data A single ndarray A single filename raise NotImplementedError("MfList.__setitem__() not implemented") d = np.fromfile(f,dtype=self.dtype,count=count) Fill missing early kpers with 0 py_filepath = '' py_filepath = os.path.join(py_filepath, self._model.external_path) forceInternal overrides isExternal (set below) for cases where external arrays are not supported (oh hello MNW1!) write the transient sequence described by the data dict Fill missing early kpers with 0 Fill late missing kpers with -1 switch file append mode to binary continue back to non-binary Write the recarray (data) to the file (or file handle) f Add one to the kij indices --make copy of data for multiple calls if this entry is valid kper_vtype = self.__vtype[kper] if self.sr is None: raise Exception("MfList.to_shapefile: SpatialReference not set") import flopy.utils.flopy_io as fio if kper is None: keys = self.data.keys() keys.sort() else: keys = [kper] array_dict = {} for kk in keys: arrays = self.to_array(kk) for name, array in arrays.items(): for k in range(array.shape[0]): aname = name+"{0:03d}_{1:02d}".format(kk, k) n = fio.shape_attr_name(name, length=4) aname = "{}{:03d}{:03d}".format(n, k+1, int(kk)+1) array_dict[aname] = array[k] fio.write_grid_shapefile(filename, self.sr, array_dict) if this kper is not found if this kper is before the first entry, (maybe) mask and return find the last kper if there are no entries for this kper print(name,kper) average keys that should not be added elif mask: for name, arr in arrays.items(): arrays[name][:] = np.NaN get the first kper initialize these big arrays get the first kper initialize these big arrays
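A minimal usage sketch may help tie these MfList docstrings together. It assumes flopy is installed; the model dimensions and pumping rates are illustrative, and to_array behaves as documented above.

import flopy

ml = flopy.modflow.Modflow(modelname="demo")
dis = flopy.modflow.ModflowDis(ml, nlay=1, nrow=10, ncol=10, nper=2)

# The WEL package stores its list input as an MfList keyed by stress period:
# {kper: recarray of (k, i, j, flux)}
wel = flopy.modflow.ModflowWel(
    ml,
    stress_period_data={0: [[0, 4, 4, -500.0]], 1: [[0, 4, 4, -750.0]]},
)

# Convert one stress period to 3-D arrays keyed by variable name
arrays = ml.wel.stress_period_data.to_array(kper=1)
print(arrays["flux"].shape)  # (nlay, nrow, ncol)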
9,859
en
0.629773
# -*- coding: utf-8 -*-
import requests

from webs.api.exceptions.customs import ServerError, InvalidAPIRequest, RecordNotFound, RecordAlreadyExists


class RequestMixin(object):
    CODE_EXCEPTION_MSG = {
        400: InvalidAPIRequest,
        404: RecordNotFound,
        409: RecordAlreadyExists,
        422: InvalidAPIRequest,
        500: ServerError,
    }

    def __init__(self):
        self.session = requests.Session()

    @property
    def _headers(self):
        return {
            "Content-Type": "application/json",
        }

    def request(self, server, method, url, json=None, params=None, timeout=60):
        try:
            response = self.session.request(
                method, url, json=json, params=params, timeout=timeout, headers=self._headers
            )
        except requests.exceptions.ConnectTimeout:
            raise self.CODE_EXCEPTION_MSG[500](f"{server} server connection timed out!")
        except requests.exceptions.ConnectionError:
            raise self.CODE_EXCEPTION_MSG[500](f"{server} server connection error!")

        try:
            response_data = response.json()
        except Exception:
            raise ServerError(f"Failed to parse the {server} server response!")

        if not (200 <= response.status_code < 300):
            exception = self.CODE_EXCEPTION_MSG[response.status_code] \
                if response.status_code in self.CODE_EXCEPTION_MSG else self.CODE_EXCEPTION_MSG[400]
            # use .get with a default so a missing 'error' key cannot raise AttributeError
            raise exception(f"{server} Response: {response_data.get('error', {}).get('message')}")

        return response_data


web_client = RequestMixin()
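A hypothetical call site for web_client might look as follows; the service name, URL, and import path (derived from services/engine/webs/core/requests/request.py) are assumptions, not part of the original code.

from webs.core.requests.request import web_client  # import path assumed from the file location

data = web_client.request(
    server="user-service",                   # label used in the error messages
    method="GET",
    url="http://user-service/api/v1/users",  # illustrative URL
    params={"page": 1},
    timeout=10,
)
# A 2xx response returns the parsed JSON body; other status codes raise the
# mapped exception from CODE_EXCEPTION_MSG (falling back to InvalidAPIRequest).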
services/engine/webs/core/requests/request.py
1,624
-*- coding: utf-8 -*-
21
en
0.767281
# -*- coding: utf-8 -*-
from flask import Blueprint, jsonify

from flask_service.swagger import spec

__all__ = ['main_app']

main_app = Blueprint('main_app', __name__)


@main_app.route('/api')
def swagger():
    """
    Responds with the OpenAPI specification for this application.
    """
    return jsonify(spec.to_dict())


@main_app.route('/health')
def health():
    """
    Responds with the current service's health.

    Could be used by the liveness probe of a Kubernetes cluster for instance.
    """
    # put some logic here to decide if your app is doing well or not
    # by default, we'll always return everything is okay!
    return ""


@main_app.route('/status')
def status():
    """
    Responds with the current service's status.

    Could be used by the readiness probe of a Kubernetes cluster.
    """
    # put some logic here to decide if your app is doing well or not
    # by default, we'll always return everything is okay!
    return ""
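For context, a minimal sketch of mounting this blueprint on an application; the module path flask_service.views is inferred from the file name below and the wiring is illustrative.

from flask import Flask
from flask_service.views import main_app  # module path inferred from flask_service/views.py

app = Flask(__name__)
app.register_blueprint(main_app)

# GET /health and GET /status answer 200 with an empty body, which is what a
# Kubernetes liveness/readiness probe needs; GET /api serves the OpenAPI spec.
if __name__ == "__main__":
    app.run(port=5000)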
flask_service/views.py
968
Responds with the current service's health. Could be used by the liveness probe of a Kubernetes cluster for instance. Responds with the current service's status. Could be used by the readiness probe of a Kubernetes cluster. Responds with the OpenAPI specification for this application. -*- coding: utf-8 -*- put some logic here to decide if your app is doing well or not by default, we'll always return everything is okay! put some logic here to decide if your app is doing well or not by default, we'll always return everything is okay!
541
en
0.940466
"""Principal Component Analysis Base Classes""" # Author: Alexandre Gramfort <[email protected]> # Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Denis A. Engemann <[email protected]> # Kyle Kastner <[email protected]> # # License: BSD 3 clause import numpy as np from scipy import linalg from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin from ..utils.validation import check_is_fitted from abc import ABCMeta, abstractmethod class _BasePCA( _ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta ): """Base class for PCA methods. Warning: This class should not be used directly. Use derived classes instead. """ def get_covariance(self): """Compute data covariance with the generative model. ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` where S**2 contains the explained variances, and sigma2 contains the noise variances. Returns ------- cov : array of shape=(n_features, n_features) Estimated covariance of data. """ components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0) cov = np.dot(components_.T * exp_var_diff, components_) cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace return cov def get_precision(self): """Compute data precision matrix with the generative model. Equals the inverse of the covariance but computed with the matrix inversion lemma for efficiency. Returns ------- precision : array, shape=(n_features, n_features) Estimated precision of data. """ n_features = self.components_.shape[1] # handle corner cases first if self.n_components_ == 0: return np.eye(n_features) / self.noise_variance_ if self.n_components_ == n_features: return linalg.inv(self.get_covariance()) # Get precision using matrix inversion lemma components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0) precision = np.dot(components_, components_.T) / self.noise_variance_ precision.flat[:: len(precision) + 1] += 1.0 / exp_var_diff precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) precision /= -(self.noise_variance_ ** 2) precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_ return precision @abstractmethod def fit(self, X, y=None): """Placeholder for fit. Subclasses should implement this method! Fit the model with X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- self : object Returns the instance itself. """ def transform(self, X): """Apply dimensionality reduction to X. X is projected on the first principal components previously extracted from a training set. Parameters ---------- X : array-like of shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : array-like of shape (n_samples, n_components) Projection of X in the first principal components, where `n_samples` is the number of samples and `n_components` is the number of the components. 
""" check_is_fitted(self) X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False) if self.mean_ is not None: X = X - self.mean_ X_transformed = np.dot(X, self.components_.T) if self.whiten: X_transformed /= np.sqrt(self.explained_variance_) return X_transformed def inverse_transform(self, X): """Transform data back to its original space. In other words, return an input `X_original` whose transform would be X. Parameters ---------- X : array-like of shape (n_samples, n_components) New data, where `n_samples` is the number of samples and `n_components` is the number of components. Returns ------- X_original array-like of shape (n_samples, n_features) Original data, where `n_samples` is the number of samples and `n_features` is the number of features. Notes ----- If whitening is enabled, inverse_transform will compute the exact inverse operation, which includes reversing whitening. """ if self.whiten: return ( np.dot( X, np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_, ) + self.mean_ ) else: return np.dot(X, self.components_) + self.mean_ @property def _n_features_out(self): """Number of transformed output features.""" return self.components_.shape[0]
sklearn/decomposition/_base.py
5,716
Base class for PCA methods. Warning: This class should not be used directly. Use derived classes instead. Number of transformed output features. Placeholder for fit. Subclasses should implement this method! Fit the model with X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- self : object Returns the instance itself. Compute data covariance with the generative model. ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` where S**2 contains the explained variances, and sigma2 contains the noise variances. Returns ------- cov : array of shape=(n_features, n_features) Estimated covariance of data. Compute data precision matrix with the generative model. Equals the inverse of the covariance but computed with the matrix inversion lemma for efficiency. Returns ------- precision : array, shape=(n_features, n_features) Estimated precision of data. Transform data back to its original space. In other words, return an input `X_original` whose transform would be X. Parameters ---------- X : array-like of shape (n_samples, n_components) New data, where `n_samples` is the number of samples and `n_components` is the number of components. Returns ------- X_original array-like of shape (n_samples, n_features) Original data, where `n_samples` is the number of samples and `n_features` is the number of features. Notes ----- If whitening is enabled, inverse_transform will compute the exact inverse operation, which includes reversing whitening. Apply dimensionality reduction to X. X is projected on the first principal components previously extracted from a training set. Parameters ---------- X : array-like of shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : array-like of shape (n_samples, n_components) Projection of X in the first principal components, where `n_samples` is the number of samples and `n_components` is the number of the components. Principal Component Analysis Base Classes Author: Alexandre Gramfort <[email protected]> Olivier Grisel <[email protected]> Mathieu Blondel <[email protected]> Denis A. Engemann <[email protected]> Kyle Kastner <[email protected]> License: BSD 3 clause modify diag inplace handle corner cases first Get precision using matrix inversion lemma
2,599
en
0.698375
""" enCount tasks and analyses. enCount is a Python library for processing RNA-Seq data from ENCODE. """ # from ._version import __version__ from . import config # load from myconfig.py if it exists from . import db from . import queues from . import encode from . import externals from . import gtfs from . import fastqs from . import experiments from . import mappings from . import integration
enCount/__init__.py
403
enCount tasks and analyses. enCount is a Python library for processing RNA-Seq data from ENCODE. from ._version import __version__ load from myconfig.py if it exists
168
en
0.636759
from asgiref.sync import sync_to_async
from channels.layers import get_channel_layer

from ....models import Participant
import humps

channel_layer = get_channel_layer()


def get_participant(room_channel_name, channel_name):
    participant = Participant.objects.get(
        channel_room__channel_name=room_channel_name,
        channel_name=channel_name
    )
    return participant


def get_participant_id(participant):
    return participant.id


async def broadcast_avatar_position(room_channel_name, channel_name, json_data):
    """
    Sends the new avatar's position to the users of the room.
    """
    type = json_data['type']
    payload = json_data['payload']
    position = payload["position"]
    animate = payload["animate"]

    # receive the participant that sent this message
    participant = await sync_to_async(get_participant)(room_channel_name, channel_name)
    participant_id = await sync_to_async(get_participant_id)(participant)

    # if this was for an avatar, then set participant's position to the payload data
    def set_participant_position():
        participant.x = position["x"]
        participant.y = position["y"]
        participant.direction_x = position["directionX"]
        participant.save()

    await sync_to_async(set_participant_position)()

    await channel_layer.group_send(
        room_channel_name,
        {
            'type': type,
            'payload': {
                "participant_id": participant_id,
                "position": position,
                "animate": animate,
            }
        }
    )


async def broadcast_avatar_state(room_channel_name, channel_name, json_data):
    """
    Sends the new avatar's state to the users of the room.
    """
    type = json_data['type']
    payload = json_data['payload']
    state = payload['value']

    # receive the participant that sent this message
    participant = await sync_to_async(get_participant)(room_channel_name, channel_name)
    participant_id = await sync_to_async(get_participant_id)(participant)

    await channel_layer.group_send(
        room_channel_name,
        {
            'type': humps.decamelize(type),
            'payload': {
                "participant_id": participant_id,
                "state": state
            }
        }
    )
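The decamelize call matters because Django Channels dispatches a group message to the consumer method named after its type field. A small sketch with pyhumps (the message type string is illustrative):

import humps

print(humps.decamelize("avatarStateChanged"))  # avatar_state_changed
# i.e. group_send({'type': 'avatar_state_changed', ...}) ends up invoking
# consumer.avatar_state_changed(event) on every consumer subscribed to the group.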
server/websockets/consumers/world/broadcasts/avatar.py
2,293
receive the participant that sent this message if this was for an avatar, then set participant's position to the payload data receive the participant that sent this message
172
en
0.910202
"""Plot graphs from human-readable file formats."""
uniplot/__init__.py
52
Plot graphs from human-readable file formats.
45
en
0.783492
# https://www.hackerrank.com/challenges/tree-height-of-a-binary-tree/problem

def height(root):
    """
    DFS

    v = Vertices
    e = Edges
    d = Depth

    Time complexity: O(v + e)
    Space complexity: O(d)
    """
    if root:
        return 1 + max(height(root.left), height(root.right))
    else:
        return -1
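A self-contained check of height; the Node class here is illustrative (HackerRank supplies its own node type).

class Node:
    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

#     1
#    / \
#   2   3
#        \
#         4
root = Node(1, Node(2), Node(3, right=Node(4)))
print(height(root))  # 2 -- edges on the longest root-to-leaf path
print(height(None))  # -1 for an empty tree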
HackerRank/Data Structures/Trees/height-of-a-binary-tree.py
331
DFS v = Vertices e = Edges d = Depth Time complexity: O(v + e) Space complexity: O(d) https://www.hackerrank.com/challenges/tree-height-of-a-binary-tree/problem
167
en
0.722572
from dbt.clients.system import load_file_contents from dbt.contracts.files import ( FilePath, ParseFileType, SourceFile, FileHash, AnySourceFile, SchemaSourceFile ) from dbt.parser.schemas import yaml_from_file from dbt.parser.search import FilesystemSearcher # This loads the files contents and creates the SourceFile object def load_source_file( path: FilePath, parse_file_type: ParseFileType, project_name: str) -> AnySourceFile: file_contents = load_file_contents(path.absolute_path, strip=False) checksum = FileHash.from_contents(file_contents) sf_cls = SchemaSourceFile if parse_file_type == ParseFileType.Schema else SourceFile source_file = sf_cls(path=path, checksum=checksum, parse_file_type=parse_file_type, project_name=project_name) source_file.contents = file_contents.strip() if parse_file_type == ParseFileType.Schema: source_file.dfy = yaml_from_file(source_file) return source_file # Special processing for big seed files def load_seed_source_file(match: FilePath, project_name) -> SourceFile: if match.seed_too_large(): # We don't want to calculate a hash of this file. Use the path. source_file = SourceFile.big_seed(match) else: file_contents = load_file_contents(match.absolute_path, strip=False) checksum = FileHash.from_contents(file_contents) source_file = SourceFile(path=match, checksum=checksum) source_file.contents = '' source_file.parse_file_type = ParseFileType.Seed source_file.project_name = project_name return source_file # Use the FilesystemSearcher to get a bunch of FilePaths, then turn # them into a bunch of FileSource objects def get_source_files(project, paths, extension, parse_file_type): # file path list fp_list = list(FilesystemSearcher( project, paths, extension )) # file block list fb_list = [] for fp in fp_list: if parse_file_type == ParseFileType.Seed: fb_list.append(load_seed_source_file(fp, project.project_name)) else: fb_list.append(load_source_file( fp, parse_file_type, project.project_name)) return fb_list def read_files_for_parser(project, files, dirs, extension, parse_ft): parser_files = [] source_files = get_source_files( project, dirs, extension, parse_ft ) for sf in source_files: files[sf.file_id] = sf parser_files.append(sf.file_id) return parser_files # This needs to read files for multiple projects, so the 'files' # dictionary needs to be passed in. What determines the order of # the various projects? Is the root project always last? Do the # non-root projects need to be done separately in order? 
def read_files(project, files, parser_files): project_files = {} project_files['MacroParser'] = read_files_for_parser( project, files, project.macro_paths, '.sql', ParseFileType.Macro, ) project_files['ModelParser'] = read_files_for_parser( project, files, project.source_paths, '.sql', ParseFileType.Model, ) project_files['SnapshotParser'] = read_files_for_parser( project, files, project.snapshot_paths, '.sql', ParseFileType.Snapshot, ) project_files['AnalysisParser'] = read_files_for_parser( project, files, project.analysis_paths, '.sql', ParseFileType.Analysis, ) project_files['DataTestParser'] = read_files_for_parser( project, files, project.test_paths, '.sql', ParseFileType.Test, ) project_files['SeedParser'] = read_files_for_parser( project, files, project.data_paths, '.csv', ParseFileType.Seed, ) project_files['DocumentationParser'] = read_files_for_parser( project, files, project.docs_paths, '.md', ParseFileType.Documentation, ) project_files['SchemaParser'] = read_files_for_parser( project, files, project.all_source_paths, '.yml', ParseFileType.Schema, ) # Also read .yaml files for schema files. Might be better to change # 'read_files_for_parser' accept an array in the future. yaml_files = read_files_for_parser( project, files, project.all_source_paths, '.yaml', ParseFileType.Schema, ) project_files['SchemaParser'].extend(yaml_files) # Store the parser files for this particular project parser_files[project.project_name] = project_files
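To make the bookkeeping concrete, here is a rough sketch of what the two dictionaries that read_files mutates look like afterwards; the project name and file ids are invented, and the exact file_id format may differ from what dbt actually produces.

files = {
    "my_project://models/stg_orders.sql": "<SourceFile>",
    "my_project://models/schema.yml": "<SchemaSourceFile>",
}
parser_files = {
    "my_project": {
        "MacroParser": [],
        "ModelParser": ["my_project://models/stg_orders.sql"],
        "SnapshotParser": [],
        "AnalysisParser": [],
        "DataTestParser": [],
        "SeedParser": [],
        "DocumentationParser": [],
        "SchemaParser": ["my_project://models/schema.yml"],
    },
}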
core/dbt/parser/read_files.py
4,420
This loads the files contents and creates the SourceFile object Special processing for big seed files We don't want to calculate a hash of this file. Use the path. Use the FilesystemSearcher to get a bunch of FilePaths, then turn them into a bunch of FileSource objects file path list file block list This needs to read files for multiple projects, so the 'files' dictionary needs to be passed in. What determines the order of the various projects? Is the root project always last? Do the non-root projects need to be done separately in order? Also read .yaml files for schema files. Might be better to change 'read_files_for_parser' accept an array in the future. Store the parser files for this particular project
715
en
0.86283
from typing import List

'''
1. subproblems: dp(amount) = the minimum number of coins needed to make change
   for `amount` using the given coin denominations
2. guessing: all the available denominations c_i
3. relate subproblems: dp(amount) = min(dp(amount - c_i) + 1) for all possible c_i

Time complexity: O(#subproblems * #coins)
'''


class Solution:
    # top-down solution
    def coinChange(self, coins: List[int], amount: int) -> int:
        # for amount less than 1, return 0
        if amount < 1:
            return 0
        memo = {}

        def helper(coins, amount):
            # for subproblems that we have already solved and memoized
            if amount in memo:
                return memo[amount]
            # base case, we reach the bottom of the tree
            if amount == 0:
                return 0
            # go through all possible coin denominations (branches in the tree)
            dp = float('inf')
            for coin in coins:
                if coin > amount:
                    continue
                # relate subproblems
                dp = min(helper(coins, amount - coin) + 1, dp)
            memo[amount] = dp
            return dp

        helper(coins, amount)
        return -1 if memo[amount] == float('inf') else memo[amount]

    # bottom-up solution, DAG
    def coinChange_2(self, coins: List[int], amount: int) -> int:
        memo = [float('inf') for i in range(amount + 1)]
        # dp[i] = min{dp[i - c_i] + 1} for all c_i
        memo[0] = 0
        for i in range(amount + 1):
            # check all the states that are reachable by coins to state i
            for coin in coins:
                if i < coin:
                    continue
                memo[i] = min(memo[i], memo[i - coin] + 1)
        print(memo)
        return -1 if memo[amount] == float('inf') else memo[amount]


x = Solution()
# rs = x.coinChange([1, 2, 5], 2)
print(x.coinChange_2([1, 2, 5], 11))
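Worked example for the bottom-up variant with coins = [1, 2, 5] and amount = 11 (exactly what the trailing print exercises): memo fills to [0, 1, 1, 2, 2, 1, 2, 2, 3, 3, 2, 3], and memo[11] = min(memo[10] + 1, memo[9] + 1, memo[6] + 1) = min(3, 4, 3) = 3, i.e. 11 = 5 + 5 + 1 takes three coins.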
solution/322. coin-change.py
2,052
top-down solution for amount less than 1, return 0 for subproblems that we have already solved and memoized base case, we reach the bottom of the tree. go through all possible coin denominations (branches in the tree) relate subproblems bottom-up solution, DAG dp[i] = min{dp[i - c_i] + 1} for all c_i check all the states that are reachable by coins to state i rs = x.coinChange([1, 2, 5], 2)
390
en
0.86493
import SimpleITK as sitk import numpy as np import torch import math import time import sys import cv2 from scipy.ndimage.interpolation import zoom from torch.autograd import Variable sys.path.append('../lung_nodule_detector') from training.layers import nms def load_itk_image(filename): with open(filename) as f: contents = f.readlines() line = [k for k in contents if k.startswith('TransformMatrix')][0] transformM = np.array(line.split(' = ')[1].split(' ')).astype('float') transformM = np.round(transformM) if np.any(transformM != np.array([1, 0, 0, 0, 1, 0, 0, 0, 1])): isflip = True else: isflip = False itkimage = sitk.ReadImage(filename) numpyImage = sitk.GetArrayFromImage(itkimage) numpyOrigin = np.array(list(reversed(itkimage.GetOrigin()))) numpySpacing = np.array(list(reversed(itkimage.GetSpacing()))) return numpyImage, numpyOrigin, numpySpacing, isflip def lumTrans(img): lungwin = np.array([-1200.,600.]) newimg = (img-lungwin[0])/(lungwin[1]-lungwin[0]) newimg[newimg<0]=0 newimg[newimg>1]=1 newimg = (newimg*255).astype('uint8') return newimg def resample(imgs, spacing, new_spacing, progressBar, order=2): print (len(imgs.shape)) if len(imgs.shape)==3: new_shape = np.round(imgs.shape * spacing / new_spacing) true_spacing = spacing * imgs.shape / new_shape resize_factor = new_shape / imgs.shape imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order) progressBar.setValue(40) return imgs, true_spacing elif len(imgs.shape)==4: n = imgs.shape[-1] newimg = [] for i in range(n): slice = imgs[:,:,:,i] newslice,true_spacing = resample(slice,spacing,new_spacing) newimg.append(newslice) newimg=np.transpose(np.array(newimg),[1,2,3,0]) return newimg,true_spacing else: raise ValueError('wrong shape') def resample_v1(imgs, spacing, new_spacing, order=2): print (len(imgs.shape)) if len(imgs.shape)==3: new_shape = np.round(imgs.shape * spacing / new_spacing) true_spacing = spacing * imgs.shape / new_shape resize_factor = new_shape / imgs.shape imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order) return imgs, true_spacing elif len(imgs.shape)==4: n = imgs.shape[-1] newimg = [] for i in range(n): slice = imgs[:,:,:,i] newslice,true_spacing = resample(slice,spacing,new_spacing) newimg.append(newslice) newimg=np.transpose(np.array(newimg),[1,2,3,0]) return newimg,true_spacing else: raise ValueError('wrong shape') def split_data(data, stride, split_comber): print (data.shape[1:]) nz, nh, nw = data.shape[1:] pz = int(np.ceil(float(nz) / stride)) * stride ph = int(np.ceil(float(nh) / stride)) * stride pw = int(np.ceil(float(nw) / stride)) * stride data = np.pad(data, [[0, 0], [0, pz - nz], [0, ph - nh], [0, pw - nw]], 'constant', constant_values=0) xx, yy, zz = np.meshgrid(np.linspace(-0.5, 0.5, data.shape[1] / stride), np.linspace(-0.5, 0.5, data.shape[2] / stride), np.linspace(-0.5, 0.5, data.shape[3] / stride), indexing='ij') coord = np.concatenate([xx[np.newaxis, ...], yy[np.newaxis, ...], zz[np.newaxis, :]], 0).astype('float32') data, nzhw = split_comber.split(data) coord2, nzhw2 = split_comber.split(coord, side_len=split_comber.side_len / stride, max_stride=split_comber.max_stride / stride, margin=split_comber.margin / stride) assert np.all(nzhw == nzhw2) data = (data.astype(np.float32) - 128) / 128 return torch.from_numpy(data), torch.from_numpy(coord2), np.array(nzhw) def convert_prob(pbb): for label in pbb: pos_ori = label[1:4] radious_ori = label[4] #pos_ori = pos_ori + extendbox[:, 0] label[1:4] = pos_ori label[4] = radious_ori label[0] = sigmoid(label[0]) return pbb def sigmoid(x): 
return 1 / (1 + math.exp(-x)) def predict_nodule(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb, progressBar): net.eval() total_label = 0 total_candi = 0 splitlist = list(range(0, len(data) + 1, n_per_run)) if splitlist[-1] != len(data): splitlist.append(len(data)) outputlist = [] for i in range(len(splitlist) - 1): with torch.no_grad(): inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda() inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda() output = net(inputdata, inputcoord) outputlist.append(output.data.cpu().numpy()) progressBar.setValue(10 + (80/len(splitlist) * (i+1))) output = np.concatenate(outputlist, 0) output = split_comber.combine(output, nzhw=nzhw) # fps 1.215909091, sens 0.933333333, thres 0.371853054 thresh = 0.371853054 pbb, mask = get_pbb(output, thresh, ismask=True) pbb = pbb[pbb[:, 0].argsort()[::-1]] pbb_cand_list = [] # check overlap under 3mm for cand in pbb: is_overlap = False for appended in pbb_cand_list: minimum_dist = 3 dist = math.sqrt( math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow( appended[3] - cand[3], 2)) if (dist < minimum_dist): is_overlap = True break; if not is_overlap: pbb_cand_list.append(cand) pbb_cand_list = np.array(pbb_cand_list) pbb_cand_list_nms = nms(pbb_cand_list, 0.3) # print (name) # print (lbb) world_pbb = convert_prob(pbb_cand_list_nms) # print (world_pbb) print("label", len(lbb)) print("z_pos y_pos x_pos size") for i in range(len(lbb)): for j in range(len(lbb[i])): print(round(lbb[i][j], 2), end='\t') print() print("candidate", len(world_pbb)) print("prob z_pos y_pos x_pos size") for i in range(len(world_pbb)): for j in range(len(world_pbb[i])): print(round(world_pbb[i][j], 2), end='\t') print() total_label += len(lbb) total_candi += len(world_pbb) return lbb, world_pbb def predict_nodule_v1(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb): net.eval() total_label = 0 total_candi = 0 splitlist = list(range(0, len(data) + 1, n_per_run)) if splitlist[-1] != len(data): splitlist.append(len(data)) outputlist = [] for i in range(len(splitlist) - 1): with torch.no_grad(): inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda() inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda() output = net(inputdata, inputcoord) outputlist.append(output.data.cpu().numpy()) output = np.concatenate(outputlist, 0) output = split_comber.combine(output, nzhw=nzhw) # fps 1.215909091, sens 0.933333333, thres 0.371853054 thresh = 0.371853054 pbb, mask = get_pbb(output, thresh, ismask=True) pbb = pbb[pbb[:, 0].argsort()[::-1]] pbb_cand_list = [] # check overlap under 3mm for cand in pbb: is_overlap = False for appended in pbb_cand_list: minimum_dist = 3 dist = math.sqrt( math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow( appended[3] - cand[3], 2)) if (dist < minimum_dist): is_overlap = True break; if not is_overlap: pbb_cand_list.append(cand) pbb_cand_list = np.array(pbb_cand_list) pbb_cand_list_nms = nms(pbb_cand_list, 0.3) # print (name) # print (lbb) world_pbb = convert_prob(pbb_cand_list_nms) # print (world_pbb) print("label", len(lbb)) print("z_pos y_pos x_pos size") for i in range(len(lbb)): for j in range(len(lbb[i])): print(round(lbb[i][j], 2), end='\t') print() print("candidate", len(world_pbb)) print("prob z_pos y_pos x_pos size") for i in range(len(world_pbb)): for j in range(len(world_pbb[i])): print(round(world_pbb[i][j], 2), end='\t') print() total_label += len(lbb) total_candi += len(world_pbb) return lbb, 
world_pbb def draw_nodule_rect(lbb, world_pbb, img_arr): for i in range(len(lbb)): label = lbb[i] # label = np.ceil(label) r = (label[3] / 2) * 1.3 top_left = (max(int(math.ceil(label[2] - r)), 0), max(int(math.ceil(label[1] - r)), 0)) bottom_right = (min(int(math.ceil(label[2] + r)), np.shape(img_arr)[1]), min(int(math.ceil(label[1] + r)), np.shape(img_arr)[2])) z_range = [max(int(math.ceil(label[0] - r)), 0), min(int(math.ceil(label[0] + r)), np.shape(img_arr)[0])] for j in range(z_range[0], z_range[1]): cv2.rectangle(img_arr[j], top_left, bottom_right, (0, 255, 0), 1) for i in range(len(world_pbb)): candidate = world_pbb[i] r = (candidate[4] / 2) * 1.3 top_left = (max(int(math.ceil(candidate[3] - r)), 0), max(int(math.ceil(candidate[2] - r)), 0)) text_top_left = (max(int(math.ceil(candidate[3] - r)) - 1, 0), max(int(math.ceil(candidate[2] - r)) - 1, 0)) bottom_right = (min(int(math.ceil(candidate[3] + r)), np.shape(img_arr)[1]), min(int(math.ceil(candidate[2] + r)), np.shape(img_arr)[2])) z_range = [max(int(math.ceil(candidate[1] - r)), 0), min(int(math.ceil(candidate[1] + r)), np.shape(img_arr)[0])] font = cv2.FONT_HERSHEY_SIMPLEX for j in range(z_range[0], z_range[1]): cv2.rectangle(img_arr[j], top_left, bottom_right, (255, 0, 0), 1) #cv2.putText(img_arr[j], "c" + str(i) + "_" +str(round(candidate[0], 2)), top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA) cv2.putText(img_arr[j], "c" + str(i), text_top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA) def crop_all(target, img_arr, crop_size = 48): target = np.copy(target) start = [] for i in range(3): start.append(int(round(target[i])) - int(crop_size / 2)) pad = [] pad.append([0, 0]) for i in range(3): leftpad = max(0, -start[i]) rightpad = max(0, start[i] + crop_size - img_arr.shape[i + 1]) pad.append([leftpad, rightpad]) crop = img_arr[:, max(start[0], 0):min(start[0] + crop_size, img_arr.shape[1]), max(start[1], 0):min(start[1] + crop_size, img_arr.shape[2]), max(start[2], 0):min(start[2] + crop_size, img_arr.shape[3])] crop = np.pad(crop, pad, 'constant', constant_values=0) for i in range(3): target[i] = target[i] - start[i] return crop, target def crop_nodule_arr_2ch(target, img_arr, crop_size = 48): img_size = [crop_size, crop_size, crop_size] crop_img, target = crop_all(target, img_arr, crop_size) imgs = np.squeeze(crop_img, axis=0) z = int(target[0]) y = int(target[1]) x = int(target[2]) print (z, y, x) # z = 24 # y = 24 # x = 24 nodule_size = int(target[3]) margin = max(7, nodule_size * 0.4) radius = int((nodule_size + margin) / 2) s_z_pad = 0 e_z_pad = 0 s_y_pad = 0 e_y_pad = 0 s_x_pad = 0 e_x_pad = 0 s_z = max(0, z - radius) if (s_z == 0): s_z_pad = -(z - radius) e_z = min(np.shape(imgs)[0], z + radius) if (e_z == np.shape(imgs)[0]): e_z_pad = (z + radius) - np.shape(imgs)[0] s_y = max(0, y - radius) if (s_y == 0): s_y_pad = -(y - radius) e_y = min(np.shape(imgs)[1], y + radius) if (e_y == np.shape(imgs)[1]): e_y_pad = (y + radius) - np.shape(imgs)[1] s_x = max(0, x - radius) if (s_x == 0): s_x_pad = -(x - radius) e_x = min(np.shape(imgs)[2], x + radius) if (e_x == np.shape(imgs)[2]): e_x_pad = (x + radius) - np.shape(imgs)[2] # print (s_x, e_x, s_y, e_y, s_z, e_z) # print (np.shape(img_arr[s_z:e_z, s_y:e_y, s_x:e_x])) nodule_img = imgs[s_z:e_z, s_y:e_y, s_x:e_x] nodule_img = np.pad(nodule_img, [[s_z_pad, e_z_pad], [s_y_pad, e_y_pad], [s_x_pad, e_x_pad]], 'constant', constant_values=0) imgpad_size = [img_size[0] - np.shape(nodule_img)[0], img_size[1] - np.shape(nodule_img)[1], img_size[2] - np.shape(nodule_img)[2]] imgpad = [] 
    imgpad_left = [int(imgpad_size[0] / 2), int(imgpad_size[1] / 2), int(imgpad_size[2] / 2)]
    imgpad_right = [int(imgpad_size[0] / 2), int(imgpad_size[1] / 2), int(imgpad_size[2] / 2)]
    for i in range(3):
        if (imgpad_size[i] % 2 != 0):
            # odd padding: randomly assign the extra voxel to the left or right side
            rand = np.random.randint(2)
            if rand == 0:
                imgpad.append([imgpad_left[i], imgpad_right[i] + 1])
            else:
                imgpad.append([imgpad_left[i] + 1, imgpad_right[i]])
        else:
            imgpad.append([imgpad_left[i], imgpad_right[i]])

    padding_crop = np.pad(nodule_img, imgpad, 'constant', constant_values=0)
    padding_crop = np.expand_dims(padding_crop, axis=0)

    crop = np.concatenate((padding_crop, crop_img))
    crop = (crop.astype(np.float32) - 128) / 128

    return torch.from_numpy(crop), crop


def predict_attribute(attribute_net, crop_img):
    attribute_net.eval()
    with torch.no_grad():
        # `async` is a reserved keyword in Python 3.7+; use non_blocking instead
        crop_img = Variable(crop_img.cuda(non_blocking=True))
        output = attribute_net(crop_img)
    return output
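lumTrans (defined earlier in this file) windows CT intensities to the Hounsfield range [-1200, 600] before rescaling to 8-bit. A small worked check (the HU values are illustrative):

import numpy as np

hu = np.array([[-1500.0, -300.0, 800.0]])
print(lumTrans(hu))  # [[  0 127 255]]
# -300 HU -> (-300 + 1200) / 1800 = 0.5 -> 127 after the uint8 cast;
# values below -1200 HU clamp to 0 and values above 600 HU clamp to 255.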
UI_util.py
14,065
pos_ori = pos_ori + extendbox[:, 0] fps 1.215909091, sens 0.933333333, thres 0.371853054 check overlap under 3mm print (name) print (lbb) print (world_pbb) fps 1.215909091, sens 0.933333333, thres 0.371853054 check overlap under 3mm print (name) print (lbb) print (world_pbb) label = np.ceil(label)cv2.putText(img_arr[j], "c" + str(i) + "_" +str(round(candidate[0], 2)), top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA) z = 24 y = 24 x = 24 print (s_x, e_x, s_y, e_y, s_z, e_z) print (np.shape(img_arr[s_z:e_z, s_y:e_y, s_x:e_x]))
531
en
0.428478
# Copyright (c) 2021 Sen Wu. All Rights Reserved.
"""Helper function to set random seed for reproducibility of models."""

import logging
import random
from typing import Optional

import numpy as np
import torch

logger = logging.getLogger(__name__)


def set_random_seed(seed: Optional[int] = None) -> None:
    """Set random seed for random, numpy, and pytorch.

    Args:
      seed: The random seed, defaults to `None` which selects it randomly.
    """
    max_value = np.iinfo(np.uint32).max
    min_value = np.iinfo(np.uint32).min

    try:
        seed = int(seed)
        logger.info(f"Set random seed to {seed}.")
    except (TypeError, ValueError):
        seed = random.randint(min_value, max_value)
        logger.info(f"No random seed specified, randomly set random seed to {seed}.")

    if not (min_value <= seed <= max_value):
        new_seed = random.randint(min_value, max_value)
        logger.info(
            f"Random seed {seed} is not valid, randomly set random seed to {new_seed}."
        )
        seed = new_seed

    # Set random seed for random
    random.seed(seed)
    # Set random seed for all numpy operations
    np.random.seed(seed=seed)
    # Set random seed for PyTorch
    torch.manual_seed(seed)
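Usage sketch: seeding makes subsequent torch/numpy/random draws repeatable; the import path follows this file's location (src/emmental/utils/seed.py) and may differ in the installed package.

import torch
from emmental.utils.seed import set_random_seed

set_random_seed(1234)
a = torch.rand(3)
set_random_seed(1234)
b = torch.rand(3)
print(torch.equal(a, b))  # True -- identical draws after re-seeding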
src/emmental/utils/seed.py
1,240
Set random seed for random, numpy, and pytorch. Args: seed: The random seed, defaults to `None` which selects it randomly. Helper function to set random seed for reproducibility of models. Copyright (c) 2021 Sen Wu. All Rights Reserved. Set random seed for random Set random seed for all numpy operations Set random seed for PyTorch
336
en
0.725011